feat(annotations): Add annotations to rewrite requests (#68)

* feat(annotations): Add annotations to rewrite requests

* Upgrade caddy, ingress API version and some other deps

* fix graceful shutdown

* Upgrade caddy to v2.4.6 and add OCSP Check interval to global config

* Add caddy duration parser
This commit is contained in:
Marc-Antoine 2022-03-27 07:19:04 +02:00 committed by GitHub
parent d5b85e5d7a
commit e9c594cd55
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
19 changed files with 1097 additions and 458 deletions

View File

@ -1,4 +1,4 @@
FROM golang:1.14.2-alpine AS builder
FROM golang:1.16.7-alpine AS builder
WORKDIR /app
COPY go.mod go.sum ./
@ -13,7 +13,7 @@ RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o ./bin/ingress-controller .
FROM alpine:latest AS certs
RUN apk --update add ca-certificates
FROM scratch
FROM alpine:latest
COPY --from=builder /app/bin/ingress-controller .
COPY --from=certs /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
EXPOSE 80 443

View File

@ -5,7 +5,12 @@ for monitoring `Ingress` resources on a Kubernetes cluster and includes support
for providing automatic HTTPS certificates for all hostnames defined in ingress
resources that it is managing.
## Cloud Provider Setup (AWS, GCLOUD, ETC...)
## Prerequisites
- Helm 3+
- Kubernetes 1.19+
## Setup
In the `charts` folder a Helm Chart is provided to make installing the Caddy
Ingress Controller on a Kubernetes cluster straightforward. To install the
@ -14,19 +19,19 @@ Caddy Ingress Controller adhere to the following steps:
1. Create a new namespace in your cluster to isolate all Caddy resources.
```sh
kubectl create namespace caddy-system
kubectl create namespace caddy-system
```
2. Install the Helm Chart.
```sh
helm install \
--namespace=caddy-system \
--repo https://caddyserver.github.io/ingress/ \
--atomic \
--set image.tag=latest \
mycaddy \
caddy-ingress-controller
helm install \
--namespace=caddy-system \
--repo https://caddyserver.github.io/ingress/ \
--atomic \
--set image.tag=latest \
mycaddy \
caddy-ingress-controller
```
The helm chart creates a service of type `LoadBalancer` in the `caddy-system`
@ -44,7 +49,7 @@ pod logs of the Caddy Ingress Controller.
Get the pod name with:
```sh
kubectl get pods -n caddy-system
kubectl get pods -n caddy-system
```
View the pod logs:
@ -61,7 +66,6 @@ the argument `ingressController.autotls=true` and the email to use
chart values.
Example:
- `--set ingressController.autotls=true`
- `--set ingressController.email=your@email.com`
@ -103,3 +107,11 @@ spec:
- test.com
secretName: mycerts # use mycerts for host test.com
```
### Contribution
Learn how to start contributing in the [Contributing Guideline](CONTRIBUTING.md).
## License
[Apache License 2.0](LICENSE.txt)

View File

@ -4,7 +4,7 @@ home: https://github.com/caddyserver/ingress
description: A helm chart for the Caddy Kubernetes ingress controller
icon: https://caddyserver.com/resources/images/caddy-circle-lock.svg
type: application
version: 0.0.1-rc3
version: 0.0.1-rc4
keywords:
- ingress-controller
- caddyserver

View File

@ -17,7 +17,7 @@ This chart bootstraps a caddy-ingress-deployment deployment on a [Kubernetes](ht
## Prerequisites
- Helm 3+
- Kubernetes 1.14+
- Kubernetes 1.19+
## Installing the Chart
@ -60,6 +60,7 @@ The command removes all the Kubernetes components associated with the chart and
| ingressController.config.proxyProtocol | bool | `false` | |
| ingressController.rbac.create | bool | `true` | |
| ingressController.verbose | bool | `false` | |
| ingressController.leaseId | string | `""` | |
| ingressController.watchNamespace | string | `""` | |
| minikube | bool | `false` | |
| nameOverride | string | `""` | |

View File

@ -70,6 +70,9 @@ spec:
{{- if .Values.ingressController.watchNamespace }}
- -namespace={{ .Values.ingressController.watchNamespace }}
{{- end }}
{{- if .Values.ingressController.leaseId }}
- -lease-id={{ .Values.ingressController.leaseId }}
{{- end }}
{{- if .Values.ingressController.verbose }}
- -v
{{- end }}

View File

@ -90,6 +90,10 @@
}
}
},
"leaseId": {
"$id": "#/properties/ingressController/properties/leaseId",
"type": "string"
},
"config": {
"$id": "#/properties/ingressController/properties/config",
"type": "object",

View File

@ -22,6 +22,7 @@ ingressController:
rbac:
create: true
leaseId: ""
config:
# -- Acme Server URL
acmeCA: ""

View File

@ -9,6 +9,9 @@ import (
"k8s.io/apimachinery/pkg/version"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
"os"
"os/signal"
"syscall"
"time"
)
@ -49,7 +52,6 @@ func main() {
}
stopCh := make(chan struct{}, 1)
defer close(stopCh)
c := controller.NewCaddyController(logger, kubeClient, cfg, caddy.Converter{}, stopCh)
@ -57,7 +59,13 @@ func main() {
logger.Info("Starting the caddy ingress controller")
go c.Run()
// TODO :- listen to sigterm
// Listen for SIGINT and SIGTERM signals
sigs := make(chan os.Signal, 1)
signal.Notify(sigs, os.Interrupt, syscall.SIGTERM)
<-sigs
close(stopCh)
// Let controller exit the process
select {}
}

13
go.mod
View File

@ -1,16 +1,15 @@
module github.com/caddyserver/ingress
go 1.14
go 1.16
require (
github.com/caddyserver/caddy/v2 v2.3.0-rc.1
github.com/caddyserver/certmagic v0.12.1-0.20201209195841-b726d1ed13c3
github.com/google/uuid v1.1.1
github.com/mitchellh/mapstructure v1.1.2
github.com/caddyserver/caddy/v2 v2.4.6
github.com/caddyserver/certmagic v0.15.2
github.com/google/uuid v1.3.0
github.com/mitchellh/mapstructure v1.4.3
github.com/pires/go-proxyproto v0.3.1
github.com/pkg/errors v0.9.1
go.uber.org/multierr v1.6.0 // indirect
go.uber.org/zap v1.16.0
go.uber.org/zap v1.19.0
gopkg.in/go-playground/assert.v1 v1.2.1 // indirect
gopkg.in/go-playground/pool.v3 v3.1.1
k8s.io/api v0.19.4

1272
go.sum

File diff suppressed because it is too large Load Diff

View File

@ -43,7 +43,8 @@ func LoadConfigMapOptions(config *Config, store *controller.Store) error {
}
tlsApp.Automation = &caddytls.AutomationConfig{
OnDemand: onDemandConfig,
OnDemand: onDemandConfig,
OCSPCheckInterval: cfgMap.OCSPCheckInterval,
Policies: []*caddytls.AutomationPolicy{
{
IssuersRaw: []json.RawMessage{

View File

@ -6,25 +6,77 @@ import (
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/caddyconfig"
"github.com/caddyserver/caddy/v2/modules/caddyhttp"
"github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy"
"github.com/caddyserver/caddy/v2/modules/caddyhttp/rewrite"
"github.com/caddyserver/ingress/internal/controller"
"k8s.io/api/networking/v1beta1"
"k8s.io/api/networking/v1"
)
const (
annotationPrefix = "caddy.ingress.kubernetes.io"
rewriteToAnnotation = "rewrite-to"
rewriteStripPrefixAnnotation = "rewrite-strip-prefix"
disableSSLRedirect = "disable-ssl-redirect"
)
func getAnnotation(ing *v1.Ingress, rule string) string {
return ing.Annotations[annotationPrefix+"/"+rule]
}
// TODO :- configure log middleware for all routes
func baseRoute(upstream string) caddyhttp.Route {
return caddyhttp.Route{
HandlersRaw: []json.RawMessage{
json.RawMessage(`
{
"handler": "reverse_proxy",
"upstreams": [
{
"dial": "` + fmt.Sprintf("%s", upstream) + `"
}
]
}
`),
func generateRoute(ing *v1.Ingress, rule v1.IngressRule, path v1.HTTPIngressPath) caddyhttp.Route {
var handlers []json.RawMessage
// Generate handlers
rewriteTo := getAnnotation(ing, rewriteToAnnotation)
if rewriteTo != "" {
handlers = append(handlers, caddyconfig.JSONModuleObject(
rewrite.Rewrite{URI: rewriteTo},
"handler", "rewrite", nil,
))
}
rewriteStripPrefix := getAnnotation(ing, rewriteStripPrefixAnnotation)
if rewriteStripPrefix != "" {
handlers = append(handlers, caddyconfig.JSONModuleObject(
rewrite.Rewrite{StripPathPrefix: rewriteStripPrefix},
"handler", "rewrite", nil,
))
}
clusterHostName := fmt.Sprintf("%v.%v.svc.cluster.local:%d", path.Backend.Service.Name, ing.Namespace, path.Backend.Service.Port.Number)
handlers = append(handlers, caddyconfig.JSONModuleObject(
reverseproxy.Handler{
Upstreams: reverseproxy.UpstreamPool{
{Dial: clusterHostName},
},
},
"handler", "reverse_proxy", nil,
))
// Generate matchers
match := caddy.ModuleMap{}
if getAnnotation(ing, disableSSLRedirect) != "true" {
match["protocol"] = caddyconfig.JSON(caddyhttp.MatchProtocol("https"), nil)
}
if rule.Host != "" {
match["host"] = caddyconfig.JSON(caddyhttp.MatchHost{rule.Host}, nil)
}
if path.Path != "" {
p := path.Path
if *path.PathType == v1.PathTypePrefix {
p += "*"
}
match["path"] = caddyconfig.JSON(caddyhttp.MatchPath{p}, nil)
}
return caddyhttp.Route{
HandlersRaw: handlers,
MatcherSetsRaw: []caddy.ModuleMap{match},
}
}
@ -40,29 +92,7 @@ func LoadIngressConfig(config *Config, store *controller.Store) error {
for _, ing := range store.Ingresses {
for _, rule := range ing.Spec.Rules {
for _, path := range rule.HTTP.Paths {
clusterHostName := fmt.Sprintf("%v.%v.svc.cluster.local:%d", path.Backend.ServiceName, ing.Namespace, path.Backend.ServicePort.IntVal)
r := baseRoute(clusterHostName)
match := caddy.ModuleMap{
// match only on https protocol to allow HTTPS redirects
// TODO Let user disable this to serve HTTP requests
"protocol": caddyconfig.JSON(caddyhttp.MatchProtocol("https"), nil),
}
if rule.Host != "" {
match["host"] = caddyconfig.JSON(caddyhttp.MatchHost{rule.Host}, nil)
}
if path.Path != "" {
p := path.Path
if *path.PathType == v1beta1.PathTypePrefix {
p += "*"
}
match["path"] = caddyconfig.JSON(caddyhttp.MatchPath{p}, nil)
}
r.MatcherSetsRaw = []caddy.ModuleMap{match}
r := generateRoute(ing, rule, path)
routes = append(routes, r)
}

View File

@ -1,34 +1,34 @@
package controller
import (
"k8s.io/api/networking/v1beta1"
"k8s.io/api/networking/v1"
)
// IngressAddedAction provides an implementation of the action interface.
type IngressAddedAction struct {
resource *v1beta1.Ingress
resource *v1.Ingress
}
// IngressUpdatedAction provides an implementation of the action interface.
type IngressUpdatedAction struct {
resource *v1beta1.Ingress
oldResource *v1beta1.Ingress
resource *v1.Ingress
oldResource *v1.Ingress
}
// IngressDeletedAction provides an implementation of the action interface.
type IngressDeletedAction struct {
resource *v1beta1.Ingress
resource *v1.Ingress
}
// onIngressAdded runs when an ingress resource is added to the cluster.
func (c *CaddyController) onIngressAdded(obj *v1beta1.Ingress) {
func (c *CaddyController) onIngressAdded(obj *v1.Ingress) {
c.syncQueue.Add(IngressAddedAction{
resource: obj,
})
}
// onIngressUpdated is run when an ingress resource is updated in the cluster.
func (c *CaddyController) onIngressUpdated(old *v1beta1.Ingress, new *v1beta1.Ingress) {
func (c *CaddyController) onIngressUpdated(old *v1.Ingress, new *v1.Ingress) {
c.syncQueue.Add(IngressUpdatedAction{
resource: new,
oldResource: old,
@ -36,7 +36,7 @@ func (c *CaddyController) onIngressUpdated(old *v1beta1.Ingress, new *v1beta1.In
}
// onIngressDeleted is run when an ingress resource is deleted from the cluster.
func (c *CaddyController) onIngressDeleted(obj *v1beta1.Ingress) {
func (c *CaddyController) onIngressDeleted(obj *v1.Ingress) {
c.syncQueue.Add(IngressDeletedAction{
resource: obj,
})

View File

@ -5,7 +5,7 @@ import (
"go.uber.org/zap"
"gopkg.in/go-playground/pool.v3"
apiv1 "k8s.io/api/core/v1"
"k8s.io/api/networking/v1beta1"
"k8s.io/api/networking/v1"
"k8s.io/client-go/kubernetes"
"net"
"sort"
@ -27,7 +27,7 @@ func (r SyncStatusAction) handle(c *CaddyController) error {
}
// syncStatus ensures that the ingress source address points to this ingress controller's IP address.
func (c *CaddyController) syncStatus(ings []*v1beta1.Ingress) error {
func (c *CaddyController) syncStatus(ings []*v1.Ingress) error {
addrs, err := k8s.GetAddresses(c.podInfo, c.kubeClient)
if err != nil {
return err
@ -41,7 +41,7 @@ func (c *CaddyController) syncStatus(ings []*v1beta1.Ingress) error {
// updateIngStatuses starts a queue and adds all monitored ingresses to update their status source address to the on
// that the ingress controller is running on. This is called by the syncStatus queue.
func (c *CaddyController) updateIngStatuses(controllerAddresses []apiv1.LoadBalancerIngress, ings []*v1beta1.Ingress) {
func (c *CaddyController) updateIngStatuses(controllerAddresses []apiv1.LoadBalancerIngress, ings []*v1.Ingress) {
p := pool.NewLimited(10)
defer p.Close()
@ -66,7 +66,7 @@ func (c *CaddyController) updateIngStatuses(controllerAddresses []apiv1.LoadBala
}
// runUpdate updates the ingress status field.
func runUpdate(logger *zap.SugaredLogger, ing *v1beta1.Ingress, status []apiv1.LoadBalancerIngress, client *kubernetes.Clientset) pool.WorkFunc {
func runUpdate(logger *zap.SugaredLogger, ing *v1.Ingress, status []apiv1.LoadBalancerIngress, client *kubernetes.Clientset) pool.WorkFunc {
return func(wu pool.WorkUnit) (interface{}, error) {
if wu.IsCancelled() {
return nil, nil

View File

@ -5,11 +5,12 @@ import (
"encoding/json"
"fmt"
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/certmagic"
"github.com/caddyserver/ingress/internal/k8s"
"github.com/caddyserver/ingress/pkg/storage"
"go.uber.org/zap"
apiv1 "k8s.io/api/core/v1"
"k8s.io/api/networking/v1beta1"
"k8s.io/api/networking/v1"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/informers"
@ -52,7 +53,7 @@ type Options struct {
type Store struct {
Options *Options
ConfigMap *k8s.ConfigMapOptions
Ingresses []*v1beta1.Ingress
Ingresses []*v1.Ingress
}
// Informer defines the required SharedIndexInformers that interact with the API server.
@ -176,6 +177,12 @@ func NewCaddyController(
func (c *CaddyController) Shutdown() error {
// remove this ingress controller's ip from ingress resources.
c.updateIngStatuses([]apiv1.LoadBalancerIngress{{}}, c.resourceStore.Ingresses)
if err := caddy.Stop(); err != nil {
c.logger.Error("failed to stop caddy server", zap.Error(err))
return err
}
certmagic.CleanUpOwnLocks(c.logger.Desugar())
return nil
}
@ -274,7 +281,7 @@ func (c *CaddyController) reloadCaddy() error {
return nil
}
c.logger.Debug("reloading caddy with config %v" + string(j))
c.logger.Debug("reloading caddy with config", string(j))
err = caddy.Load(j, false)
if err != nil {
return fmt.Errorf("could not reload caddy config %v", err.Error())

View File

@ -2,21 +2,21 @@ package controller
import (
"github.com/caddyserver/ingress/internal/k8s"
"k8s.io/api/networking/v1beta1"
"k8s.io/api/networking/v1"
)
// NewStore returns a new store that keeps track of K8S resources needed by the controller.
func NewStore(opts Options) *Store {
s := &Store{
Options: &opts,
Ingresses: []*v1beta1.Ingress{},
Ingresses: []*v1.Ingress{},
ConfigMap: &k8s.ConfigMapOptions{},
}
return s
}
// AddIngress adds an ingress to the store. It updates the element at the given index if it is unique.
func (s *Store) AddIngress(ing *v1beta1.Ingress) {
func (s *Store) AddIngress(ing *v1.Ingress) {
isUniq := true
for i := range s.Ingresses {
@ -33,7 +33,7 @@ func (s *Store) AddIngress(ing *v1beta1.Ingress) {
}
// PluckIngress removes the ingress passed in as an argument from the stores list of ingresses.
func (s *Store) PluckIngress(ing *v1beta1.Ingress) {
func (s *Store) PluckIngress(ing *v1.Ingress) {
id := ing.GetUID()
var index int

View File

@ -7,6 +7,8 @@ import (
v12 "k8s.io/api/core/v1"
"k8s.io/client-go/informers"
"k8s.io/client-go/tools/cache"
"reflect"
"time"
)
// ConfigMapOptions represents global options set through a configmap
@ -20,6 +22,7 @@ type ConfigMapOptions struct {
OnDemandRateLimitInterval caddy.Duration `json:"onDemandTLSRateLimitInterval,omitempty"`
OnDemandRateLimitBurst int `json:"onDemandTLSRateLimitBurst,omitempty"`
OnDemandAsk string `json:"onDemandTLSAsk,omitempty"`
OCSPCheckInterval caddy.Duration `json:"ocspCheckInterval,omitempty"`
}
type ConfigMapHandlers struct {
@ -69,6 +72,18 @@ func WatchConfigMaps(options ConfigMapParams, funcs ConfigMapHandlers) cache.Sha
return informer
}
func stringToCaddyDurationHookFunc() mapstructure.DecodeHookFunc {
return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
if f.Kind() != reflect.String {
return data, nil
}
if t != reflect.TypeOf(caddy.Duration(time.Second)) {
return data, nil
}
return caddy.ParseDuration(data.(string))
}
}
func ParseConfigMap(cm *v12.ConfigMap) (*ConfigMapOptions, error) {
// parse configmap
cfgMap := ConfigMapOptions{}
@ -77,6 +92,9 @@ func ParseConfigMap(cm *v12.ConfigMap) (*ConfigMapOptions, error) {
WeaklyTypedInput: true,
Result: &cfgMap,
TagName: "json",
DecodeHook: mapstructure.ComposeDecodeHookFunc(
stringToCaddyDurationHookFunc(),
),
}
decoder, err := mapstructure.NewDecoder(config)

View File

@ -5,7 +5,7 @@ import (
"fmt"
"github.com/pkg/errors"
apiv1 "k8s.io/api/core/v1"
"k8s.io/api/networking/v1beta1"
networkingv1 "k8s.io/api/networking/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
@ -13,9 +13,9 @@ import (
)
type IngressHandlers struct {
AddFunc func(obj *v1beta1.Ingress)
UpdateFunc func(oldObj, newObj *v1beta1.Ingress)
DeleteFunc func(obj *v1beta1.Ingress)
AddFunc func(obj *networkingv1.Ingress)
UpdateFunc func(oldObj, newObj *networkingv1.Ingress)
DeleteFunc func(obj *networkingv1.Ingress)
}
type IngressParams struct {
@ -25,27 +25,26 @@ type IngressParams struct {
}
func WatchIngresses(options IngressParams, funcs IngressHandlers) cache.SharedIndexInformer {
// TODO Handle new API
informer := options.InformerFactory.Networking().V1beta1().Ingresses().Informer()
informer := options.InformerFactory.Networking().V1().Ingresses().Informer()
informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
ingress, ok := obj.(*v1beta1.Ingress)
ingress, ok := obj.(*networkingv1.Ingress)
if ok && IsControllerIngress(options, ingress) {
funcs.AddFunc(ingress)
}
},
UpdateFunc: func(oldObj, newObj interface{}) {
oldIng, ok1 := oldObj.(*v1beta1.Ingress)
newIng, ok2 := newObj.(*v1beta1.Ingress)
oldIng, ok1 := oldObj.(*networkingv1.Ingress)
newIng, ok2 := newObj.(*networkingv1.Ingress)
if ok1 && ok2 && IsControllerIngress(options, newIng) {
funcs.UpdateFunc(oldIng, newIng)
}
},
DeleteFunc: func(obj interface{}) {
ingress, ok := obj.(*v1beta1.Ingress)
ingress, ok := obj.(*networkingv1.Ingress)
if ok && IsControllerIngress(options, ingress) {
funcs.DeleteFunc(ingress)
@ -57,18 +56,20 @@ func WatchIngresses(options IngressParams, funcs IngressHandlers) cache.SharedIn
}
// IsControllerIngress check if the ingress object can be controlled by us
// TODO Handle `ingressClassName`
func IsControllerIngress(options IngressParams, ingress *v1beta1.Ingress) bool {
func IsControllerIngress(options IngressParams, ingress *networkingv1.Ingress) bool {
ingressClass := ingress.Annotations["kubernetes.io/ingress.class"]
if ingressClass == "" && ingress.Spec.IngressClassName != nil {
ingressClass = *ingress.Spec.IngressClassName
}
if !options.ClassNameRequired && ingressClass == "" {
return true
}
return ingressClass == options.ClassName
}
func UpdateIngressStatus(kubeClient *kubernetes.Clientset, ing *v1beta1.Ingress, status []apiv1.LoadBalancerIngress) (*v1beta1.Ingress, error) {
ingClient := kubeClient.NetworkingV1beta1().Ingresses(ing.Namespace)
func UpdateIngressStatus(kubeClient *kubernetes.Clientset, ing *networkingv1.Ingress, status []apiv1.LoadBalancerIngress) (*networkingv1.Ingress, error) {
ingClient := kubeClient.NetworkingV1().Ingresses(ing.Namespace)
currIng, err := ingClient.Get(context.TODO(), ing.Name, v1.GetOptions{})
if err != nil {

View File

@ -2,7 +2,7 @@ package k8s
import (
v12 "k8s.io/api/core/v1"
"k8s.io/api/networking/v1beta1"
"k8s.io/api/networking/v1"
"k8s.io/client-go/informers"
"k8s.io/client-go/tools/cache"
)
@ -48,7 +48,7 @@ func WatchTLSSecrets(options TLSSecretParams, funcs TLSSecretHandlers) cache.Sha
return informer
}
func ListTLSSecrets(options TLSSecretParams, ings []*v1beta1.Ingress) ([]*v12.Secret, error) {
func ListTLSSecrets(options TLSSecretParams, ings []*v1.Ingress) ([]*v12.Secret, error) {
lister := options.InformerFactory.Core().V1().Secrets().Lister()
tlsSecrets := []*v12.Secret{}
@ -64,7 +64,7 @@ func ListTLSSecrets(options TLSSecretParams, ings []*v1beta1.Ingress) ([]*v12.Se
return tlsSecrets, nil
}
func IsManagedTLSSecret(secret *v12.Secret, ings []*v1beta1.Ingress) bool {
func IsManagedTLSSecret(secret *v12.Secret, ings []*v1.Ingress) bool {
for _, ing := range ings {
for _, tlsRule := range ing.Spec.TLS {
if tlsRule.SecretName == secret.Name && ing.Namespace == secret.Namespace {