Mirror of https://github.com/eliasstepanik/caddy-ingess.git (synced 2026-01-11 04:28:28 +00:00)
feat(annotations): Add annotations to rewrite requests (#68)
* feat(annotations): Add annotations to rewrite requests
* Upgrade caddy, ingress API version and some other deps
* Fix graceful shutdown
* Upgrade caddy to v2.4.6 and add OCSP check interval to global config
* Add caddy duration parser
This commit is contained in:
parent d5b85e5d7a
commit e9c594cd55
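As a usage sketch (not part of the diff): the rewrite annotations introduced below live under the `caddy.ingress.kubernetes.io/` prefix and could be attached to an Ingress roughly as follows. The host, service name, and port are hypothetical examples.

```yaml
# Hypothetical Ingress using the annotations added in this commit.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: example
  annotations:
    kubernetes.io/ingress.class: caddy
    caddy.ingress.kubernetes.io/rewrite-strip-prefix: /api    # strip /api before proxying
    caddy.ingress.kubernetes.io/disable-ssl-redirect: "true"  # also match plain HTTP requests
spec:
  rules:
    - host: example.com
      http:
        paths:
          - path: /api
            pathType: Prefix
            backend:
              service:
                name: example-svc
                port:
                  number: 8080
```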
@@ -1,4 +1,4 @@
-FROM golang:1.14.2-alpine AS builder
+FROM golang:1.16.7-alpine AS builder
 WORKDIR /app
 
 COPY go.mod go.sum ./
@@ -13,7 +13,7 @@ RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o ./bin/ingress-controller .
 FROM alpine:latest AS certs
 RUN apk --update add ca-certificates
 
-FROM scratch
+FROM alpine:latest
 COPY --from=builder /app/bin/ingress-controller .
 COPY --from=certs /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
 EXPOSE 80 443
README.md
@@ -5,7 +5,12 @@ for monitoring `Ingress` resources on a Kubernetes cluster and includes support
 for providing automatic HTTPS certificates for all hostnames defined in ingress
 resources that it is managing.
 
-## Cloud Provider Setup (AWS, GCLOUD, ETC...)
+## Prerequisites
 
+- Helm 3+
+- Kubernetes 1.19+
+
+## Setup
+
 In the `charts` folder a Helm Chart is provided to make installing the Caddy
 Ingress Controller on a Kubernetes cluster straight forward. To install the
@@ -14,19 +19,19 @@ Caddy Ingress Controller adhere to the following steps:
 1. Create a new namespace in your cluster to isolate all Caddy resources.
 
 ```sh
 kubectl create namespace caddy-system
 ```
 
 2. Install the Helm Chart.
 
 ```sh
 helm install \
 --namespace=caddy-system \
 --repo https://caddyserver.github.io/ingress/ \
 --atomic \
 --set image.tag=latest \
 mycaddy \
 caddy-ingress-controller
 ```
 
 The helm chart create a service of type `LoadBalancer` in the `caddy-system`
@@ -44,7 +49,7 @@ pod logs of the Caddy Ingress Controller.
 Get the pod name with:
 
 ```sh
 kubectl get pods -n caddy-system
 ```
 
 View the pod logs:
@@ -61,7 +66,6 @@ the argument `ingressController.autotls=true` and the email to use
 chart values.
 
 Example:
 
 - `--set ingressController.autotls=true`
 - `--set ingressController.email=your@email.com`
 
@@ -103,3 +107,11 @@ spec:
 - test.com
 secretName: mycerts # use mycerts for host test.com
 ```
+
+### Contribution
+
+Learn how to start contribution on the [Contributing Guidline](CONTRIBUTING.md).
+
+## License
+
+[Apache License 2.0](LICENSE.txt)
@@ -4,7 +4,7 @@ home: https://github.com/caddyserver/ingress
 description: A helm chart for the Caddy Kubernetes ingress controller
 icon: https://caddyserver.com/resources/images/caddy-circle-lock.svg
 type: application
-version: 0.0.1-rc3
+version: 0.0.1-rc4
 keywords:
 - ingress-controller
 - caddyserver
@@ -17,7 +17,7 @@ This chart bootstraps a caddy-ingress-deployment deployment on a [Kubernetes](ht
 ## Prerequisites
 
 - Helm 3+
-- Kubernetes 1.14+
+- Kubernetes 1.19+
 
 ## Installing the Chart
 
@@ -60,6 +60,7 @@ The command removes all the Kubernetes components associated with the chart and
 | ingressController.config.proxyProtocol | bool | `false` | |
 | ingressController.rbac.create | bool | `true` | |
 | ingressController.verbose | bool | `false` | |
+| ingressController.leaseId | string | `""` | |
 | ingressController.watchNamespace | string | `""` | |
 | minikube | bool | `false` | |
 | nameOverride | string | `""` | |
@@ -70,6 +70,9 @@ spec:
 {{- if .Values.ingressController.watchNamespace }}
 - -namespace={{ .Values.ingressController.watchNamespace }}
 {{- end }}
+{{- if .Values.ingressController.leaseId }}
+- -lease-id={{ .Values.ingressController.leaseId }}
+{{- end }}
 {{- if .Values.ingressController.verbose }}
 - -v
 {{- end }}
@@ -90,6 +90,10 @@
 }
 }
 },
+"leaseId": {
+"$id": "#/properties/ingressController/properties/leaseId",
+"type": "string"
+},
 "config": {
 "$id": "#/properties/ingressController/properties/config",
 "type": "object",
@@ -22,6 +22,7 @@ ingressController:
 rbac:
 create: true
 
+leaseId: ""
 config:
 # -- Acme Server URL
 acmeCA: ""
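The new `leaseId` value flows from the chart values into the `-lease-id` flag added in the deployment template above. A minimal sketch of overriding it (the ID string is an arbitrary example):

```yaml
# Example chart values enabling the new lease-id flag; "caddy-ingress-0" is hypothetical.
ingressController:
  leaseId: "caddy-ingress-0"
```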
@@ -9,6 +9,9 @@ import (
 "k8s.io/apimachinery/pkg/version"
 "k8s.io/client-go/kubernetes"
 "k8s.io/client-go/tools/clientcmd"
+"os"
+"os/signal"
+"syscall"
 "time"
 )
 
@@ -49,7 +52,6 @@ func main() {
 }
 
 stopCh := make(chan struct{}, 1)
-defer close(stopCh)
 
 c := controller.NewCaddyController(logger, kubeClient, cfg, caddy.Converter{}, stopCh)
 
@@ -57,7 +59,13 @@ func main() {
 logger.Info("Starting the caddy ingress controller")
 go c.Run()
 
-// TODO :- listen to sigterm
+// Listen for SIGINT and SIGTERM signals
+sigs := make(chan os.Signal, 1)
+signal.Notify(sigs, os.Interrupt, syscall.SIGTERM)
+<-sigs
+close(stopCh)
+
+// Let controller exit the process
 select {}
 }
 
go.mod
@@ -1,16 +1,15 @@
 module github.com/caddyserver/ingress
 
-go 1.14
+go 1.16
 
 require (
-github.com/caddyserver/caddy/v2 v2.3.0-rc.1
+github.com/caddyserver/caddy/v2 v2.4.6
-github.com/caddyserver/certmagic v0.12.1-0.20201209195841-b726d1ed13c3
+github.com/caddyserver/certmagic v0.15.2
-github.com/google/uuid v1.1.1
+github.com/google/uuid v1.3.0
-github.com/mitchellh/mapstructure v1.1.2
+github.com/mitchellh/mapstructure v1.4.3
 github.com/pires/go-proxyproto v0.3.1
 github.com/pkg/errors v0.9.1
-go.uber.org/multierr v1.6.0 // indirect
+go.uber.org/zap v1.19.0
-go.uber.org/zap v1.16.0
 gopkg.in/go-playground/assert.v1 v1.2.1 // indirect
 gopkg.in/go-playground/pool.v3 v3.1.1
 k8s.io/api v0.19.4
@@ -43,7 +43,8 @@ func LoadConfigMapOptions(config *Config, store *controller.Store) error {
 }
 
 tlsApp.Automation = &caddytls.AutomationConfig{
 OnDemand: onDemandConfig,
+OCSPCheckInterval: cfgMap.OCSPCheckInterval,
 Policies: []*caddytls.AutomationPolicy{
 {
 IssuersRaw: []json.RawMessage{
@@ -6,25 +6,77 @@ import (
 "github.com/caddyserver/caddy/v2"
 "github.com/caddyserver/caddy/v2/caddyconfig"
 "github.com/caddyserver/caddy/v2/modules/caddyhttp"
+"github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy"
+"github.com/caddyserver/caddy/v2/modules/caddyhttp/rewrite"
 "github.com/caddyserver/ingress/internal/controller"
-"k8s.io/api/networking/v1beta1"
+"k8s.io/api/networking/v1"
 )
 
+const (
+annotationPrefix = "caddy.ingress.kubernetes.io"
+rewriteToAnnotation = "rewrite-to"
+rewriteStripPrefixAnnotation = "rewrite-strip-prefix"
+disableSSLRedirect = "disable-ssl-redirect"
+)
+
+func getAnnotation(ing *v1.Ingress, rule string) string {
+return ing.Annotations[annotationPrefix+"/"+rule]
+}
+
 // TODO :- configure log middleware for all routes
-func baseRoute(upstream string) caddyhttp.Route {
+func generateRoute(ing *v1.Ingress, rule v1.IngressRule, path v1.HTTPIngressPath) caddyhttp.Route {
-return caddyhttp.Route{
+var handlers []json.RawMessage
-HandlersRaw: []json.RawMessage{
-json.RawMessage(`
+// Generate handlers
-{
+rewriteTo := getAnnotation(ing, rewriteToAnnotation)
-"handler": "reverse_proxy",
+if rewriteTo != "" {
-"upstreams": [
+handlers = append(handlers, caddyconfig.JSONModuleObject(
-{
+rewrite.Rewrite{URI: rewriteTo},
-"dial": "` + fmt.Sprintf("%s", upstream) + `"
+"handler", "rewrite", nil,
-}
+))
-]
+}
-}
-`),
+rewriteStripPrefix := getAnnotation(ing, rewriteStripPrefixAnnotation)
+if rewriteStripPrefix != "" {
+handlers = append(handlers, caddyconfig.JSONModuleObject(
+rewrite.Rewrite{StripPathPrefix: rewriteStripPrefix},
+"handler", "rewrite", nil,
+))
+}
+
+clusterHostName := fmt.Sprintf("%v.%v.svc.cluster.local:%d", path.Backend.Service.Name, ing.Namespace, path.Backend.Service.Port.Number)
+handlers = append(handlers, caddyconfig.JSONModuleObject(
+reverseproxy.Handler{
+Upstreams: reverseproxy.UpstreamPool{
+{Dial: clusterHostName},
+},
 },
+"handler", "reverse_proxy", nil,
+))
+
+// Generate matchers
+match := caddy.ModuleMap{}
+
+if getAnnotation(ing, disableSSLRedirect) != "true" {
+match["protocol"] = caddyconfig.JSON(caddyhttp.MatchProtocol("https"), nil)
+}
+
+if rule.Host != "" {
+match["host"] = caddyconfig.JSON(caddyhttp.MatchHost{rule.Host}, nil)
+}
+
+if path.Path != "" {
+p := path.Path
+
+if *path.PathType == v1.PathTypePrefix {
+p += "*"
+}
+match["path"] = caddyconfig.JSON(caddyhttp.MatchPath{p}, nil)
+}
+
+return caddyhttp.Route{
+HandlersRaw: handlers,
+MatcherSetsRaw: []caddy.ModuleMap{match},
 }
 }
 
@@ -40,29 +92,7 @@ func LoadIngressConfig(config *Config, store *controller.Store) error {
 for _, ing := range store.Ingresses {
 for _, rule := range ing.Spec.Rules {
 for _, path := range rule.HTTP.Paths {
-clusterHostName := fmt.Sprintf("%v.%v.svc.cluster.local:%d", path.Backend.ServiceName, ing.Namespace, path.Backend.ServicePort.IntVal)
+r := generateRoute(ing, rule, path)
-r := baseRoute(clusterHostName)
-
-match := caddy.ModuleMap{
-// match only on https protocol to allow HTTPS redirects
-// TODO Let user disable this to serve HTTP requests
-"protocol": caddyconfig.JSON(caddyhttp.MatchProtocol("https"), nil),
-}
-
-if rule.Host != "" {
-match["host"] = caddyconfig.JSON(caddyhttp.MatchHost{rule.Host}, nil)
-}
-
-if path.Path != "" {
-p := path.Path
-
-if *path.PathType == v1beta1.PathTypePrefix {
-p += "*"
-}
-match["path"] = caddyconfig.JSON(caddyhttp.MatchPath{p}, nil)
-}
-
-r.MatcherSetsRaw = []caddy.ModuleMap{match}
-
 routes = append(routes, r)
 }
@@ -1,34 +1,34 @@
 package controller
 
 import (
-"k8s.io/api/networking/v1beta1"
+"k8s.io/api/networking/v1"
 )
 
 // IngressAddedAction provides an implementation of the action interface.
 type IngressAddedAction struct {
-resource *v1beta1.Ingress
+resource *v1.Ingress
 }
 
 // IngressUpdatedAction provides an implementation of the action interface.
 type IngressUpdatedAction struct {
-resource *v1beta1.Ingress
+resource *v1.Ingress
-oldResource *v1beta1.Ingress
+oldResource *v1.Ingress
 }
 
 // IngressDeletedAction provides an implementation of the action interface.
 type IngressDeletedAction struct {
-resource *v1beta1.Ingress
+resource *v1.Ingress
 }
 
 // onIngressAdded runs when an ingress resource is added to the cluster.
-func (c *CaddyController) onIngressAdded(obj *v1beta1.Ingress) {
+func (c *CaddyController) onIngressAdded(obj *v1.Ingress) {
 c.syncQueue.Add(IngressAddedAction{
 resource: obj,
 })
 }
 
 // onIngressUpdated is run when an ingress resource is updated in the cluster.
-func (c *CaddyController) onIngressUpdated(old *v1beta1.Ingress, new *v1beta1.Ingress) {
+func (c *CaddyController) onIngressUpdated(old *v1.Ingress, new *v1.Ingress) {
 c.syncQueue.Add(IngressUpdatedAction{
 resource: new,
 oldResource: old,
@@ -36,7 +36,7 @@ func (c *CaddyController) onIngressUpdated(old *v1beta1.Ingress, new *v1beta1.In
 }
 
 // onIngressDeleted is run when an ingress resource is deleted from the cluster.
-func (c *CaddyController) onIngressDeleted(obj *v1beta1.Ingress) {
+func (c *CaddyController) onIngressDeleted(obj *v1.Ingress) {
 c.syncQueue.Add(IngressDeletedAction{
 resource: obj,
 })
@@ -5,7 +5,7 @@ import (
 "go.uber.org/zap"
 "gopkg.in/go-playground/pool.v3"
 apiv1 "k8s.io/api/core/v1"
-"k8s.io/api/networking/v1beta1"
+"k8s.io/api/networking/v1"
 "k8s.io/client-go/kubernetes"
 "net"
 "sort"
@@ -27,7 +27,7 @@ func (r SyncStatusAction) handle(c *CaddyController) error {
 }
 
 // syncStatus ensures that the ingress source address points to this ingress controller's IP address.
-func (c *CaddyController) syncStatus(ings []*v1beta1.Ingress) error {
+func (c *CaddyController) syncStatus(ings []*v1.Ingress) error {
 addrs, err := k8s.GetAddresses(c.podInfo, c.kubeClient)
 if err != nil {
 return err
@@ -41,7 +41,7 @@ func (c *CaddyController) syncStatus(ings []*v1beta1.Ingress) error {
 
 // updateIngStatuses starts a queue and adds all monitored ingresses to update their status source address to the on
 // that the ingress controller is running on. This is called by the syncStatus queue.
-func (c *CaddyController) updateIngStatuses(controllerAddresses []apiv1.LoadBalancerIngress, ings []*v1beta1.Ingress) {
+func (c *CaddyController) updateIngStatuses(controllerAddresses []apiv1.LoadBalancerIngress, ings []*v1.Ingress) {
 p := pool.NewLimited(10)
 defer p.Close()
 
@@ -66,7 +66,7 @@ func (c *CaddyController) updateIngStatuses(controllerAddresses []apiv1.LoadBala
 }
 
 // runUpdate updates the ingress status field.
-func runUpdate(logger *zap.SugaredLogger, ing *v1beta1.Ingress, status []apiv1.LoadBalancerIngress, client *kubernetes.Clientset) pool.WorkFunc {
+func runUpdate(logger *zap.SugaredLogger, ing *v1.Ingress, status []apiv1.LoadBalancerIngress, client *kubernetes.Clientset) pool.WorkFunc {
 return func(wu pool.WorkUnit) (interface{}, error) {
 if wu.IsCancelled() {
 return nil, nil
@@ -5,11 +5,12 @@ import (
 "encoding/json"
 "fmt"
 "github.com/caddyserver/caddy/v2"
+"github.com/caddyserver/certmagic"
 "github.com/caddyserver/ingress/internal/k8s"
 "github.com/caddyserver/ingress/pkg/storage"
 "go.uber.org/zap"
 apiv1 "k8s.io/api/core/v1"
-"k8s.io/api/networking/v1beta1"
+"k8s.io/api/networking/v1"
 "k8s.io/apimachinery/pkg/util/runtime"
 "k8s.io/apimachinery/pkg/util/wait"
 "k8s.io/client-go/informers"
@@ -52,7 +53,7 @@ type Options struct {
 type Store struct {
 Options *Options
 ConfigMap *k8s.ConfigMapOptions
-Ingresses []*v1beta1.Ingress
+Ingresses []*v1.Ingress
 }
 
 // Informer defines the required SharedIndexInformers that interact with the API server.
@@ -176,6 +177,12 @@ func NewCaddyController(
 func (c *CaddyController) Shutdown() error {
 // remove this ingress controller's ip from ingress resources.
 c.updateIngStatuses([]apiv1.LoadBalancerIngress{{}}, c.resourceStore.Ingresses)
+
+if err := caddy.Stop(); err != nil {
+c.logger.Error("failed to stop caddy server", zap.Error(err))
+return err
+}
+certmagic.CleanUpOwnLocks(c.logger.Desugar())
 return nil
 }
 
@@ -274,7 +281,7 @@ func (c *CaddyController) reloadCaddy() error {
 return nil
 }
 
-c.logger.Debug("reloading caddy with config %v" + string(j))
+c.logger.Debug("reloading caddy with config", string(j))
 err = caddy.Load(j, false)
 if err != nil {
 return fmt.Errorf("could not reload caddy config %v", err.Error())
@@ -2,21 +2,21 @@ package controller
 
 import (
 "github.com/caddyserver/ingress/internal/k8s"
-"k8s.io/api/networking/v1beta1"
+"k8s.io/api/networking/v1"
 )
 
 // NewStore returns a new store that keeps track of K8S resources needed by the controller.
 func NewStore(opts Options) *Store {
 s := &Store{
 Options: &opts,
-Ingresses: []*v1beta1.Ingress{},
+Ingresses: []*v1.Ingress{},
 ConfigMap: &k8s.ConfigMapOptions{},
 }
 return s
 }
 
 // AddIngress adds an ingress to the store. It updates the element at the given index if it is unique.
-func (s *Store) AddIngress(ing *v1beta1.Ingress) {
+func (s *Store) AddIngress(ing *v1.Ingress) {
 isUniq := true
 
 for i := range s.Ingresses {
@@ -33,7 +33,7 @@ func (s *Store) AddIngress(ing *v1beta1.Ingress) {
 }
 
 // PluckIngress removes the ingress passed in as an argument from the stores list of ingresses.
-func (s *Store) PluckIngress(ing *v1beta1.Ingress) {
+func (s *Store) PluckIngress(ing *v1.Ingress) {
 id := ing.GetUID()
 
 var index int
@@ -7,6 +7,8 @@ import (
 v12 "k8s.io/api/core/v1"
 "k8s.io/client-go/informers"
 "k8s.io/client-go/tools/cache"
+"reflect"
+"time"
 )
 
 // ConfigMapOptions represents global options set through a configmap
@@ -20,6 +22,7 @@ type ConfigMapOptions struct {
 OnDemandRateLimitInterval caddy.Duration `json:"onDemandTLSRateLimitInterval,omitempty"`
 OnDemandRateLimitBurst int `json:"onDemandTLSRateLimitBurst,omitempty"`
 OnDemandAsk string `json:"onDemandTLSAsk,omitempty"`
+OCSPCheckInterval caddy.Duration `json:"ocspCheckInterval,omitempty"`
 }
 
 type ConfigMapHandlers struct {
@@ -69,6 +72,18 @@ func WatchConfigMaps(options ConfigMapParams, funcs ConfigMapHandlers) cache.Sha
 return informer
 }
 
+func stringToCaddyDurationHookFunc() mapstructure.DecodeHookFunc {
+return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
+if f.Kind() != reflect.String {
+return data, nil
+}
+if t != reflect.TypeOf(caddy.Duration(time.Second)) {
+return data, nil
+}
+return caddy.ParseDuration(data.(string))
+}
+}
+
 func ParseConfigMap(cm *v12.ConfigMap) (*ConfigMapOptions, error) {
 // parse configmap
 cfgMap := ConfigMapOptions{}
@@ -77,6 +92,9 @@ func ParseConfigMap(cm *v12.ConfigMap) (*ConfigMapOptions, error) {
 WeaklyTypedInput: true,
 Result: &cfgMap,
 TagName: "json",
+DecodeHook: mapstructure.ComposeDecodeHookFunc(
+stringToCaddyDurationHookFunc(),
+),
 }
 
 decoder, err := mapstructure.NewDecoder(config)
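Because the new decode hook parses strings with `caddy.ParseDuration`, the `ocspCheckInterval` key (like the existing on-demand TLS keys, whose names come from the json tags in `ConfigMapOptions`) can be supplied as a duration string in the global options ConfigMap. A sketch, with the ConfigMap name and namespace assumed:

```yaml
# Assumed ConfigMap name/namespace; keys correspond to the json tags in ConfigMapOptions.
apiVersion: v1
kind: ConfigMap
metadata:
  name: caddy-global-options
  namespace: caddy-system
data:
  ocspCheckInterval: "1h"            # parsed into caddy.Duration by the new decode hook
  onDemandTLSRateLimitInterval: "2m"
  onDemandTLSRateLimitBurst: "5"
```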
@@ -5,7 +5,7 @@ import (
 "fmt"
 "github.com/pkg/errors"
 apiv1 "k8s.io/api/core/v1"
-"k8s.io/api/networking/v1beta1"
+networkingv1 "k8s.io/api/networking/v1"
 v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/client-go/informers"
 "k8s.io/client-go/kubernetes"
@@ -13,9 +13,9 @@ import (
 )
 
 type IngressHandlers struct {
-AddFunc func(obj *v1beta1.Ingress)
+AddFunc func(obj *networkingv1.Ingress)
-UpdateFunc func(oldObj, newObj *v1beta1.Ingress)
+UpdateFunc func(oldObj, newObj *networkingv1.Ingress)
-DeleteFunc func(obj *v1beta1.Ingress)
+DeleteFunc func(obj *networkingv1.Ingress)
 }
 
 type IngressParams struct {
@@ -25,27 +25,26 @@ type IngressParams struct {
 }
 
 func WatchIngresses(options IngressParams, funcs IngressHandlers) cache.SharedIndexInformer {
-// TODO Handle new API
+informer := options.InformerFactory.Networking().V1().Ingresses().Informer()
-informer := options.InformerFactory.Networking().V1beta1().Ingresses().Informer()
 
 informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
 AddFunc: func(obj interface{}) {
-ingress, ok := obj.(*v1beta1.Ingress)
+ingress, ok := obj.(*networkingv1.Ingress)
 
 if ok && IsControllerIngress(options, ingress) {
 funcs.AddFunc(ingress)
 }
 },
 UpdateFunc: func(oldObj, newObj interface{}) {
-oldIng, ok1 := oldObj.(*v1beta1.Ingress)
+oldIng, ok1 := oldObj.(*networkingv1.Ingress)
-newIng, ok2 := newObj.(*v1beta1.Ingress)
+newIng, ok2 := newObj.(*networkingv1.Ingress)
 
 if ok1 && ok2 && IsControllerIngress(options, newIng) {
 funcs.UpdateFunc(oldIng, newIng)
 }
 },
 DeleteFunc: func(obj interface{}) {
-ingress, ok := obj.(*v1beta1.Ingress)
+ingress, ok := obj.(*networkingv1.Ingress)
 
 if ok && IsControllerIngress(options, ingress) {
 funcs.DeleteFunc(ingress)
@@ -57,18 +56,20 @@ func WatchIngresses(options IngressParams, funcs IngressHandlers) cache.SharedIn
 }
 
 // IsControllerIngress check if the ingress object can be controlled by us
-// TODO Handle `ingressClassName`
+func IsControllerIngress(options IngressParams, ingress *networkingv1.Ingress) bool {
-func IsControllerIngress(options IngressParams, ingress *v1beta1.Ingress) bool {
 ingressClass := ingress.Annotations["kubernetes.io/ingress.class"]
+if ingressClass == "" && ingress.Spec.IngressClassName != nil {
+ingressClass = *ingress.Spec.IngressClassName
+}
+
 if !options.ClassNameRequired && ingressClass == "" {
 return true
 }
 
 return ingressClass == options.ClassName
 }
 
-func UpdateIngressStatus(kubeClient *kubernetes.Clientset, ing *v1beta1.Ingress, status []apiv1.LoadBalancerIngress) (*v1beta1.Ingress, error) {
+func UpdateIngressStatus(kubeClient *kubernetes.Clientset, ing *networkingv1.Ingress, status []apiv1.LoadBalancerIngress) (*networkingv1.Ingress, error) {
-ingClient := kubeClient.NetworkingV1beta1().Ingresses(ing.Namespace)
+ingClient := kubeClient.NetworkingV1().Ingresses(ing.Namespace)
 
 currIng, err := ingClient.Get(context.TODO(), ing.Name, v1.GetOptions{})
 if err != nil {
@@ -2,7 +2,7 @@ package k8s
 
 import (
 v12 "k8s.io/api/core/v1"
-"k8s.io/api/networking/v1beta1"
+"k8s.io/api/networking/v1"
 "k8s.io/client-go/informers"
 "k8s.io/client-go/tools/cache"
 )
@@ -48,7 +48,7 @@ func WatchTLSSecrets(options TLSSecretParams, funcs TLSSecretHandlers) cache.Sha
 return informer
 }
 
-func ListTLSSecrets(options TLSSecretParams, ings []*v1beta1.Ingress) ([]*v12.Secret, error) {
+func ListTLSSecrets(options TLSSecretParams, ings []*v1.Ingress) ([]*v12.Secret, error) {
 lister := options.InformerFactory.Core().V1().Secrets().Lister()
 
 tlsSecrets := []*v12.Secret{}
@@ -64,7 +64,7 @@ func ListTLSSecrets(options TLSSecretParams, ings []*v1beta1.Ingress) ([]*v12.Se
 return tlsSecrets, nil
 }
 
-func IsManagedTLSSecret(secret *v12.Secret, ings []*v1beta1.Ingress) bool {
+func IsManagedTLSSecret(secret *v12.Secret, ings []*v1.Ingress) bool {
 for _, ing := range ings {
 for _, tlsRule := range ing.Spec.TLS {
 if tlsRule.SecretName == secret.Name && ing.Namespace == secret.Namespace {