Merged in feature/config (pull request #2)

Provide ability to configure caddy ingress controller

* Update helm charts
  * Default install into caddy-system namespace
  * Allow inter-namespace service discovery
This commit is contained in:
Danny 2019-05-06 21:29:41 +00:00
parent 2967030fe2
commit e8b9c8b94c
21 changed files with 111 additions and 109 deletions

36
cmd/caddy/flag.go Normal file
View File

@ -0,0 +1,36 @@
package main
import (
"flag"
"bitbucket.org/lightcodelabs/ingress/internal/caddy"
"k8s.io/klog"
)
// parseFlags reads the ingress controller's command-line flags and returns
// the resulting caddy.ControllerConfig. It calls flag.Parse(), so it must be
// invoked before any other flag access and only once per process.
//
// If automatic TLS is requested (-tls) but no -email is supplied, automatic
// TLS is disabled, since ACME certificate registration requires an email
// address; a warning is logged so the operator can correct the invocation.
func parseFlags() caddy.ControllerConfig {
	var email string
	flag.StringVar(&email, "email", "", "the email address to use for requesting tls certificates if automatic https is enabled.")

	var namespace string
	flag.StringVar(&namespace, "observe-namespace", "", "the namespace that you would like to observe kubernetes ingress resources in.")

	var enableAutomaticTLS bool
	flag.BoolVar(&enableAutomaticTLS, "tls", false, "defines if automatic tls should be enabled for hostnames defined in ingress resources.")

	var tlsUseStaging bool
	flag.BoolVar(&tlsUseStaging, "tls-use-staging", false, "defines if the lets-encrypt staging server should be used for testing the provisioning of tls certificates.")

	flag.Parse()

	// Requested TLS cannot be honored without an email; warn (not just inform)
	// because we are silently degrading requested functionality.
	if email == "" && enableAutomaticTLS {
		klog.Warning("An email must be defined for automatic tls features, set flag `email` with the email address you would like to use for certificate registration.")
		enableAutomaticTLS = false
	}

	return caddy.ControllerConfig{
		Email:          email,
		AutomaticTLS:   enableAutomaticTLS,
		TLSUseStaging:  tlsUseStaging,
		WatchNamespace: namespace,
	}
}

View File

@ -1,7 +1,6 @@
package main
import (
"os"
"time"
"bitbucket.org/lightcodelabs/ingress/internal/controller"
@ -14,50 +13,42 @@ import (
)
const (
// High enough QPS to fit all expected use cases. QPS=0 is not set here, because
// client code is overriding it.
// high enough QPS to fit all expected use cases.
defaultQPS = 1e6
// High enough Burst to fit all expected use cases. Burst=0 is not set here, because
// client code is overriding it.
// high enough Burst to fit all expected use cases.
defaultBurst = 1e6
)
func main() {
klog.InitFlags(nil)
// get the namespace to monitor ingress resources for
namespace := os.Getenv("KUBERNETES_NAMESPACE")
if len(namespace) == 0 {
namespace = v1.NamespaceAll
klog.Warning("KUBERNETES_NAMESPACE is unset, will monitor ingresses in all namespaces.")
}
// TODO :- implement
// parse any flags required to configure the caddy ingress controller
// cfg, err := parseFlags()
// if err != nil {
// klog.Fatal(err)
// }
cfg := parseFlags()
if cfg.WatchNamespace == "" {
cfg.WatchNamespace = v1.NamespaceAll
klog.Warning("-namespace flag is unset, caddy ingress controller will monitor ingress resources in all namespaces.")
}
// get client to access the kubernetes service api
kubeClient, err := createApiserverClient()
if err != nil {
msg := `
Error while initiating a connection to the Kubernetes API server.
This could mean the cluster is misconfigured (e.g. it has invalid API server certificates or Service Accounts configuration)
`
This could mean the cluster is misconfigured (e.g. it has invalid
API server certificates or Service Accounts configuration)
`
klog.Fatalf(msg, err)
}
var resource = "ingresses"
restClient := kubeClient.ExtensionsV1beta1().RESTClient()
// start ingress controller
c := controller.NewCaddyController(namespace, kubeClient, resource, restClient)
c := controller.NewCaddyController(kubeClient, restClient, cfg)
// TODO :-
// TODO :- health metrics
// create http server to expose controller health metrics
klog.Info("Starting the caddy ingress controller")
@ -98,11 +89,12 @@ func createApiserverClient() (*kubernetes.Clientset, error) {
Jitter: 0.1,
}
klog.V(2).Info("Trying to discover Kubernetes version")
klog.V(2).Info("Attempting to discover Kubernetes version")
var v *version.Info
var retries int
var lastErr error
err = wait.ExponentialBackoff(defaultRetry, func() (bool, error) {
v, err = client.Discovery().ServerVersion()
if err == nil {

View File

@ -2,6 +2,7 @@ package caddy
import (
"encoding/json"
"fmt"
"bitbucket.org/lightcodelabs/caddy2/modules/caddytls"
)
@ -73,10 +74,17 @@ type Config struct {
Modules httpServer `json:"apps"`
}
// ControllerConfig represents ingress controller config received through cli arguments.
type ControllerConfig struct {
Email string
AutomaticTLS bool
TLSUseStaging bool
WatchNamespace string
}
// NewConfig returns a plain slate caddy2 config file.
func NewConfig(namespace string) *Config {
// TODO :- get email from arguments to ingress controller
autoPolicyBytes := json.RawMessage(`{"module": "acme", "email": "navdgo@gmail.com"}`)
func NewConfig(namespace string, cfg ControllerConfig) *Config {
autoPolicyBytes := json.RawMessage(fmt.Sprintf(`{"module": "acme", "email": "%v"}`, cfg.Email))
return &Config{
Storage: Storage{
@ -100,7 +108,7 @@ func NewConfig(namespace string) *Config {
HTTP: servers{
Servers: serverConfig{
Server: httpServerConfig{
DisableAutoHTTPS: false, // TODO :- allow to be set from arguments to ingress controller
DisableAutoHTTPS: !cfg.AutomaticTLS,
ReadTimeout: "30s",
Listen: []string{":80", ":443"},
TLSConnPolicies: caddytls.ConnectionPolicies{

View File

@ -26,7 +26,8 @@ func ConvertToCaddyConfig(ings []*v1beta1.Ingress) ([]serverRoute, []string, err
hosts = append(hosts, rule.Host)
for _, path := range rule.HTTP.Paths {
r := baseRoute(path.Backend.ServiceName)
clusterHostName := fmt.Sprintf("%v.%v.svc.cluster.local", path.Backend.ServiceName, ing.Namespace)
r := baseRoute(clusterHostName)
// create matchers for ingress host and path
h := json.RawMessage(fmt.Sprintf(`["%v"]`, rule.Host))

View File

@ -10,12 +10,13 @@ import (
"time"
"bitbucket.org/lightcodelabs/caddy2"
"bitbucket.org/lightcodelabs/ingress/internal/caddy"
"bitbucket.org/lightcodelabs/ingress/internal/pod"
"bitbucket.org/lightcodelabs/ingress/internal/store"
"bitbucket.org/lightcodelabs/ingress/pkg/storage"
apiv1 "k8s.io/api/core/v1"
"k8s.io/api/extensions/v1beta1"
"k8s.io/apimachinery/pkg/fields"
run "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
@ -28,16 +29,9 @@ import (
_ "bitbucket.org/lightcodelabs/caddy2/modules/caddyhttp"
_ "bitbucket.org/lightcodelabs/caddy2/modules/caddyhttp/caddylog"
_ "bitbucket.org/lightcodelabs/caddy2/modules/caddyhttp/staticfiles"
"bitbucket.org/lightcodelabs/ingress/pkg/storage"
_ "bitbucket.org/lightcodelabs/ingress/pkg/storage"
_ "bitbucket.org/lightcodelabs/proxy"
)
// ResourceMap are resources from where changes are going to be detected
var ResourceMap = map[string]run.Object{
"ingresses": &v1beta1.Ingress{},
}
const (
// how often we should attempt to keep ingress resource's source address in sync
syncInterval = time.Second * 30
@ -47,45 +41,40 @@ const (
type CaddyController struct {
resourceStore *store.Store
kubeClient *kubernetes.Clientset
namespace string
indexer cache.Indexer
syncQueue workqueue.RateLimitingInterface
statusQueue workqueue.RateLimitingInterface // statusQueue performs ingress status updates every 60 seconds but inserts the work into the sync queue
informer cache.Controller
podInfo *pod.Info
config caddy.ControllerConfig
}
// NewCaddyController returns an instance of the caddy ingress controller.
func NewCaddyController(namespace string, kubeClient *kubernetes.Clientset, resource string, restClient rest.Interface) *CaddyController {
// TODO :- we should get the namespace of the ingress we are processing to store secrets
// Do this in the SecretStorage package
if namespace == "" {
namespace = "default"
}
func NewCaddyController(kubeClient *kubernetes.Clientset, restClient rest.Interface, cfg caddy.ControllerConfig) *CaddyController {
// setup the ingress controller and start watching resources
controller := &CaddyController{
kubeClient: kubeClient,
namespace: namespace,
syncQueue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
statusQueue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
config: cfg,
}
ingressListWatcher := cache.NewListWatchFromClient(restClient, resource, namespace, fields.Everything())
indexer, informer := cache.NewIndexerInformer(ingressListWatcher, ResourceMap[resource], 0, cache.ResourceEventHandlerFuncs{
ingressListWatcher := cache.NewListWatchFromClient(restClient, "ingresses", cfg.WatchNamespace, fields.Everything())
indexer, informer := cache.NewIndexerInformer(ingressListWatcher, &v1beta1.Ingress{}, 0, cache.ResourceEventHandlerFuncs{
AddFunc: controller.onResourceAdded,
UpdateFunc: controller.onResourceUpdated,
DeleteFunc: controller.onResourceDeleted,
}, cache.Indexers{})
controller.indexer = indexer
controller.informer = informer
controller.resourceStore = store.NewStore(controller.kubeClient, namespace)
podInfo, err := pod.GetPodDetails(kubeClient)
if err != nil {
klog.Fatalf("Unexpected error obtaining pod information: %v", err)
}
controller.podInfo = podInfo
controller.indexer = indexer
controller.informer = informer
controller.resourceStore = store.NewStore(controller.kubeClient, podInfo.Namespace, cfg)
// attempt to do initial sync with ingresses
controller.syncQueue.Add(SyncStatusAction{})
@ -95,7 +84,7 @@ func NewCaddyController(namespace string, kubeClient *kubernetes.Clientset, reso
Name: "caddy.storage.secret_store",
New: func() (interface{}, error) {
ss := &storage.SecretStorage{
Namespace: namespace,
Namespace: podInfo.Namespace,
KubeClient: kubeClient,
}
@ -103,6 +92,7 @@ func NewCaddyController(namespace string, kubeClient *kubernetes.Clientset, reso
},
})
// start caddy2
err = caddy2.StartAdmin("127.0.0.1:1234")
if err != nil {
klog.Fatal(err)

View File

@ -30,7 +30,7 @@ func (c *CaddyController) syncStatus(ings []*v1beta1.Ingress) error {
return err
}
klog.Info("Synching Ingress resource source addresses")
klog.V(2).Info("Synching Ingress resource source addresses")
c.updateIngStatuses(sliceToLoadBalancerIngress(addrs), ings)
return nil

View File

@ -17,7 +17,7 @@ type Store struct {
// NewStore returns a new store that keeps track of ingresses and secrets. It will attempt to get
// all current ingresses before returning.
func NewStore(kubeClient *kubernetes.Clientset, namespace string) *Store {
func NewStore(kubeClient *kubernetes.Clientset, namespace string, cfg caddy.ControllerConfig) *Store {
ingresses, err := kubeClient.ExtensionsV1beta1().Ingresses("").List(v1.ListOptions{})
if err != nil {
klog.Errorf("could not get existing ingresses in cluster")
@ -26,7 +26,7 @@ func NewStore(kubeClient *kubernetes.Clientset, namespace string) *Store {
s := &Store{
Ingresses: []*v1beta1.Ingress{},
CaddyConfig: caddy.NewConfig(namespace),
CaddyConfig: caddy.NewConfig(namespace, cfg),
}
for _, i := range ingresses.Items {

View File

@ -2,6 +2,7 @@ apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: caddy-ingress-controller-role
namespace: caddy-system
rules:
- apiGroups:
- ""

View File

@ -2,6 +2,7 @@ kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: caddy-ingress-controller-role-binding
namespace: caddy-system
roleRef:
kind: ClusterRole
name: caddy-ingress-controller-role
@ -9,4 +10,4 @@ roleRef:
subjects:
- kind: ServiceAccount
name: caddy-ingress-controller
namespace: default
namespace: caddy-system

View File

@ -2,6 +2,7 @@ apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: caddy-ingress-controller
namespace: caddy-system
labels:
app: caddy-ingress-controller
chart: "caddy-ingress-controller-v0.1.0"
@ -54,4 +55,7 @@ spec:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
fieldPath: metadata.namespace
args:
- -tls
- -email=navdgo@gmail.com

View File

@ -2,6 +2,7 @@ apiVersion: v1
kind: Service
metadata:
name: caddy-ingress-controller
namespace: caddy-system
labels:
app: caddy-ingress-controller
spec:

View File

@ -1,6 +1,7 @@
apiVersion: v1
kind: ServiceAccount
metadata:
namespace: caddy-system
labels:
app: caddy-ingress-controller
chart: "caddy-ingress-controller-v0.1.0"

View File

@ -1,8 +1,9 @@
{{- if and ( .Values.caddyingresscontroller.rbac.create ) (eq .Values.caddyingresscontroller.watchNamespace "") }}
{{- if .Values.caddyingresscontroller.rbac.create }}
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: {{ .Values.name }}-role
namespace: {{ .Release.Namespace }}
rules:
- apiGroups:
- ""

View File

@ -1,8 +1,9 @@
{{- if and ( .Values.caddyingresscontroller.rbac.create ) (eq .Values.caddyingresscontroller.watchNamespace "") }}
{{- if .Values.caddyingresscontroller.rbac.create }}
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: {{ .Values.name }}-role-binding
namespace: {{ .Release.Namespace }}
roleRef:
kind: ClusterRole
name: {{ .Values.name }}-role

View File

@ -2,6 +2,7 @@ apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: {{ .Values.name }}
namespace: {{ .Release.Namespace }}
labels:
app: {{ .Values.name }}
chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
@ -63,7 +64,8 @@ spec:
valueFrom:
fieldRef:
fieldPath: metadata.namespace
{{- if .Values.caddyingresscontroller.watchNamespace }}
- name: KUBERNETES_NAMESPACE
value: {{ .Values.caddyingresscontroller.watchNamespace | quote }}
{{- end }}
args:
{{- if .Values.autotls }}
- -tls
- -email={{ .Values.email }}
{{- end }}

View File

@ -5,6 +5,7 @@ apiVersion: v1
kind: Service
metadata:
name: {{ .Values.name }}
namespace: {{ .Release.Namespace }}
labels:
app: {{ .Values.name }}
spec:

View File

@ -1,28 +0,0 @@
{{- if and ( .Values.caddyingresscontroller.rbac.create ) (.Values.caddyingresscontroller.watchNamespace) }}
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ .Values.name }}-role
namespace: {{ .Values.caddyingresscontroller.watchNamespace | quote }}
rules:
- apiGroups:
- ""
- "extensions"
resources:
- ingresses
- ingresses/status
- secrets
verbs: ["*"]
- apiGroups:
- ""
resources:
- services
- pods
- nodes
- routes
- extensions
verbs:
- list
- get
- watch
{{- end }}

View File

@ -1,15 +0,0 @@
{{- if and ( .Values.caddyingresscontroller.rbac.create ) (.Values.caddyingresscontroller.watchNamespace) }}
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ .Values.name }}-role-binding
namespace: {{ .Values.caddyingresscontroller.watchNamespace | quote }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ .Values.name }}-role
subjects:
- kind: ServiceAccount
name: {{ .Values.serviceAccountName }}
namespace: {{ .Release.Namespace | quote }}
{{- end }}

View File

@ -2,6 +2,7 @@
apiVersion: v1
kind: ServiceAccount
metadata:
namespace: {{ .Release.Namespace }}
labels:
app: {{ .Values.name }}
chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"

View File

@ -4,7 +4,6 @@ kubernetes:
caddyingresscontroller:
tolerations: {}
watchNamespace: ""
deployment:
labels:
version: "v0.1.0"
@ -29,4 +28,9 @@ caddyingresscontroller:
name: "caddy-ingress-controller"
serviceAccountName: "caddy-ingress-controller"
minikube: false
minikube: false
# If setting autotls the following email value must be set
# to an email address that you manage
autotls: false
email: ""

View File

@ -15,4 +15,4 @@ deploy:
- kubernetes/generated/clusterrolebinding.yaml
- kubernetes/generated/deployment.yaml
- kubernetes/generated/serviceaccount.yaml
- kubernetes/generated/loadbalancer.yaml
# - kubernetes/generated/loadbalancer.yaml