Merged in feature/cert-storage (pull request #1)

Implement certmagic storage interface for k8s
This commit is contained in:
Danny 2019-05-02 17:29:20 +00:00
commit 2967030fe2
22 changed files with 400 additions and 97 deletions

View File

@ -1,4 +1,8 @@
FROM alpine:latest as certs
RUN apk --update add ca-certificates
FROM scratch
COPY ./bin/ingress-controller .
COPY --from=certs /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
EXPOSE 80 443
ENTRYPOINT ["/ingress-controller"]

View File

@ -2,9 +2,11 @@ apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: example
annotations:
kubernetes.io/ingress.class: caddy
spec:
rules:
- host: hello-world.xyz
- host: caddy2.kubed.co
http:
paths:
- path: /hello2
@ -14,4 +16,4 @@ spec:
- path: /hello
backend:
serviceName: example
servicePort: 8080
servicePort: 8080

View File

@ -3,7 +3,7 @@ apiVersion: v1
metadata:
name: example
spec:
type: NodePort
type: ClusterIP
selector:
app: example
ports:

View File

@ -3,7 +3,7 @@ apiVersion: v1
metadata:
name: example2
spec:
type: NodePort
type: ClusterIP
selector:
app: example2
ports:

View File

@ -3,22 +3,21 @@ package caddy
import (
"encoding/json"
"bitbucket.org/lightcodelabs/caddy2"
"bitbucket.org/lightcodelabs/caddy2/modules/caddytls"
)
type serverRoute struct {
Matchers map[string]json.RawMessage `json:"match"`
Apply []map[string]string `json:"apply"`
Respond proxyConfig `json:"respond"`
Exclusive bool `json:"exclusive"`
Matchers map[string]json.RawMessage `json:"match"`
Apply []map[string]string `json:"apply"`
Respond proxyConfig `json:"respond"`
}
type routeList []serverRoute
type proxyConfig struct {
Module string `json:"_module"`
LoadBalanceType string `json:"load_balance_type"`
Upstreams []upstreamConfig
Module string `json:"responder"`
LoadBalanceType string `json:"load_balance_type"`
Upstreams []upstreamConfig `json:"upstreams"`
}
type upstreamConfig struct {
@ -26,12 +25,13 @@ type upstreamConfig struct {
}
type httpServerConfig struct {
Listen []string `json:"listen"`
ReadTimeout caddy2.Duration `json:"read_timeout"`
ReadHeaderTimeout caddy2.Duration `json:"read_header_timeout"`
HiddenFiles []string `json:"hidden_files"` // TODO:... experimenting with shared/common state
Routes routeList `json:"routes"`
Errors httpErrorConfig `json:"errors"`
Listen []string `json:"listen"`
ReadTimeout string `json:"read_timeout"`
DisableAutoHTTPS bool `json:"disable_auto_https"`
// ReadHeaderTimeout caddy2.Duration `json:"read_header_timeout"`
// HiddenFiles []string `json:"hidden_files"` // TODO:... experimenting with shared/common state
TLSConnPolicies caddytls.ConnectionPolicies `json:"tls_connection_policies"`
Routes routeList `json:"routes"`
}
type httpErrorConfig struct {
@ -46,23 +46,66 @@ type servers struct {
Servers serverConfig `json:"servers"`
}
type TLSConfig struct {
Module string `json:"module"`
Automation caddytls.AutomationConfig `json:"automation"`
}
type httpServer struct {
HTTP servers `json:"http"`
TLS TLSConfig `json:"tls"`
HTTP servers `json:"http"`
}
// StorageValues represents the config for certmagic storage providers.
type StorageValues struct {
Namespace string `json:"namespace"`
}
// Storage represents the certmagic storage configuration.
type Storage struct {
System string `json:"system"`
StorageValues
}
// Config represents a caddy2 config file.
type Config struct {
Modules httpServer `json:"modules"`
Storage Storage `json:"storage"`
Modules httpServer `json:"apps"`
}
// NewConfig returns a blank-slate caddy2 config file.
func NewConfig() *Config {
func NewConfig(namespace string) *Config {
// TODO :- get email from arguments to ingress controller
autoPolicyBytes := json.RawMessage(`{"module": "acme", "email": "navdgo@gmail.com"}`)
return &Config{
Storage: Storage{
System: "secret_store",
StorageValues: StorageValues{
Namespace: namespace,
},
},
Modules: httpServer{
TLS: TLSConfig{
Module: "acme",
Automation: caddytls.AutomationConfig{
Policies: []caddytls.AutomationPolicy{
caddytls.AutomationPolicy{
Hosts: nil,
Management: autoPolicyBytes,
},
},
},
},
HTTP: servers{
Servers: serverConfig{
Server: httpServerConfig{
Listen: []string{":80", ":443"},
DisableAutoHTTPS: false, // TODO :- allow to be set from arguments to ingress controller
ReadTimeout: "30s",
Listen: []string{":80", ":443"},
TLSConnPolicies: caddytls.ConnectionPolicies{
&caddytls.ConnectionPolicy{},
},
},
},
},

View File

@ -7,19 +7,24 @@ import (
"k8s.io/api/extensions/v1beta1"
)
// ~~~~
// TODO :-
// when setting the upstream url we should bypass kube-proxy and get the ip address of
// the pod for the deployment we are proxying to so that we can proxy to that ip address port.
// this is good for session affinity and increases performance (since we don't have to hit dns).
// ~~~~
// ConvertToCaddyConfig returns a new caddy routelist based off of ingresses managed by this controller.
func ConvertToCaddyConfig(ings []*v1beta1.Ingress) ([]serverRoute, error) {
func ConvertToCaddyConfig(ings []*v1beta1.Ingress) ([]serverRoute, []string, error) {
// ~~~~
// TODO :-
// when setting the upstream url we should bypass kube-proxy and get the ip address of
// the pod for the deployment we are proxying to so that we can proxy to that ip address port.
// this is good for session affinity and increases performance (since we don't have to hit dns).
// ~~~~
// record hosts for tls policies
var hosts []string
// create a server route for each ingress route
var routes routeList
for _, ing := range ings {
for _, rule := range ing.Spec.Rules {
hosts = append(hosts, rule.Host)
for _, path := range rule.HTTP.Paths {
r := baseRoute(path.Backend.ServiceName)
@ -32,20 +37,28 @@ func ConvertToCaddyConfig(ings []*v1beta1.Ingress) ([]serverRoute, error) {
"path": p,
}
// add logging middleware to all routes
r.Apply = []map[string]string{
map[string]string{
"file": "access.log",
"middleware": "log",
},
}
routes = append(routes, r)
}
}
}
return routes, nil
return routes, hosts, nil
}
func baseRoute(upstream string) serverRoute {
return serverRoute{
Apply: []map[string]string{
map[string]string{
"_module": "log",
"file": "access.log",
"middleware": "log",
"file": "access.log",
},
},
Respond: proxyConfig{

View File

@ -74,6 +74,9 @@ func (r ResourceAddedAction) handle(c *CaddyController) error {
return err
}
// js, _ := json.MarshalIndent(c.resourceStore.CaddyConfig, "", " ")
// fmt.Printf("\n%v\n", string(js))
// ensure that ingress source is updated to point to this ingress controller's ip
err = c.syncStatus([]*v1beta1.Ingress{ing})
if err != nil {
@ -129,13 +132,19 @@ func (r ResourceDeletedAction) handle(c *CaddyController) error {
func updateConfig(c *CaddyController) error {
// update internal caddy config with new ingress info
serverRoutes, err := caddy.ConvertToCaddyConfig(c.resourceStore.Ingresses)
serverRoutes, hosts, err := caddy.ConvertToCaddyConfig(c.resourceStore.Ingresses)
if err != nil {
return errors.Wrap(err, "converting ingress resources to caddy config")
}
if c.resourceStore.CaddyConfig != nil {
c.resourceStore.CaddyConfig.Modules.HTTP.Servers.Server.Routes = serverRoutes
// set tls policies
p := c.resourceStore.CaddyConfig.Modules.TLS.Automation.Policies
for i := range p {
p[i].Hosts = hosts
}
}
// reload caddy2 config with newConfig

View File

@ -3,7 +3,9 @@ package controller
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"net/http"
"os"
"time"
@ -26,6 +28,8 @@ import (
_ "bitbucket.org/lightcodelabs/caddy2/modules/caddyhttp"
_ "bitbucket.org/lightcodelabs/caddy2/modules/caddyhttp/caddylog"
_ "bitbucket.org/lightcodelabs/caddy2/modules/caddyhttp/staticfiles"
"bitbucket.org/lightcodelabs/ingress/pkg/storage"
_ "bitbucket.org/lightcodelabs/ingress/pkg/storage"
_ "bitbucket.org/lightcodelabs/proxy"
)
@ -53,6 +57,12 @@ type CaddyController struct {
// NewCaddyController returns an instance of the caddy ingress controller.
func NewCaddyController(namespace string, kubeClient *kubernetes.Clientset, resource string, restClient rest.Interface) *CaddyController {
// TODO :- we should get the namespace of the ingress we are processing to store secrets
// Do this in the SecretStorage package
if namespace == "" {
namespace = "default"
}
controller := &CaddyController{
kubeClient: kubeClient,
namespace: namespace,
@ -69,7 +79,7 @@ func NewCaddyController(namespace string, kubeClient *kubernetes.Clientset, reso
controller.indexer = indexer
controller.informer = informer
controller.resourceStore = store.NewStore(controller.kubeClient)
controller.resourceStore = store.NewStore(controller.kubeClient, namespace)
podInfo, err := pod.GetPodDetails(kubeClient)
if err != nil {
@ -80,6 +90,24 @@ func NewCaddyController(namespace string, kubeClient *kubernetes.Clientset, reso
// attempt to do initial sync with ingresses
controller.syncQueue.Add(SyncStatusAction{})
// Register caddy cert storage module.
caddy2.RegisterModule(caddy2.Module{
Name: "caddy.storage.secret_store",
New: func() (interface{}, error) {
ss := &storage.SecretStorage{
Namespace: namespace,
KubeClient: kubeClient,
}
return ss, nil
},
})
err = caddy2.StartAdmin("127.0.0.1:1234")
if err != nil {
klog.Fatal(err)
}
return controller
}
@ -101,12 +129,16 @@ func (c *CaddyController) reloadCaddy() error {
return err
}
cfgReader := bytes.NewReader(j)
err = caddy2.Load(cfgReader)
// post to load endpoint
resp, err := http.Post("http://127.0.0.1:1234/load", "application/json", bytes.NewBuffer(j))
if err != nil {
return err
}
if resp.StatusCode != http.StatusOK {
return errors.New("could not reload caddy config")
}
return nil
}

View File

@ -17,7 +17,7 @@ type Store struct {
// NewStore returns a new store that keeps track of ingresses and secrets. It will attempt to get
// all current ingresses before returning.
func NewStore(kubeClient *kubernetes.Clientset) *Store {
func NewStore(kubeClient *kubernetes.Clientset, namespace string) *Store {
ingresses, err := kubeClient.ExtensionsV1beta1().Ingresses("").List(v1.ListOptions{})
if err != nil {
klog.Errorf("could not get existing ingresses in cluster")
@ -26,7 +26,7 @@ func NewStore(kubeClient *kubernetes.Clientset) *Store {
s := &Store{
Ingresses: []*v1beta1.Ingress{},
CaddyConfig: caddy.NewConfig(),
CaddyConfig: caddy.NewConfig(namespace),
}
for _, i := range ingresses.Items {

View File

@ -1,30 +1,25 @@
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: caddyingresscontroller-role
name: caddy-ingress-controller-role
rules:
- apiGroups:
- ""
- "extensions"
resources:
- ingresses
- routes
- extensions
- ingresses/status
verbs:
- list
- get
- update
- patch
- watch
- delete
- secrets
verbs: ["*"]
- apiGroups:
- ""
resources:
- services
- pods
- nodes
- routes
- extensions
verbs:
- list
- get
- watch
- watch

View File

@ -1,12 +1,12 @@
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: caddyingresscontroller-role-binding
name: caddy-ingress-controller-role-binding
roleRef:
kind: ClusterRole
name: caddyingresscontroller-role
name: caddy-ingress-controller-role
apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
name: caddyingresscontroller
name: caddy-ingress-controller
namespace: default

View File

@ -1,10 +1,10 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: caddyingresscontroller
name: caddy-ingress-controller
labels:
app: caddyIngressController
chart: "caddyingresscontroller-v0.1.0"
app: caddy-ingress-controller
chart: "caddy-ingress-controller-v0.1.0"
release: "release-name"
heritage: "Tiller"
version: v0.1.0
@ -14,21 +14,22 @@ spec:
revisionHistoryLimit: 2
selector:
matchLabels:
app: caddyIngressController
app: caddy-ingress-controller
release: "release-name"
template:
metadata:
labels:
app: caddyIngressController
chart: "caddyingresscontroller-v0.1.0"
app: caddy-ingress-controller
chart: "caddy-ingress-controller-v0.1.0"
release: "release-name"
heritage: "Tiller"
version: v0.1.0
spec:
serviceAccountName: caddyingresscontroller
serviceAccountName: caddy-ingress-controller
containers:
- name: caddyingresscontroller
image: "caddy/ingresscontroller"
- name: caddy-ingress-controller
image: "gcr.io/danny-239313/ingresscontroller"
imagePullPolicy: IfNotPresent
securityContext:
allowPrivilegeEscalation: true
@ -43,10 +44,8 @@ spec:
ports:
- name: http
containerPort: 80
hostPort: 80 # optional, required if running in minikube
- name: https
containerPort: 443
hostPort: 443 # optional, required if running in minikube
env:
- name: POD_NAME
valueFrom:

View File

@ -0,0 +1,19 @@
apiVersion: v1
kind: Service
metadata:
name: caddy-ingress-controller
labels:
app: caddy-ingress-controller
spec:
ports:
- name: http
port: 80
protocol: TCP
targetPort: http
- name: https
port: 443
protocol: TCP
targetPort: https
selector:
app: caddy-ingress-controller
type: "LoadBalancer"

View File

@ -1,10 +1,11 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: caddyingresscontroller
labels:
app: caddyIngressController
chart: "caddyingresscontroller-v0.1.0"
app: caddy-ingress-controller
chart: "caddy-ingress-controller-v0.1.0"
release: "release-name"
heritage: "Tiller"
version: v0.1.0
name: caddy-ingress-controller

View File

@ -1,4 +1,4 @@
apiVersion: v1
description: A helm chart for the Caddy Kubernetes ingress controller
name: caddyingresscontroller
name: caddy-ingress-controller
version: v0.1.0

View File

@ -9,19 +9,17 @@ rules:
- "extensions"
resources:
- ingresses
- routes
verbs:
- list
- get
- update
- patch
- watch
- delete
- ingresses/status
- secrets
verbs: ["*"]
- apiGroups:
- ""
resources:
- services
- pods
- nodes
- routes
- extensions
verbs:
- list
- get

View File

@ -28,10 +28,32 @@ spec:
{{ toYaml .Values.caddyingresscontroller.deployment.labels | indent 8 }}
{{- end }}
spec:
serviceAccountName: {{ .Values.serviceAccountName }}
containers:
- image: "{{ .Values.caddyingresscontroller.image.name }}:{{ .Values.caddyingresscontroller.image.tag }}"
- name: {{ .Values.name }}
image: "{{ .Values.caddyingresscontroller.image.name }}:{{ .Values.caddyingresscontroller.image.tag }}"
imagePullPolicy: {{ .Values.caddyingresscontroller.image.pullPolicy }}
name: {{ .Values.name }}
securityContext:
allowPrivilegeEscalation: true
capabilities:
drop:
- ALL
add:
- NET_BIND_SERVICE
# www-data -> 33
runAsUser: 0
runAsGroup: 0
ports:
- name: http
containerPort: 80
{{- if .Values.minikube }}
hostPort: 80 # optional, required if running in minikube
{{- end }}
- name: https
containerPort: 443
{{- if .Values.minikube }}
hostPort: 443 # optional, required if running in minikube
{{- end }}
env:
- name: POD_NAME
valueFrom:
@ -44,5 +66,4 @@ spec:
{{- if .Values.caddyingresscontroller.watchNamespace }}
- name: KUBERNETES_NAMESPACE
value: {{ .Values.caddyingresscontroller.watchNamespace | quote }}
{{- end }}
serviceAccountName: {{ .Values.serviceAccountName }}
{{- end }}

View File

@ -0,0 +1,23 @@
{{- if .Values.minikube }}
# we don't need a loadbalancer for local deployment purposes
{{ else }}
apiVersion: v1
kind: Service
metadata:
name: {{ .Values.name }}
labels:
app: {{ .Values.name }}
spec:
ports:
- name: http
port: 80
protocol: TCP
targetPort: http
- name: https
port: 443
protocol: TCP
targetPort: https
selector:
app: {{ .Values.name }}
type: "LoadBalancer"
{{- end }}

View File

@ -10,19 +10,17 @@ rules:
- "extensions"
resources:
- ingresses
- routes
verbs:
- list
- get
- update
- patch
- watch
- delete
- ingresses/status
- secrets
verbs: ["*"]
- apiGroups:
- ""
resources:
- services
- pods
- nodes
- routes
- extensions
verbs:
- list
- get

View File

@ -1,7 +1,4 @@
# Default values for caddyingresscontroller.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
# Default values for the caddy ingress controller.
kubernetes:
host: https://kubernetes.default
@ -24,11 +21,12 @@ caddyingresscontroller:
version: "v0.1.0"
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname template
name: caddyIngressController
name: caddy-ingress-controller
image:
name: caddy/ingresscontroller
name: "gcr.io/danny-239313/ingresscontroller"
tag: "v0.1.0"
pullPolicy: IfNotPresent
name: "caddyingresscontroller"
serviceAccountName: "caddyingresscontroller"
name: "caddy-ingress-controller"
serviceAccountName: "caddy-ingress-controller"
minikube: false

147
pkg/storage/storage.go Normal file
View File

@ -0,0 +1,147 @@
package storage
import (
"fmt"
"regexp"
"strings"
"github.com/mholt/certmagic"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/klog"
)
// matchLabels are attached to each resource so that they can be found in the future.
var matchLabels = map[string]string{
	"manager": "caddy",
}

// labelSelector is the search string that will return all secrets managed by the caddy ingress controller.
var labelSelector = "manager=caddy"

// specialChars is a regex that matches all special characters except '.' and '-'.
var specialChars = regexp.MustCompile("[^0-9a-zA-Z.-]+")

// cleanKey converts a certmagic storage key into a valid kubernetes secret name:
// all characters not allowed in kubernetes names are stripped, and the result is
// lowercased because kubernetes object names must be lowercase RFC 1123
// subdomains (uppercase would make Create/Get calls fail).
//
// NOTE(review): stripping (rather than mapping) separator characters such as '/'
// means two distinct certmagic keys can collide on the same secret name — TODO
// confirm this cannot happen for the keys certmagic actually uses.
func cleanKey(key string) string {
	return "caddy.ingress--" + strings.ToLower(specialChars.ReplaceAllString(key, ""))
}
// SecretStorage facilitates storing certificates retrieved by certmagic in
// kubernetes secrets. Each certmagic key is flattened into a single secret
// name (see cleanKey) within Namespace, and every managed secret carries the
// manager=caddy label so it can be found again via labelSelector.
type SecretStorage struct {
	// Namespace is the kubernetes namespace the secrets are created in.
	Namespace string
	// KubeClient is the client used for all secret operations.
	KubeClient *kubernetes.Clientset
}
// CertMagicStorage returns a certmagic storage type to be used by caddy.
// SecretStorage itself implements the certmagic.Storage interface, so the
// receiver is returned directly; the error is always nil.
func (s *SecretStorage) CertMagicStorage() (certmagic.Storage, error) {
	return s, nil
}
// Exists reports whether a secret for key is present in the namespace.
// List errors are logged and treated as "does not exist".
func (s *SecretStorage) Exists(key string) bool {
	name := cleanKey(key)

	list, err := s.KubeClient.CoreV1().Secrets(s.Namespace).List(metav1.ListOptions{
		FieldSelector: fmt.Sprintf("metadata.name=%v", name),
	})
	if err != nil {
		klog.Error(err)
		return false
	}

	// The field selector already narrows the result, but verify the name
	// matches exactly before reporting existence.
	for _, item := range list.Items {
		if item.ObjectMeta.Name == name {
			return true
		}
	}
	return false
}
// Store saves value at key. More than certs and keys are stored by certmagic
// in secrets (locks, metadata), all labeled manager=caddy.
//
// certmagic expects Store to overwrite any existing value for key, but Create
// fails with AlreadyExists on a second write — so if the secret is already
// present it is updated in place instead.
func (s *SecretStorage) Store(key string, value []byte) error {
	se := corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:   cleanKey(key),
			Labels: matchLabels,
		},
		Data: map[string][]byte{
			"value": value,
		},
	}

	secrets := s.KubeClient.CoreV1().Secrets(s.Namespace)
	if _, err := secrets.Get(cleanKey(key), metav1.GetOptions{}); err == nil {
		// Secret already exists: overwrite it.
		_, err = secrets.Update(&se)
		return err
	}
	_, err := secrets.Create(&se)
	return err
}
// Load retrieves the value stored at the given key. The error from the
// kubernetes Get call is returned unchanged when the secret is missing.
func (s *SecretStorage) Load(key string) ([]byte, error) {
	se, err := s.KubeClient.CoreV1().Secrets(s.Namespace).Get(cleanKey(key), metav1.GetOptions{})
	if err != nil {
		return nil, err
	}
	// The payload always lives under the "value" data key (see Store).
	return se.Data["value"], nil
}
// Delete removes the secret backing the given key, returning any error from
// the kubernetes API unchanged.
func (s *SecretStorage) Delete(key string) error {
	return s.KubeClient.CoreV1().Secrets(s.Namespace).Delete(cleanKey(key), &metav1.DeleteOptions{})
}
// List returns the names of all managed secrets whose name matches the
// cleaned prefix.
//
// NOTE(review): the returned values are the mangled kubernetes secret names,
// not the original certmagic keys. Passing one back through Load/Stat applies
// cleanKey (and its "caddy.ingress--" prefix) a second time, so the round trip
// does not resolve; the original key is not recoverable from the secret name.
// TODO: store the raw key in the secret data or an annotation.
func (s *SecretStorage) List(prefix string, recursive bool) ([]string, error) {
	var keys []string

	secrets, err := s.KubeClient.CoreV1().Secrets(s.Namespace).List(metav1.ListOptions{LabelSelector: labelSelector})
	if err != nil {
		return keys, err
	}

	// TODO :- do we need to handle the recursive flag?
	want := cleanKey(prefix)
	for _, secret := range secrets.Items {
		if name := secret.ObjectMeta.Name; strings.HasPrefix(name, want) {
			keys = append(keys, name)
		}
	}

	return keys, nil
}
// Stat returns information about the value stored at key.
//
// NOTE(review): Modified is the secret's creation timestamp, not a last-write
// time — if Store ever updates a secret in place this will be stale. Also,
// IsTerminal is hard-coded to false; confirm that is the intended value for
// flattened (non-hierarchical) keys, since certmagic uses it to distinguish
// leaf entries from directory-like prefixes.
func (s *SecretStorage) Stat(key string) (certmagic.KeyInfo, error) {
	secret, err := s.KubeClient.CoreV1().Secrets(s.Namespace).Get(cleanKey(key), metav1.GetOptions{})
	if err != nil {
		return certmagic.KeyInfo{}, err
	}

	return certmagic.KeyInfo{
		Key:        key,
		Modified:   secret.GetCreationTimestamp().UTC(),
		Size:       int64(len(secret.Data["value"])),
		IsTerminal: false,
	}, nil
}
// Lock is a no-op.
//
// NOTE(review): certmagic uses Lock/Unlock to serialize certificate operations
// across separate processes, not just goroutines — client thread safety does
// not cover that. With a no-op lock, multiple controller replicas could attempt
// duplicate ACME issuance for the same name. Confirm the controller only runs
// as a single replica, or implement a real lock (e.g. a kubernetes Lease).
func (s *SecretStorage) Lock(key string) error {
	return nil
}

// Unlock is a no-op; see the note on Lock.
func (s *SecretStorage) Unlock(key string) error {
	return nil
}

View File

@ -2,7 +2,7 @@ apiVersion: skaffold/v1beta8
kind: Config
build:
artifacts:
- image: caddy/ingresscontroller
- image: gcr.io/danny-239313/ingresscontroller
deploy:
kubectl:
manifests:
@ -15,3 +15,4 @@ deploy:
- kubernetes/generated/clusterrolebinding.yaml
- kubernetes/generated/deployment.yaml
- kubernetes/generated/serviceaccount.yaml
- kubernetes/generated/loadbalancer.yaml