Use ConfigMap for Global Options (#30)

Marc-Antoine 2020-11-09 10:06:14 +01:00 committed by GitHub
parent ad3c0ac56a
commit 66c52c682f
15 changed files with 443 additions and 188 deletions

View File

@ -14,3 +14,5 @@ sources:
maintainers:
- name: mavimo
url: https://github.com/mavimo
- name: embraser01
url: https://github.com/embraser01

View File

@ -0,0 +1,10 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "caddy-ingress-controller.name" . }}-configmap
namespace: {{ .Release.Namespace }}
data:
{{- range keys .Values.ingressController.config | sortAlpha }}
{{ . }}: {{ get $.Values.ingressController.config . | quote }}
{{- end }}
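
Note: as a rough illustration of what this template emits, here is a self-contained Go sketch (a stand-in, not the Helm renderer; sortedKeys, get and quote approximate the Sprig functions used above) showing that the config keys are rendered in alphabetical order and every value is quoted into a string:

package main

import (
	"fmt"
	"os"
	"sort"
	"strconv"
	"text/template"
)

func main() {
	// Sample values matching .Values.ingressController.config in values.yaml.
	config := map[string]interface{}{
		"email":  "test@example.com",
		"acmeCA": "",
		"debug":  false,
	}

	// Stand-ins for the Sprig functions used by the chart (keys | sortAlpha, get, quote).
	funcs := template.FuncMap{
		"sortedKeys": func(m map[string]interface{}) []string {
			ks := make([]string, 0, len(m))
			for k := range m {
				ks = append(ks, k)
			}
			sort.Strings(ks)
			return ks
		},
		"get":   func(m map[string]interface{}, k string) interface{} { return m[k] },
		"quote": func(v interface{}) string { return strconv.Quote(fmt.Sprint(v)) },
	}

	tpl := template.Must(template.New("cm").Funcs(funcs).Parse(
		"data:\n{{- range sortedKeys . }}\n  {{ . }}: {{ get $ . | quote }}\n{{- end }}\n"))
	if err := tpl.Execute(os.Stdout, config); err != nil {
		panic(err)
	}
	// Output:
	// data:
	//   acmeCA: ""
	//   debug: "false"
	//   email: "test@example.com"
}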

View File

@ -60,9 +60,9 @@ spec:
- name: tmp
mountPath: /tmp
args:
{{- if .Values.ingressController.autotls }}
- -tls
- -email={{ .Values.ingressController.email }}
- -config-map={{ include "caddy-ingress-controller.name" . }}-configmap
{{- if .Values.ingressController.watchNamespace }}
- -namespace={{ .Values.ingressController.watchNamespace }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:

View File

@ -73,8 +73,7 @@
"type": "object",
"required": [
"rbac",
"autotls",
"email"
"config"
],
"properties": {
"rbac": {
@ -90,13 +89,39 @@
}
}
},
"autotls": {
"$id": "#/properties/ingressController/properties/autotls",
"type": "boolean"
},
"email": {
"$id": "#/properties/ingressController/properties/email",
"type": "string"
"config": {
"$id": "#/properties/ingressController/properties/config",
"type": "object",
"properties": {
"acmeCA": {
"$id": "#/properties/ingressController/properties/config/properties/acmeCA",
"type": "string",
"oneOf": [
{
"format": "uri"
},
{
"maxLength": 0
}
]
},
"email": {
"$id": "#/properties/ingressController/properties/config/properties/email",
"type": "string",
"oneOf": [
{
"format": "email"
},
{
"maxLength": 0
}
]
},
"debug": {
"$id": "#/properties/ingressController/properties/config/properties/debug",
"type": "boolean"
}
}
}
}
},

View File

@ -20,10 +20,10 @@ ingressController:
rbac:
create: true
# If setting autotls the following email value must be set
# to an email address that you manage
autotls: false
email: ""
config:
acmeCA: ""
email: ""
debug: false
serviceAccount:
# Specifies whether a service account should be created

View File

@ -4,33 +4,19 @@ import (
"flag"
"github.com/caddyserver/ingress/internal/caddy"
"github.com/sirupsen/logrus"
)
func parseFlags() caddy.ControllerConfig {
var email string
flag.StringVar(&email, "email", "", "the email address to use for requesting tls certificates if automatic https is enabled.")
var namespace string
flag.StringVar(&namespace, "namespace", "", "the namespace that you would like to observe kubernetes ingress resources in.")
var enableAutomaticTLS bool
flag.BoolVar(&enableAutomaticTLS, "tls", false, "defines if automatic tls should be enabled for hostnames defined in ingress resources.")
var tlsUseStaging bool
flag.BoolVar(&tlsUseStaging, "tls-use-staging", false, "defines if the lets-encrypt staging server should be used for testing the provisioning of tls certificates.")
var configMapName string
flag.StringVar(&configMapName, "config-map", "", "defines the config map name from which to load global options")
flag.Parse()
if email == "" && enableAutomaticTLS {
logrus.Info("An email must be defined for automatic tls features, set flag `email` with the email address you would like to use for certificate registration.")
enableAutomaticTLS = false
}
return caddy.ControllerConfig{
Email: email,
AutomaticTLS: enableAutomaticTLS,
TLSUseStaging: tlsUseStaging,
WatchNamespace: namespace,
ConfigMapName: configMapName,
}
}

View File

@ -41,8 +41,7 @@ func main() {
logrus.Fatalf(msg, err)
}
restClient := kubeClient.NetworkingV1beta1().RESTClient()
c := controller.NewCaddyController(kubeClient, restClient, cfg)
c := controller.NewCaddyController(kubeClient, cfg)
reg := prometheus.NewRegistry()
reg.MustRegister(prometheus.NewGoCollector())

go.mod
View File

@ -5,6 +5,7 @@ go 1.14
require (
github.com/caddyserver/caddy/v2 v2.0.0
github.com/caddyserver/certmagic v0.10.12
github.com/mitchellh/mapstructure v1.1.2
github.com/pkg/errors v0.9.1
github.com/prometheus/client_golang v1.6.0
github.com/sirupsen/logrus v1.6.0

go.sum
View File

@ -647,6 +647,7 @@ github.com/mitchellh/go-ps v0.0.0-20190716172923-621e5597135b/go.mod h1:r1VsdOzO
github.com/mitchellh/go-vnc v0.0.0-20150629162542-723ed9867aed/go.mod h1:3rdaFaCv4AyBgu5ALFM0+tSuHrBh6v692nyQe3ikrq0=
github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
github.com/mitchellh/mapstructure v0.0.0-20180220230111-00c29f56e238/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY=
github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=

View File

@ -2,7 +2,6 @@ package caddy
import (
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/caddyconfig"
"github.com/caddyserver/caddy/v2/modules/caddyhttp"
"github.com/caddyserver/caddy/v2/modules/caddytls"
)
@ -22,58 +21,47 @@ type Storage struct {
type Config struct {
Storage Storage `json:"storage"`
Apps map[string]interface{} `json:"apps"`
Logging caddy.Logging `json:"logging"`
}
// ControllerConfig represents ingress controller config received through cli arguments.
type ControllerConfig struct {
Email string
AutomaticTLS bool
TLSUseStaging bool
WatchNamespace string
ConfigMapName string
}
// NewConfig returns a plain slate caddy2 config file.
func NewConfig(namespace string, cfg ControllerConfig) *Config {
acmeIssuer := caddytls.ACMEIssuer{
CA: getCAEndpoint(cfg.TLSUseStaging),
Email: cfg.Email}
// NewConfig returns a base caddy2 config, seeded from cfgMapConfig when one is provided.
func NewConfig(namespace string, cfgMapConfig *Config) *Config {
var cfg *Config
return &Config{
Storage: Storage{
System: "secret_store",
StorageValues: StorageValues{
Namespace: namespace,
},
},
Apps: map[string]interface{}{
"tls": caddytls.TLS{
Automation: &caddytls.AutomationConfig{
Policies: []*caddytls.AutomationPolicy{
{
IssuerRaw: caddyconfig.JSONModuleObject(acmeIssuer, "module", "acme", nil),
if cfgMapConfig != nil {
cfg = cfgMapConfig
} else {
cfg = &Config{
Logging: caddy.Logging{},
Apps: map[string]interface{}{
"tls": &caddytls.TLS{
CertificatesRaw: caddy.ModuleMap{},
},
"http": &caddyhttp.App{
Servers: map[string]*caddyhttp.Server{
"ingress_server": {
AutoHTTPS: &caddyhttp.AutoHTTPSConfig{},
Listen: []string{":443"},
},
},
},
CertificatesRaw: caddy.ModuleMap{},
},
"http": caddyhttp.App{
Servers: map[string]*caddyhttp.Server{
"ingress_server": &caddyhttp.Server{
AutoHTTPS: &caddyhttp.AutoHTTPSConfig{
Disabled: !cfg.AutomaticTLS,
Skip: make([]string, 0),
},
Listen: []string{":80", ":443"},
},
},
},
}
}
// set cert-magic storage provider
cfg.Storage = Storage{
System: "secret_store",
StorageValues: StorageValues{
Namespace: namespace,
},
}
}
func getCAEndpoint(useStaging bool) string {
if useStaging {
return "https://acme-staging-v02.api.letsencrypt.org/directory"
}
return ""
return cfg
}
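
As a rough reference for anyone mounting their own /etc/caddy/config.json, this standalone sketch (same caddy v2 modules imported above) marshals the equivalent of the fallback base config to show its JSON shape; the exact keys depend on the caddy version's struct tags:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/caddyserver/caddy/v2"
	"github.com/caddyserver/caddy/v2/modules/caddyhttp"
	"github.com/caddyserver/caddy/v2/modules/caddytls"
)

func main() {
	// Mirrors the apps NewConfig builds when no config file is mounted.
	apps := map[string]interface{}{
		"tls": &caddytls.TLS{CertificatesRaw: caddy.ModuleMap{}},
		"http": &caddyhttp.App{
			Servers: map[string]*caddyhttp.Server{
				"ingress_server": {
					AutoHTTPS: &caddyhttp.AutoHTTPSConfig{},
					Listen:    []string{":443"},
				},
			},
		},
	}
	b, _ := json.MarshalIndent(apps, "", "  ")
	fmt.Println(string(b))
	// Roughly:
	// {
	//   "http": {
	//     "servers": {
	//       "ingress_server": { "listen": [":443"], "automatic_https": {} }
	//     }
	//   },
	//   "tls": {}
	// }
}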

View File

@ -3,18 +3,17 @@ package controller
import (
"encoding/json"
"fmt"
"io"
"github.com/caddyserver/caddy/v2/modules/caddyhttp"
"github.com/caddyserver/caddy/v2/modules/caddytls"
"github.com/caddyserver/ingress/internal/caddy"
config "github.com/caddyserver/ingress/internal/caddy"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"k8s.io/api/networking/v1beta1"
)
// loadConfigMap runs when a config map with caddy config is loaded on app start.
func (c *CaddyController) onLoadConfig(obj io.Reader) {
func (c *CaddyController) onLoadConfig(obj interface{}) {
c.syncQueue.Add(LoadConfigAction{
config: obj,
})
@ -54,7 +53,7 @@ type Action interface {
// LoadConfigAction provides an implementation of the action interface.
type LoadConfigAction struct {
config io.Reader
config interface{}
}
// ResourceAddedAction provides an implementation of the action interface.
@ -75,7 +74,15 @@ type ResourceDeletedAction struct {
func (r LoadConfigAction) handle(c *CaddyController) error {
logrus.Info("Config file detected, updating Caddy config...")
return c.loadConfigFromFile(r.config)
c.resourceStore.CaddyConfig = r.config.(*config.Config)
err := regenerateConfig(c)
if err != nil {
return err
}
return nil
}
func (r ResourceAddedAction) handle(c *CaddyController) error {
@ -90,7 +97,7 @@ func (r ResourceAddedAction) handle(c *CaddyController) error {
// add this ingress to the internal store
c.resourceStore.AddIngress(ing)
err := updateConfig(c)
err := regenerateConfig(c)
if err != nil {
return err
}
@ -117,7 +124,7 @@ func (r ResourceUpdatedAction) handle(c *CaddyController) error {
// add or update this ingress in the internal store
c.resourceStore.AddIngress(ing)
err := updateConfig(c)
err := regenerateConfig(c)
if err != nil {
return err
}
@ -139,7 +146,7 @@ func (r ResourceDeletedAction) handle(c *CaddyController) error {
// add this ingress to the internal store
c.resourceStore.PluckIngress(ing)
err := updateConfig(c)
err := regenerateConfig(c)
if err != nil {
return err
}
@ -148,9 +155,32 @@ func (r ResourceDeletedAction) handle(c *CaddyController) error {
return nil
}
// updateConfig updates internal caddy config with new ingress info.
func updateConfig(c *CaddyController) error {
apps := c.resourceStore.CaddyConfig.Apps
// regenerateConfig regenerates the caddy config with the updated resources.
func regenerateConfig(c *CaddyController) error {
logrus.Info("Updating caddy config")
var cfg *config.Config
var cfgFile *config.Config
var err error
if c.usingConfigMap {
cfgFile, err = loadCaddyConfigFile("/etc/caddy/config.json")
if err != nil {
logrus.Warn("Unable to load config file: %v", err)
}
}
cfg = config.NewConfig(c.podInfo.Namespace, cfgFile)
tlsApp := cfg.Apps["tls"].(*caddytls.TLS)
httpApp := cfg.Apps["http"].(*caddyhttp.App)
if c.resourceStore.ConfigMap != nil {
err := setConfigMapOptions(c, cfg)
if err != nil {
return errors.Wrap(err, "caddy config reload")
}
}
// if certs are defined on an ingress resource we need to handle them.
tlsCfg, err := c.HandleOwnCertManagement(c.resourceStore.Ingresses)
@ -158,22 +188,14 @@ func updateConfig(c *CaddyController) error {
return errors.Wrap(err, "caddy config reload")
}
// after TLS secrets are synched we should load them in the cert pool.
// after TLS secrets are synched we should load them in the cert pool
// and skip auto https for hosts with certs provided
if tlsCfg != nil {
apps["tls"].(caddytls.TLS).CertificatesRaw["load_folders"] = tlsCfg["load_folders"].(json.RawMessage)
} else {
// reset cert loading
apps["tls"].(caddytls.TLS).CertificatesRaw["load_folders"] = json.RawMessage(`[]`)
}
tlsApp.CertificatesRaw["load_folders"] = tlsCfg["load_folders"].(json.RawMessage)
// skip auto https for hosts with certs provided
if tlsCfg != nil {
if hosts, ok := tlsCfg["hosts"].([]string); ok {
apps["http"].(caddyhttp.App).Servers["ingress_server"].AutoHTTPS.Skip = hosts
httpApp.Servers["ingress_server"].AutoHTTPS.Skip = hosts
}
} else {
// reset any skipped hosts set
apps["http"].(caddyhttp.App).Servers["ingress_server"].AutoHTTPS.Skip = make([]string, 0)
}
if !c.usingConfigMap {
@ -183,11 +205,11 @@ func updateConfig(c *CaddyController) error {
}
// set the http server routes
apps["http"].(caddyhttp.App).Servers["ingress_server"].Routes = serverRoutes
httpApp.Servers["ingress_server"].Routes = serverRoutes
}
// reload caddy with new config
err = c.reloadCaddy()
err = c.reloadCaddy(cfg)
if err != nil {
return errors.Wrap(err, "caddy config reload")
}
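
For context on how these Action values are consumed: the event handlers only enqueue actions, and a single worker drains the rate-limited queue and calls handle, so regenerateConfig runs serially. The worker loop itself is not part of this diff; the following is a simplified, self-contained sketch with illustrative names and signatures:

package main

import (
	"log"

	"k8s.io/client-go/util/workqueue"
)

type Action interface {
	handle() error
}

type LoadConfigAction struct{ config interface{} }

func (a LoadConfigAction) handle() error {
	log.Printf("regenerating caddy config from %v", a.config)
	return nil
}

func main() {
	queue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
	queue.Add(LoadConfigAction{config: "configmap data"})

	// One iteration of a worker loop: pop, handle, retry on error.
	item, shutdown := queue.Get()
	if shutdown {
		return
	}
	defer queue.Done(item)

	if err := item.(Action).handle(); err != nil {
		// requeue with rate-limited backoff on failure
		queue.AddRateLimited(item)
		return
	}
	queue.Forget(item)
}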

View File

@ -0,0 +1,185 @@
package controller
import (
"fmt"
"github.com/caddyserver/ingress/internal/caddy"
caddy2 "github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/caddyconfig"
"github.com/caddyserver/caddy/v2/modules/caddytls"
"github.com/mitchellh/mapstructure"
"github.com/sirupsen/logrus"
v1 "k8s.io/api/core/v1"
)
type ConfigMapOptions struct {
Debug bool `json:"debug"`
AcmeCA string `json:"acmeCA"`
Email string `json:"email"`
}
// onConfigMapAdded is run when a config map is added to the cluster.
func (c *CaddyController) onConfigMapAdded(obj interface{}) {
c.syncQueue.Add(ConfigMapAddedAction{
resource: obj,
})
}
// onConfigMapUpdated is run when a config map is updated in the cluster.
func (c *CaddyController) onConfigMapUpdated(old interface{}, new interface{}) {
c.syncQueue.Add(ConfigMapUpdatedAction{
resource: new,
oldResource: old,
})
}
// onConfigMapDeleted is run when a config map is deleted from the cluster.
func (c *CaddyController) onConfigMapDeleted(obj interface{}) {
c.syncQueue.Add(ConfigMapDeletedAction{
resource: obj,
})
}
// ConfigMapAddedAction provides an implementation of the action interface.
type ConfigMapAddedAction struct {
resource interface{}
}
// ConfigMapUpdatedAction provides an implementation of the action interface.
type ConfigMapUpdatedAction struct {
resource interface{}
oldResource interface{}
}
// ConfigMapDeletedAction provides an implementation of the action interface.
type ConfigMapDeletedAction struct {
resource interface{}
}
func (r ConfigMapAddedAction) handle(c *CaddyController) error {
cfgMap, ok := r.resource.(*v1.ConfigMap)
if !ok {
return fmt.Errorf("ConfigMapAddedAction: incoming resource is not of type configmap")
}
// only care about the caddy config map
if !changeTriggerUpdate(c, cfgMap) {
return nil
}
logrus.Info("New configmap detected, updating Caddy config...")
// save the current config map to the store
c.resourceStore.ConfigMap = cfgMap
err := regenerateConfig(c)
if err != nil {
return err
}
logrus.Info("Caddy reloaded successfully.")
return nil
}
func (r ConfigMapUpdatedAction) handle(c *CaddyController) error {
cfgMap, ok := r.resource.(*v1.ConfigMap)
if !ok {
return fmt.Errorf("ConfigMapUpdatedAction: incoming resource is not of type configmap")
}
// only care about the caddy config map
if !changeTriggerUpdate(c, cfgMap) {
return nil
}
logrus.Info("ConfigMap resource updated, updating Caddy config...")
// save the current config map to the store
c.resourceStore.ConfigMap = cfgMap
err := regenerateConfig(c)
if err != nil {
return err
}
logrus.Info("Caddy reloaded successfully.")
return nil
}
func (r ConfigMapDeletedAction) handle(c *CaddyController) error {
cfgMap, ok := r.resource.(*v1.ConfigMap)
if !ok {
return fmt.Errorf("ConfigMapDeletedAction: incoming resource is not of type configmap")
}
// only care about the caddy config map
if !changeTriggerUpdate(c, cfgMap) {
return nil
}
logrus.Info("ConfigMap resource deleted, updating Caddy config...")
// delete config map from internal store
c.resourceStore.ConfigMap = nil
err := regenerateConfig(c)
if err != nil {
return err
}
logrus.Info("Caddy reloaded successfully.")
return nil
}
func setConfigMapOptions(c *CaddyController, cfg *caddy.Config) error {
// parse configmap
cfgMap := ConfigMapOptions{}
config := &mapstructure.DecoderConfig{
Metadata: nil,
WeaklyTypedInput: true,
Result: &cfgMap,
TagName: "json",
}
decoder, err := mapstructure.NewDecoder(config)
if err != nil {
logrus.Warningf("unexpected error creating decoder: %v", err)
}
err = decoder.Decode(c.resourceStore.ConfigMap.Data)
if err != nil {
logrus.Warningf("unexpected error parsing configmap: %v", err)
}
logrus.Infof("using config map options: %+v to %+v", c.resourceStore.ConfigMap.Data, cfgMap)
// merge configmap options to CaddyConfig
tlsApp := cfg.Apps["tls"].(*caddytls.TLS)
//httpApp := cfg.Apps["http"].(*caddyhttp.App)
if cfgMap.Debug {
cfg.Logging.Logs = map[string]*caddy2.CustomLog{"default": {Level: "DEBUG"}}
}
if cfgMap.AcmeCA != "" || cfgMap.Email != "" {
acmeIssuer := caddytls.ACMEIssuer{}
if cfgMap.AcmeCA != "" {
acmeIssuer.CA = cfgMap.AcmeCA
}
if cfgMap.Email != "" {
acmeIssuer.Email = cfgMap.Email
}
tlsApp.Automation = &caddytls.AutomationConfig{
Policies: []*caddytls.AutomationPolicy{
{IssuerRaw: caddyconfig.JSONModuleObject(acmeIssuer, "module", "acme", nil)},
},
}
}
return nil
}
func changeTriggerUpdate(c *CaddyController, cfgMap *v1.ConfigMap) bool {
return cfgMap.Namespace == c.podInfo.Namespace && cfgMap.Name == c.config.ConfigMapName
}
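
ConfigMap data always arrives as map[string]string, so the decoder above needs WeaklyTypedInput to coerce values like "true" into the bool Debug field. A standalone sketch of that decode step, reusing the ConfigMapOptions shape and mapstructure settings from this file:

package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

type ConfigMapOptions struct {
	Debug  bool   `json:"debug"`
	AcmeCA string `json:"acmeCA"`
	Email  string `json:"email"`
}

func main() {
	// ConfigMap data as the API server returns it: all values are strings.
	data := map[string]string{
		"acmeCA": "https://acme-staging-v02.api.letsencrypt.org/directory",
		"email":  "test@example.com",
		"debug":  "true",
	}

	opts := ConfigMapOptions{}
	decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		WeaklyTypedInput: true, // coerce the string "true" into the bool field
		Result:           &opts,
		TagName:          "json",
	})
	if err != nil {
		panic(err)
	}
	if err := decoder.Decode(data); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", opts)
	// => {Debug:true AcmeCA:https://acme-staging-v02.api.letsencrypt.org/directory Email:test@example.com}
}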

View File

@ -1,12 +1,9 @@
package controller
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"time"
@ -17,12 +14,10 @@ import (
"github.com/caddyserver/ingress/pkg/storage"
"github.com/sirupsen/logrus"
apiv1 "k8s.io/api/core/v1"
"k8s.io/api/networking/v1beta1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
@ -40,29 +35,56 @@ const (
secretSyncInterval = time.Hour * 1
)
// Informer defines the required SharedIndexInformers that interact with the API server.
type Informer struct {
Ingress cache.SharedIndexInformer
ConfigMap cache.SharedIndexInformer
}
// Listers contains object listers (stores).
type Listers struct {
Ingress cache.Store
ConfigMap cache.Store
}
// CaddyController represents a caddy ingress controller.
type CaddyController struct {
resourceStore *store.Store
kubeClient *kubernetes.Clientset
restClient rest.Interface
indexer cache.Indexer
syncQueue workqueue.RateLimitingInterface
statusQueue workqueue.RateLimitingInterface // statusQueue performs ingress status updates every 60 seconds but inserts the work into the sync queue
informer cache.Controller
certManager *CertManager
podInfo *pod.Info
config c.ControllerConfig
resourceStore *store.Store
kubeClient *kubernetes.Clientset
// main queue syncing ingresses, configmaps, ... with caddy
syncQueue workqueue.RateLimitingInterface
// informers contains the cache informers
informers *Informer
// listers contains the cache.Store interfaces used in the ingress controller
listers *Listers
// certManager manages user-provided certs
certManager *CertManager
// ingress controller pod info
podInfo *pod.Info
// config of the controller (flags)
config c.ControllerConfig
// if a /etc/caddy/config.json is mounted, it is used as the base config instead of one generated from ingresses
usingConfigMap bool
stopChan chan struct{}
stopChan chan struct{}
}
// NewCaddyController returns an instance of the caddy ingress controller.
func NewCaddyController(kubeClient *kubernetes.Clientset, restClient rest.Interface, cfg c.ControllerConfig) *CaddyController {
func NewCaddyController(kubeClient *kubernetes.Clientset, cfg c.ControllerConfig) *CaddyController {
controller := &CaddyController{
kubeClient: kubeClient,
syncQueue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
statusQueue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
config: cfg,
kubeClient: kubeClient,
syncQueue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
config: cfg,
informers: &Informer{},
listers: &Listers{},
}
podInfo, err := pod.GetPodDetails(kubeClient)
@ -72,39 +94,40 @@ func NewCaddyController(kubeClient *kubernetes.Clientset, restClient rest.Interf
controller.podInfo = podInfo
// load caddy config from file if mounted with config map
var caddyCfgMap *c.Config
cfgPath := "/etc/caddy/config.json"
if _, err := os.Stat(cfgPath); !os.IsNotExist(err) {
controller.usingConfigMap = true
file, err := os.Open(cfgPath)
if err != nil {
log.Fatal(err)
}
defer file.Close()
b, err := ioutil.ReadAll(file)
if err != nil {
log.Fatal(err)
}
// load config file into caddy
controller.syncQueue.Add(LoadConfigAction{config: bytes.NewReader(b)})
json.Unmarshal(b, &caddyCfgMap)
caddyCfgMap, err := loadCaddyConfigFile("/etc/caddy/config.json")
if err != nil {
logrus.Fatalf("Unexpected error reading config.json: %v", err)
}
// setup the ingress controller and start watching resources
ingressListWatcher := cache.NewListWatchFromClient(restClient, "ingresses", cfg.WatchNamespace, fields.Everything())
indexer, informer := cache.NewIndexerInformer(ingressListWatcher, &v1beta1.Ingress{}, 0, cache.ResourceEventHandlerFuncs{
if caddyCfgMap != nil {
controller.usingConfigMap = true
}
// create two informer factories: one for the caddy namespace (config maps) and one for ingress resources
ingressInformerFactory := informers.NewSharedInformerFactoryWithOptions(kubeClient, syncInterval, informers.WithNamespace(cfg.WatchNamespace))
caddyInformerFactory := informers.NewSharedInformerFactoryWithOptions(kubeClient, syncInterval, informers.WithNamespace(podInfo.Namespace))
controller.informers.Ingress = ingressInformerFactory.Networking().V1beta1().Ingresses().Informer()
controller.listers.Ingress = controller.informers.Ingress.GetStore()
controller.informers.ConfigMap = caddyInformerFactory.Core().V1().ConfigMaps().Informer()
controller.listers.ConfigMap = controller.informers.ConfigMap.GetStore()
// add event handlers
controller.informers.Ingress.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: controller.onResourceAdded,
UpdateFunc: controller.onResourceUpdated,
DeleteFunc: controller.onResourceDeleted,
}, cache.Indexers{})
controller.indexer = indexer
controller.informer = informer
})
controller.informers.ConfigMap.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: controller.onConfigMapAdded,
UpdateFunc: controller.onConfigMapUpdated,
DeleteFunc: controller.onConfigMapDeleted,
})
// setup store to keep track of resources
controller.resourceStore = store.NewStore(controller.kubeClient, podInfo.Namespace, cfg, caddyCfgMap)
controller.resourceStore = store.NewStore(kubeClient, podInfo.Namespace, cfg, caddyCfgMap)
// attempt to do initial sync of status addresses with ingresses
controller.dispatchSync()
@ -124,22 +147,25 @@ func (c *CaddyController) Shutdown() error {
// Run method starts the ingress controller.
func (c *CaddyController) Run(stopCh chan struct{}) {
err := c.reloadCaddy()
err := regenerateConfig(c)
if err != nil {
logrus.Errorf("initial caddy config load failed, %v", err.Error())
}
defer runtime.HandleCrash()
defer c.syncQueue.ShutDown()
defer c.statusQueue.ShutDown()
// start the ingress informer where we listen to new / updated ingress resources
go c.informer.Run(stopCh)
// start informers where we listen to new / updated resources
go c.informers.ConfigMap.Run(stopCh)
go c.informers.Ingress.Run(stopCh)
// wait for all involved caches to be synced, before processing items from the queue is started
if !cache.WaitForCacheSync(stopCh, c.informer.HasSynced) {
runtime.HandleError(fmt.Errorf("Timed out waiting for caches to sync"))
return
// wait for all involved caches to be synced before processing items
// from the queue
if !cache.WaitForCacheSync(stopCh,
c.informers.ConfigMap.HasSynced,
c.informers.Ingress.HasSynced,
) {
runtime.HandleError(fmt.Errorf("timed out waiting for caches to sync"))
}
// start processing events for syncing ingress resources
@ -195,22 +221,30 @@ func (c *CaddyController) handleErr(err error, action interface{}) {
logrus.Error(err)
}
// loadConfigFromFile loads caddy with a config defined by an io.Reader.
func (c *CaddyController) loadConfigFromFile(cfg io.Reader) error {
buf := new(bytes.Buffer)
buf.ReadFrom(cfg)
func loadCaddyConfigFile(cfgPath string) (*c.Config, error) {
var caddyCfgMap *c.Config
if _, err := os.Stat(cfgPath); !os.IsNotExist(err) {
file, err := os.Open(cfgPath)
if err != nil {
return nil, err
}
defer file.Close()
err := caddy.Load(buf.Bytes(), true)
if err != nil {
return fmt.Errorf("could not load caddy config %v", err.Error())
b, err := ioutil.ReadAll(file)
if err != nil {
return nil, err
}
json.Unmarshal(b, &caddyCfgMap)
} else {
return nil, nil
}
return nil
return caddyCfgMap, nil
}
// reloadCaddy reloads the internal caddy instance with the provided config.
func (c *CaddyController) reloadCaddy() error {
j, err := json.Marshal(c.resourceStore.CaddyConfig)
func (c *CaddyController) reloadCaddy(config *c.Config) error {
j, err := json.Marshal(config)
if err != nil {
return err
}

View File

@ -3,6 +3,7 @@ package store
import (
c "github.com/caddyserver/ingress/internal/caddy"
"github.com/sirupsen/logrus"
k "k8s.io/api/core/v1"
"k8s.io/api/networking/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
@ -12,42 +13,34 @@ import (
type Store struct {
Ingresses []*v1beta1.Ingress
Secrets []interface{} // TODO :- should we store the secrets in the ingress object?
ConfigMap *k.ConfigMap
CaddyConfig *c.Config
}
// NewStore returns a new store that keeps track of ingresses and secrets. It will attempt to get
// all current ingresses before returning.
func NewStore(kubeClient *kubernetes.Clientset, namespace string, cfg c.ControllerConfig, cfgMapConfig *c.Config) *Store {
ingresses, err := kubeClient.NetworkingV1beta1().Ingresses("").List(v1.ListOptions{})
if err != nil {
logrus.Errorf("could not get existing ingresses in cluster")
return &Store{}
}
s := &Store{
Ingresses: []*v1beta1.Ingress{},
}
for _, i := range ingresses.Items {
s.Ingresses = append(s.Ingresses, &i)
ingresses, err := kubeClient.NetworkingV1beta1().Ingresses(cfg.WatchNamespace).List(v1.ListOptions{})
if err != nil {
logrus.Errorf("could not get existing ingresses in cluster", err)
} else {
for _, i := range ingresses.Items {
s.Ingresses = append(s.Ingresses, &i)
}
}
// not using cfg map to configure the ingress controller
if cfgMapConfig == nil {
s.CaddyConfig = c.NewConfig(namespace, cfg)
return s
}
// set cert-magic storage provider
cfgMapConfig.Storage = c.Storage{
System: "secret_store",
StorageValues: c.StorageValues{
Namespace: namespace,
},
cfgMap, err := kubeClient.CoreV1().ConfigMaps(namespace).Get(cfg.ConfigMapName, v1.GetOptions{})
if err != nil {
logrus.Warn("could not get option configmap", err)
} else {
s.ConfigMap = cfgMap
}
s.CaddyConfig = cfgMapConfig
return s
}

View File

@ -0,0 +1,9 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: caddy-global-options
namespace: caddy-system
data:
acmeCA: https://acme-staging-v02.api.letsencrypt.org/directory
email: test@example.com
debug: "false"