initial info for contributions (#21)

* initial info for contributions

* Auto update docker images on file changes for skaffold

* improve build time ignoring files that are not relevant
This commit is contained in:
Marco Vito Moscaritolo 2020-02-24 05:58:46 +01:00 committed by GitHub
parent 2e31a66bbd
commit 9155435e2d
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
10 changed files with 411 additions and 37 deletions

54
CONTRIBUTING.md Normal file
View File

@ -0,0 +1,54 @@
## Requirements
We will explain how to contribute to this project using a Linux machine; in order to be able to easily contribute you need:
- A machine with a public IP in order to use Let's Encrypt (you can provision an ad-hoc machine on any cloud provider you use)
- A domain that redirects to the server IP
- [kind](https://github.com/kubernetes-sigs/kind) (to create a development cluster)
- [skaffold](https://skaffold.dev/) (to improve development experience)
- [Docker HUB](https://hub.docker.com) account (to store your docker images)
## Setup a development cluster
We create a three-node cluster (one master plus two workers); we start by setting up the configuration:
```bash
cat <<EOF >> cluster.yml
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
- role: worker
- role: worker
EOF
```
then we create the cluster
```bash
kind create cluster --config=cluster.yml
```
and activate the `kubectl` config via:
```
kind export kubeconfig
```
## Configure your docker credentials
Authenticate your Docker instance:
```
docker login
```
## Setup development env
Replace the docker image you are going to use in `kubernetes/generated/deployment.yaml` and `skaffold.yaml` replacing `MYACCOUNT` with your Docker Hub account in `docker.io/MYACCOUNT/caddy-ingress-controller`
Replace also the domain name to use in `hack/test/example-ingress.yaml` from `MYDOMAIN.TDL` to your domain (ensure also that the subdomains `example1` and `example2` resolve to the server's public IP)
Then we can start skaffold using:
```
skaffold dev --port-forward
```
this will automatically:
- build your docker image every time you change some code
- update kubernetes config every time you change some file
- expose the caddy ingress controller (ports 80 and 443) on the public server

View File

@ -1,8 +1,21 @@
FROM alpine:latest as certs
RUN apk --update add ca-certificates
FROM golang:1.13.5 as builder
WORKDIR /build
ENV CGO_ENABLED=0
ENV GOOS=linux
ENV GOARCH=amd64
RUN mkdir -p ./bin
COPY go.mod go.sum ./
RUN go mod download
COPY ./cmd ./cmd
COPY ./pkg ./pkg
COPY ./internal ./internal
RUN go build -o ./bin/ingress-controller ./cmd/caddy
FROM scratch
COPY ./bin/ingress-controller .
COPY --from=builder /build/bin/ingress-controller .
COPY --from=certs /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
EXPOSE 80 443
ENTRYPOINT ["/ingress-controller"]
ENTRYPOINT ["/ingress-controller"]

View File

@ -1,3 +1,3 @@
build:
@mkdir -p bin
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o ./bin/ingress-controller ./cmd/caddy
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o ./bin/ingress-controller ./cmd/caddy

View File

@ -6,7 +6,7 @@ metadata:
kubernetes.io/ingress.class: caddy
spec:
rules:
- host: caddy2.kubed.co
- host: example1.MYDOMAIN.TDL
http:
paths:
- path: /hello2
@ -17,7 +17,7 @@ spec:
backend:
serviceName: example
servicePort: 8080
- host: danny2.kubed.co
- host: example2.MYDOMAIN.TDL
http:
paths:
- path: /hello2
@ -28,7 +28,7 @@ spec:
backend:
serviceName: example
servicePort: 8080
# tls:
# - hosts:
# - danny2.kubed.co
# secretName: danny2own
# tls:
# - secretName: ssl-example2.MYDOMAIN.TDL
# hosts:
# - example2.caddy.dev

View File

@ -8,5 +8,5 @@ spec:
app: example
ports:
- protocol: TCP
port: 80
targetPort: 8080
port: 8080
targetPort: 8080

View File

@ -8,5 +8,5 @@ spec:
app: example2
ports:
- protocol: TCP
port: 80
targetPort: 8080
port: 8080
targetPort: 8080

View File

@ -0,0 +1,307 @@
---
apiVersion: v1
kind: Namespace
metadata:
labels:
app: metallb
name: metallb-system
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
labels:
app: metallb
name: speaker
namespace: metallb-system
spec:
allowPrivilegeEscalation: false
allowedCapabilities:
- NET_ADMIN
- NET_RAW
- SYS_ADMIN
fsGroup:
rule: RunAsAny
hostNetwork: true
hostPorts:
- max: 7472
min: 7472
privileged: true
runAsUser:
rule: RunAsAny
seLinux:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
volumes:
- '*'
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app: metallb
name: controller
namespace: metallb-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app: metallb
name: speaker
namespace: metallb-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app: metallb
name: metallb-system:controller
rules:
- apiGroups:
- ''
resources:
- services
verbs:
- get
- list
- watch
- update
- apiGroups:
- ''
resources:
- services/status
verbs:
- update
- apiGroups:
- ''
resources:
- events
verbs:
- create
- patch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app: metallb
name: metallb-system:speaker
rules:
- apiGroups:
- ''
resources:
- services
- endpoints
- nodes
verbs:
- get
- list
- watch
- apiGroups:
- ''
resources:
- events
verbs:
- create
- patch
- apiGroups:
- extensions
resourceNames:
- speaker
resources:
- podsecuritypolicies
verbs:
- use
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
app: metallb
name: config-watcher
namespace: metallb-system
rules:
- apiGroups:
- ''
resources:
- configmaps
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app: metallb
name: metallb-system:controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: metallb-system:controller
subjects:
- kind: ServiceAccount
name: controller
namespace: metallb-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app: metallb
name: metallb-system:speaker
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: metallb-system:speaker
subjects:
- kind: ServiceAccount
name: speaker
namespace: metallb-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app: metallb
name: config-watcher
namespace: metallb-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: config-watcher
subjects:
- kind: ServiceAccount
name: controller
- kind: ServiceAccount
name: speaker
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
labels:
app: metallb
component: speaker
name: speaker
namespace: metallb-system
spec:
selector:
matchLabels:
app: metallb
component: speaker
template:
metadata:
annotations:
prometheus.io/port: '7472'
prometheus.io/scrape: 'true'
labels:
app: metallb
component: speaker
spec:
containers:
- args:
- --port=7472
- --config=config
env:
- name: METALLB_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: METALLB_HOST
valueFrom:
fieldRef:
fieldPath: status.hostIP
image: metallb/speaker:v0.8.2
imagePullPolicy: IfNotPresent
name: speaker
ports:
- containerPort: 7472
name: monitoring
resources:
limits:
cpu: 100m
memory: 100Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- NET_ADMIN
- NET_RAW
- SYS_ADMIN
drop:
- ALL
readOnlyRootFilesystem: true
hostNetwork: true
nodeSelector:
beta.kubernetes.io/os: linux
serviceAccountName: speaker
terminationGracePeriodSeconds: 0
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/master
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: metallb
component: controller
name: controller
namespace: metallb-system
spec:
revisionHistoryLimit: 3
selector:
matchLabels:
app: metallb
component: controller
template:
metadata:
annotations:
prometheus.io/port: '7472'
prometheus.io/scrape: 'true'
labels:
app: metallb
component: controller
spec:
containers:
- args:
- --port=7472
- --config=config
image: metallb/controller:v0.8.2
imagePullPolicy: IfNotPresent
name: controller
ports:
- containerPort: 7472
name: monitoring
resources:
limits:
cpu: 100m
memory: 100Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- all
readOnlyRootFilesystem: true
nodeSelector:
beta.kubernetes.io/os: linux
securityContext:
runAsNonRoot: true
runAsUser: 65534
serviceAccountName: controller
terminationGracePeriodSeconds: 0
---
apiVersion: v1
kind: ConfigMap
metadata:
namespace: metallb-system
name: config
data:
config: |
address-pools:
- name: default
protocol: layer2
addresses:
- 172.17.255.1-172.17.255.250

View File

@ -2,7 +2,7 @@
# if configuring caddy with a config map
# ensure that you update ./configmap.yaml
apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: Deployment
metadata:
name: caddy-ingress-controller
@ -40,7 +40,7 @@ spec:
# name: caddy-config
containers:
- name: caddy-ingress-controller
image: gcr.io/danny-239313/ingresscontroller
image: docker.io/MYACCOUNT/caddy-ingress-controller
imagePullPolicy: IfNotPresent
volumeMounts:
- name: tmp
@ -74,6 +74,7 @@ spec:
valueFrom:
fieldRef:
fieldPath: metadata.namespace
args:
- -tls
- -email=test@test.com
# args:
# - -tls
# - -tls-use-staging
# - -email=test@test.com

View File

@ -1,14 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: metrics
namespace: caddy-system
labels:
app: caddy-ingress-controller
spec:
ports:
- name: metrics
port: 80
protocol: TCP
targetPort: metrics
type: "ClusterIP"

View File

@ -1,11 +1,13 @@
apiVersion: skaffold/v1beta8
apiVersion: skaffold/v2alpha1
kind: Config
build:
artifacts:
- image: gcr.io/danny-239313/ingresscontroller
- image: docker.io/MYACCOUNT/caddy-ingress-controller
deploy:
kubectl:
manifests:
- kubernetes/deploy/00_namespace.yaml
- kubernetes/deploy/01_metallb.yaml
- hack/test/example-deployment.yaml
- hack/test/example-ingress.yaml
- hack/test/example-deployment2.yaml
@ -15,6 +17,17 @@ deploy:
- kubernetes/generated/clusterrolebinding.yaml
- kubernetes/generated/deployment.yaml
- kubernetes/generated/serviceaccount.yaml
- kubernetes/generated/metricsservice.yaml
# - kubernetes/generated/configmap.yaml
# - kubernetes/generated/loadbalancer.yaml
- kubernetes/generated/loadbalancer.yaml
portForward:
- resourceType: service
resourceName: caddy-ingress-controller
namespace: caddy-system
address: 0.0.0.0
port: 80
localPort: 80
- resourceType: service
resourceName: caddy-ingress-controller
namespace: caddy-system
address: 0.0.0.0
port: 443
localPort: 443