
add k8s custom hpa

dotbalo 6 years ago
commit 7d0867f2d6
43 changed files with 1543 additions and 1 deletion
  1. k8s-custom-hpa/LICENSE (+21 -0)
  2. k8s-custom-hpa/Makefile (+51 -0)
  3. k8s-custom-hpa/README.md (+346 -0)
  4. k8s-custom-hpa/custom-metrics-api/cm-adapter-serving-certs.yaml (+7 -0)
  5. k8s-custom-hpa/custom-metrics-api/custom-metrics-apiserver-auth-delegator-cluster-role-binding.yaml (+12 -0)
  6. k8s-custom-hpa/custom-metrics-api/custom-metrics-apiserver-auth-reader-role-binding.yaml (+13 -0)
  7. k8s-custom-hpa/custom-metrics-api/custom-metrics-apiserver-deployment.yaml (+42 -0)
  8. k8s-custom-hpa/custom-metrics-api/custom-metrics-apiserver-resource-reader-cluster-role-binding.yaml (+12 -0)
  9. k8s-custom-hpa/custom-metrics-api/custom-metrics-apiserver-service-account.yaml (+5 -0)
  10. k8s-custom-hpa/custom-metrics-api/custom-metrics-apiserver-service.yaml (+11 -0)
  11. k8s-custom-hpa/custom-metrics-api/custom-metrics-apiservice.yaml (+13 -0)
  12. k8s-custom-hpa/custom-metrics-api/custom-metrics-cluster-role.yaml (+9 -0)
  13. k8s-custom-hpa/custom-metrics-api/custom-metrics-resource-reader-cluster-role.yaml (+14 -0)
  14. k8s-custom-hpa/custom-metrics-api/hpa-custom-metrics-cluster-role-binding.yaml (+12 -0)
  15. k8s-custom-hpa/diagrams/k8s-hpa-ms.png (BIN)
  16. k8s-custom-hpa/diagrams/k8s-hpa-prom.png (BIN)
  17. k8s-custom-hpa/diagrams/k8s-hpa.png (BIN)
  18. k8s-custom-hpa/ingress/ingress-nginx-cfg.yaml (+22 -0)
  19. k8s-custom-hpa/ingress/ingress-nginx-default-backend.yaml (+52 -0)
  20. k8s-custom-hpa/ingress/ingress-nginx-dep.yaml (+62 -0)
  21. k8s-custom-hpa/ingress/ingress-nginx-rbac.yaml (+126 -0)
  22. k8s-custom-hpa/ingress/ingress-nginx-svc-nodeport.yaml (+23 -0)
  23. k8s-custom-hpa/ingress/namespace.yaml (+4 -0)
  24. k8s-custom-hpa/metrics-server/auth-delegator.yaml (+12 -0)
  25. k8s-custom-hpa/metrics-server/auth-reader.yaml (+13 -0)
  26. k8s-custom-hpa/metrics-server/metrics-apiservice.yaml (+13 -0)
  27. k8s-custom-hpa/metrics-server/metrics-server-deployment.yaml (+42 -0)
  28. k8s-custom-hpa/metrics-server/metrics-server-service.yaml (+14 -0)
  29. k8s-custom-hpa/metrics-server/resource-reader.yaml (+37 -0)
  30. k8s-custom-hpa/namespaces.yaml (+7 -0)
  31. k8s-custom-hpa/output/apiserver-key.pem (+27 -0)
  32. k8s-custom-hpa/output/apiserver.csr (+18 -0)
  33. k8s-custom-hpa/output/apiserver.pem (+21 -0)
  34. k8s-custom-hpa/podinfo/podinfo-dep.yaml (+61 -0)
  35. k8s-custom-hpa/podinfo/podinfo-hpa-custom.yaml (+17 -0)
  36. k8s-custom-hpa/podinfo/podinfo-hpa.yaml (+21 -0)
  37. k8s-custom-hpa/podinfo/podinfo-ingress.yaml (+28 -0)
  38. k8s-custom-hpa/podinfo/podinfo-svc.yaml (+16 -0)
  39. k8s-custom-hpa/prometheus/prometheus-cfg.yaml (+236 -0)
  40. k8s-custom-hpa/prometheus/prometheus-dep.yaml (+45 -0)
  41. k8s-custom-hpa/prometheus/prometheus-rbac.yaml (+40 -0)
  42. k8s-custom-hpa/prometheus/prometheus-svc.yaml (+17 -0)
  43. prometheus-operator/alertmanager.yaml (+1 -1)

+ 21 - 0
k8s-custom-hpa/LICENSE

@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2018 Stefan Prodan
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.

+ 51 - 0
k8s-custom-hpa/Makefile

@@ -0,0 +1,51 @@
+# Makefile for generating TLS certs for the Prometheus custom metrics API adapter
+
+SHELL=bash
+UNAME := $(shell uname)
+PURPOSE:=metrics
+SERVICE_NAME:=custom-metrics-apiserver
+ALT_NAMES:="custom-metrics-apiserver.monitoring","custom-metrics-apiserver.monitoring.svc"
+SECRET_FILE:=custom-metrics-api/cm-adapter-serving-certs.yaml
+
+certs: gensecret rmcerts
+
+.PHONY: gencerts
+gencerts:
+	@echo Generating TLS certs
+	@docker pull cfssl/cfssl
+	@mkdir -p output
+	@touch output/apiserver.pem
+	@touch output/apiserver-key.pem
+	@openssl req -x509 -sha256 -new -nodes -days 365 -newkey rsa:2048 -keyout $(PURPOSE)-ca.key -out $(PURPOSE)-ca.crt -subj "/CN=ca"
+	@echo '{"signing":{"default":{"expiry":"43800h","usages":["signing","key encipherment","'$(PURPOSE)'"]}}}' > "$(PURPOSE)-ca-config.json"
+	@echo '{"CN":"'$(SERVICE_NAME)'","hosts":[$(ALT_NAMES)],"key":{"algo":"rsa","size":2048}}' | docker run  -v ${HOME}:${HOME} -v ${PWD}/metrics-ca.key:/go/src/github.com/cloudflare/cfssl/metrics-ca.key -v ${PWD}/metrics-ca.crt:/go/src/github.com/cloudflare/cfssl/metrics-ca.crt -v ${PWD}/metrics-ca-config.json:/go/src/github.com/cloudflare/cfssl/metrics-ca-config.json -i cfssl/cfssl gencert -ca=metrics-ca.crt -ca-key=metrics-ca.key -config=metrics-ca-config.json - | docker run --entrypoint=cfssljson -v ${HOME}:${HOME} -v ${PWD}/output:/go/src/github.com/cloudflare/cfssl/output -i cfssl/cfssl -bare output/apiserver
+
+.PHONY: gensecret
+gensecret: gencerts
+	@echo Generating $(SECRET_FILE)
+	@echo "apiVersion: v1" > $(SECRET_FILE)
+	@echo "kind: Secret" >> $(SECRET_FILE)
+	@echo "metadata:" >> $(SECRET_FILE)
+	@echo " name: cm-adapter-serving-certs" >> $(SECRET_FILE)
+	@echo " namespace: monitoring" >> $(SECRET_FILE)
+	@echo "data:" >> $(SECRET_FILE)
+ifeq ($(UNAME), Darwin)
+	@echo " serving.crt: $$(cat output/apiserver.pem | base64)" >> $(SECRET_FILE)
+	@echo " serving.key: $$(cat output/apiserver-key.pem | base64)" >> $(SECRET_FILE)
+endif
+ifeq ($(UNAME), Linux)
+	@echo " serving.crt: $$(cat output/apiserver.pem | base64 -w 0)" >> $(SECRET_FILE)
+	@echo " serving.key: $$(cat output/apiserver-key.pem | base64 -w 0)" >> $(SECRET_FILE)
+endif
+
+.PHONY: rmcerts
+rmcerts:
+	@rm -f apiserver-key.pem apiserver.csr apiserver.pem
+	@rm -f metrics-ca-config.json metrics-ca.crt metrics-ca.key
+
+.PHONY: deploy
+deploy:
+	kubectl create -f ./namespaces.yaml
+	kubectl create -f ./metrics-server
+	kubectl create -f ./prometheus
+	kubectl create -f ./custom-metrics-api

+ 346 - 0
k8s-custom-hpa/README.md

@@ -0,0 +1,346 @@
+# k8s-prom-hpa
+
+Autoscaling is an approach to automatically scale workloads up or down based on resource usage. 
+Autoscaling in Kubernetes has two dimensions: the Cluster Autoscaler, which handles node scaling 
+operations, and the Horizontal Pod Autoscaler, which automatically scales the number of pods in a 
+deployment or replica set. The Cluster Autoscaler together with the Horizontal Pod Autoscaler can be used 
+to dynamically adjust the computing power as well as the level of parallelism that your system needs to meet SLAs.
+While the Cluster Autoscaler is highly dependent on the underlying capabilities of the cloud provider 
+that's hosting your cluster, the HPA can operate independently of your IaaS/PaaS provider. 
+
+The Horizontal Pod Autoscaler feature was first introduced in Kubernetes v1.1 and 
+has evolved a lot since then. Version 1 of the HPA scaled pods based on 
+observed CPU utilization and later on based on memory usage. 
+In Kubernetes 1.6 a new Custom Metrics API was introduced that enables the HPA to access arbitrary metrics. 
+Kubernetes 1.7 introduced the aggregation layer that allows third-party applications to extend the 
+Kubernetes API by registering themselves as API add-ons. 
+The Custom Metrics API along with the aggregation layer made it possible for monitoring systems 
+like Prometheus to expose application-specific metrics to the HPA controller.
+
+The Horizontal Pod Autoscaler is implemented as a control loop that periodically queries 
+the Resource Metrics API for core metrics like CPU/memory and the Custom Metrics API for application-specific metrics.  
+
+![Overview](https://github.com/stefanprodan/k8s-prom-hpa/blob/master/diagrams/k8s-hpa.png)
+
+What follows is a step-by-step guide on configuring HPA v2 for Kubernetes 1.9 or later. 
+You will install the Metrics Server add-on that supplies the core metrics and then you'll use a demo 
+app to showcase pod autoscaling based on CPU and memory usage. In the second part of the guide you will 
+deploy Prometheus and a custom API server. You will register the custom API server with the 
+aggregator layer and then configure HPA with custom metrics supplied by the demo application.
+
+Before you begin you need to install Go 1.8 or later and clone the [k8s-prom-hpa](https://github.com/stefanprodan/k8s-prom-hpa) repo in your `GOPATH`:
+
+```bash
+cd $GOPATH
+git clone https://github.com/stefanprodan/k8s-prom-hpa
+```
+
+### Setting up the Metrics Server
+
+The Kubernetes [Metrics Server](https://github.com/kubernetes-incubator/metrics-server) 
+is a cluster-wide aggregator of resource usage data and is the successor of [Heapster](https://github.com/kubernetes/heapster). 
+The metrics server collects CPU and memory usage for nodes and pods by polling data from the `kubernetes.summary_api`. 
+The summary API is a memory-efficient API for passing data from Kubelet/cAdvisor to the metrics server.
+
+![Metrics-Server](https://github.com/stefanprodan/k8s-prom-hpa/blob/master/diagrams/k8s-hpa-ms.png)
+
+Where the first version of the HPA needed Heapster to provide CPU and memory metrics, 
+HPA v2 on Kubernetes 1.8 requires only the metrics server, with the 
+`horizontal-pod-autoscaler-use-rest-clients` flag switched on.
+The HPA REST client is enabled by default in Kubernetes 1.9.
+GKE 1.9 comes with the Metrics Server pre-installed.
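+
+On self-hosted clusters you can check whether the flag is set on the kube-controller-manager; a quick check, 
+assuming a kubeadm-style static pod manifest (the path may differ in your setup):
+
+```bash
+# an empty result means the flag is unset and the built-in default applies
+grep horizontal-pod-autoscaler-use-rest-clients /etc/kubernetes/manifests/kube-controller-manager.yaml
+```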
+
+Deploy the Metrics Server in the `kube-system` namespace:
+
+```bash
+kubectl create -f ./metrics-server
+```
+
+After one minute the `metrics-server` starts reporting CPU and memory usage for nodes and pods.
+
+View nodes metrics:
+
+```bash
+kubectl get --raw "/apis/metrics.k8s.io/v1beta1/nodes" | jq .
+```
+
+View pods metrics:
+
+```bash
+kubectl get --raw "/apis/metrics.k8s.io/v1beta1/pods" | jq .
+```
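+
+Depending on your kubectl version, `kubectl top` should also work once the metrics API starts serving data 
+(a quick sanity check, not part of the original walkthrough):
+
+```bash
+kubectl top nodes
+kubectl top pods --all-namespaces
+```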
+
+### Auto Scaling based on CPU and memory usage
+
+You will use a small Golang-based web app to test the Horizontal Pod Autoscaler (HPA).
+
+Deploy [podinfo](https://github.com/stefanprodan/k8s-podinfo) to the `default` namespace:
+
+```bash
+kubectl create -f ./podinfo/podinfo-svc.yaml,./podinfo/podinfo-dep.yaml
+```
+
+Access `podinfo` with the NodePort service at `http://<K8S_PUBLIC_IP>:31198`.
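+
+A quick way to confirm the pods are serving traffic is to hit the `/healthz` endpoint, the same one used by 
+the liveness probe in `podinfo-dep.yaml`:
+
+```bash
+curl http://<K8S_PUBLIC_IP>:31198/healthz
+```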
+
+Next define an HPA that maintains a minimum of two replicas and scales up to ten 
+if the average CPU utilization is over 80% or if the memory usage goes over 200Mi:
+
+```yaml
+apiVersion: autoscaling/v2beta1
+kind: HorizontalPodAutoscaler
+metadata:
+  name: podinfo
+spec:
+  scaleTargetRef:
+    apiVersion: extensions/v1beta1
+    kind: Deployment
+    name: podinfo
+  minReplicas: 2
+  maxReplicas: 10
+  metrics:
+  - type: Resource
+    resource:
+      name: cpu
+      targetAverageUtilization: 80
+  - type: Resource
+    resource:
+      name: memory
+      targetAverageValue: 200Mi
+```
+
+Create the HPA:
+
+```bash
+kubectl create -f ./podinfo/podinfo-hpa.yaml
+```
+
+After a couple of seconds the HPA controller contacts the metrics server and then fetches the CPU 
+and memory usage:
+
+```bash
+kubectl get hpa
+
+NAME      REFERENCE            TARGETS                      MINPODS   MAXPODS   REPLICAS   AGE
+podinfo   Deployment/podinfo   2826240 / 200Mi, 15% / 80%   2         10        2          5m
+```
+
+In order to increase the CPU usage, run a load test with `rakyll/hey`:
+
+```bash
+#install hey
+go get -u github.com/rakyll/hey
+
+#do 10K requests
+hey -n 10000 -q 10 -c 5 http://<K8S_PUBLIC_IP>:31198/
+```
+
+You can monitor the HPA events with:
+
+```bash
+$ kubectl describe hpa
+
+Events:
+  Type    Reason             Age   From                       Message
+  ----    ------             ----  ----                       -------
+  Normal  SuccessfulRescale  7m    horizontal-pod-autoscaler  New size: 4; reason: cpu resource utilization (percentage of request) above target
+  Normal  SuccessfulRescale  3m    horizontal-pod-autoscaler  New size: 8; reason: cpu resource utilization (percentage of request) above target
+```
+
+Remove `podinfo` for the moment. You will deploy it again later on in this tutorial:
+
+```bash
+kubectl delete -f ./podinfo/podinfo-hpa.yaml,./podinfo/podinfo-dep.yaml,./podinfo/podinfo-svc.yaml
+```
+
+### Setting up a Custom Metrics Server 
+
+In order to scale based on custom metrics you need two components: 
+one that collects metrics from your applications and stores them in the [Prometheus](https://prometheus.io) time series database, 
+and one that extends the Kubernetes Custom Metrics API with the metrics supplied by the collector, the [k8s-prometheus-adapter](https://github.com/DirectXMan12/k8s-prometheus-adapter).
+
+![Custom-Metrics-Server](https://github.com/stefanprodan/k8s-prom-hpa/blob/master/diagrams/k8s-hpa-prom.png)
+
+You will deploy Prometheus and the adapter in a dedicated namespace. 
+
+Create the `monitoring` namespace:
+
+```bash
+kubectl create -f ./namespaces.yaml
+```
+
+Deploy Prometheus v2 in the `monitoring` namespace:
+
+*If you are deploying to GKE you might get an error saying `Error from server (Forbidden): error when creating`. 
+See [RBAC on GKE](https://github.com/coreos/prometheus-operator/blob/master/Documentation/troubleshooting.md) for how to resolve that issue.*
+
+```bash
+kubectl create -f ./prometheus
+```
+
+Generate the TLS certificates needed by the Prometheus adapter:
+
+```bash
+make certs
+```
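+
+The `certs` target writes the base64-encoded certificate and key into `custom-metrics-api/cm-adapter-serving-certs.yaml` 
+(see the Makefile above). A quick check that the Secret manifest was generated, run from the repo root:
+
+```bash
+# the Secret should contain non-empty serving.crt and serving.key entries
+grep -A 2 '^data:' custom-metrics-api/cm-adapter-serving-certs.yaml
+```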
+
+Deploy the Prometheus custom metrics API adapter:
+
+```bash
+kubectl create -f ./custom-metrics-api
+```
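+
+Before querying custom metrics you can verify that the adapter registered itself with the aggregation layer; 
+the APIService name below comes from `custom-metrics-api/custom-metrics-apiservice.yaml`:
+
+```bash
+kubectl get apiservice v1beta1.custom.metrics.k8s.io
+```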
+
+List the custom metrics provided by Prometheus:
+
+```bash
+kubectl get --raw "/apis/custom.metrics.k8s.io/v1beta1" | jq .
+```
+
+Get the FS usage for all the pods in the `monitoring` namespace:
+
+```bash
+kubectl get --raw "/apis/custom.metrics.k8s.io/v1beta1/namespaces/monitoring/pods/*/fs_usage_bytes" | jq .
+```
+
+### Auto Scaling based on custom metrics
+
+Create `podinfo` NodePort service and deployment in the `default` namespace:
+
+```bash
+kubectl create -f ./podinfo/podinfo-svc.yaml,./podinfo/podinfo-dep.yaml
+```
+
+The `podinfo` app exposes a custom metric named `http_requests_total`. 
+The Prometheus adapter removes the `_total` suffix and marks the metric as a counter metric.
+
+Get the total requests per second from the custom metrics API:
+
+```bash
+kubectl get --raw "/apis/custom.metrics.k8s.io/v1beta1/namespaces/default/pods/*/http_requests" | jq .
+```
+```json
+{
+  "kind": "MetricValueList",
+  "apiVersion": "custom.metrics.k8s.io/v1beta1",
+  "metadata": {
+    "selfLink": "/apis/custom.metrics.k8s.io/v1beta1/namespaces/default/pods/%2A/http_requests"
+  },
+  "items": [
+    {
+      "describedObject": {
+        "kind": "Pod",
+        "namespace": "default",
+        "name": "podinfo-6b86c8ccc9-kv5g9",
+        "apiVersion": "/__internal"
+      },
+      "metricName": "http_requests",
+      "timestamp": "2018-01-10T16:49:07Z",
+      "value": "901m"
+    },
+    {
+      "describedObject": {
+        "kind": "Pod",
+        "namespace": "default",
+        "name": "podinfo-6b86c8ccc9-nm7bl",
+        "apiVersion": "/__internal"
+      },
+      "metricName": "http_requests",
+      "timestamp": "2018-01-10T16:49:07Z",
+      "value": "898m"
+    }
+  ]
+}
+```
+
+The `m` stands for milli-units, so for example `901m` means 901 milli-requests per second (about 0.9 requests per second).
+
+Create an HPA that will scale up the `podinfo` deployment if the number of requests goes over 10 per second:
+
+```yaml
+apiVersion: autoscaling/v2beta1
+kind: HorizontalPodAutoscaler
+metadata:
+  name: podinfo
+spec:
+  scaleTargetRef:
+    apiVersion: extensions/v1beta1
+    kind: Deployment
+    name: podinfo
+  minReplicas: 2
+  maxReplicas: 10
+  metrics:
+  - type: Pods
+    pods:
+      metricName: http_requests
+      targetAverageValue: 10
+```
+
+Deploy the `podinfo` HPA in the `default` namespace:
+
+```bash
+kubectl create -f ./podinfo/podinfo-hpa-custom.yaml
+```
+
+After a couple of seconds the HPA fetches the `http_requests` value from the metrics API:
+
+```bash
+kubectl get hpa
+
+NAME      REFERENCE            TARGETS     MINPODS   MAXPODS   REPLICAS   AGE
+podinfo   Deployment/podinfo   899m / 10   2         10        2          1m
+```
+
+Apply some load on the `podinfo` service with 25 requests per second:
+
+```bash
+#install hey
+go get -u github.com/rakyll/hey
+
+#do 10K requests rate limited at 25 QPS
+hey -n 10000 -q 5 -c 5 http://<K8S-IP>:31198/healthz
+```
+
+After a few minutes the HPA begins to scale up the deployment:
+
+```
+kubectl describe hpa
+
+Name:                       podinfo
+Namespace:                  default
+Reference:                  Deployment/podinfo
+Metrics:                    ( current / target )
+  "http_requests" on pods:  9059m / 10
+Min replicas:               2
+Max replicas:               10
+
+Events:
+  Type    Reason             Age   From                       Message
+  ----    ------             ----  ----                       -------
+  Normal  SuccessfulRescale  2m    horizontal-pod-autoscaler  New size: 3; reason: pods metric http_requests above target
+```
+
+At the current rate of requests per second the deployment will never reach the max value of 10 pods. 
+Three replicas are enough to keep the RPS under 10 per pod: 25 RPS spread across three pods is roughly 8.3 RPS each.
+
+After the load test finishes, the HPA scales the deployment back down to its initial number of replicas:
+
+```
+Events:
+  Type    Reason             Age   From                       Message
+  ----    ------             ----  ----                       -------
+  Normal  SuccessfulRescale  5m    horizontal-pod-autoscaler  New size: 3; reason: pods metric http_requests above target
+  Normal  SuccessfulRescale  21s   horizontal-pod-autoscaler  New size: 2; reason: All metrics below target
+```
+
+You may have noticed that the autoscaler doesn't react immediately to usage spikes. 
+By default the metrics sync happens once every 30 seconds and scaling up/down can 
+only happen if there was no rescaling within the last 3-5 minutes. 
+In this way, the HPA prevents rapid execution of conflicting decisions and gives time for the 
+Cluster Autoscaler to kick in.
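+
+These intervals map to kube-controller-manager flags. As an illustration, the settings that correspond to the 
+behavior described above would look like the following (flag names are the ones used in this Kubernetes 
+generation; later releases renamed the delay flags, so treat this as a sketch rather than exact defaults):
+
+```bash
+--horizontal-pod-autoscaler-sync-period=30s
+--horizontal-pod-autoscaler-upscale-delay=3m0s
+--horizontal-pod-autoscaler-downscale-delay=5m0s
+```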
+
+### Conclusions
+
+Not all systems can meet their SLAs by relying on CPU/memory usage metrics alone; most web and mobile 
+backends require autoscaling based on requests per second to handle traffic bursts. 
+For ETL apps, autoscaling could be triggered by the job queue length exceeding some threshold, and so on. 
+By instrumenting your applications with Prometheus and exposing the right metrics for autoscaling, you can 
+fine-tune your apps to better handle bursts and ensure high availability.

File diff suppressed because it is too large
+ 7 - 0
k8s-custom-hpa/custom-metrics-api/cm-adapter-serving-certs.yaml


+ 12 - 0
k8s-custom-hpa/custom-metrics-api/custom-metrics-apiserver-auth-delegator-cluster-role-binding.yaml

@@ -0,0 +1,12 @@
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+  name: custom-metrics:system:auth-delegator
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: system:auth-delegator
+subjects:
+- kind: ServiceAccount
+  name: custom-metrics-apiserver
+  namespace: monitoring

+ 13 - 0
k8s-custom-hpa/custom-metrics-api/custom-metrics-apiserver-auth-reader-role-binding.yaml

@@ -0,0 +1,13 @@
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: RoleBinding
+metadata:
+  name: custom-metrics-auth-reader
+  namespace: kube-system
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: extension-apiserver-authentication-reader
+subjects:
+- kind: ServiceAccount
+  name: custom-metrics-apiserver
+  namespace: monitoring

+ 42 - 0
k8s-custom-hpa/custom-metrics-api/custom-metrics-apiserver-deployment.yaml

@@ -0,0 +1,42 @@
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  labels:
+    app: custom-metrics-apiserver
+  name: custom-metrics-apiserver
+  namespace: monitoring
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: custom-metrics-apiserver
+  template:
+    metadata:
+      labels:
+        app: custom-metrics-apiserver
+      name: custom-metrics-apiserver
+    spec:
+      serviceAccountName: custom-metrics-apiserver
+      containers:
+      - name: custom-metrics-apiserver
+        image: quay.io/coreos/k8s-prometheus-adapter-amd64:v0.2.0
+        args:
+        - /adapter
+        - --secure-port=6443
+        - --tls-cert-file=/var/run/serving-cert/serving.crt
+        - --tls-private-key-file=/var/run/serving-cert/serving.key
+        - --logtostderr=true
+        - --prometheus-url=http://prometheus:9090/
+        - --metrics-relist-interval=30s
+        - --rate-interval=5m
+        - --v=10
+        ports:
+        - containerPort: 6443
+        volumeMounts:
+        - mountPath: /var/run/serving-cert
+          name: volume-serving-cert
+          readOnly: true
+      volumes:
+      - name: volume-serving-cert
+        secret:
+          secretName: cm-adapter-serving-certs

+ 12 - 0
k8s-custom-hpa/custom-metrics-api/custom-metrics-apiserver-resource-reader-cluster-role-binding.yaml

@@ -0,0 +1,12 @@
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+  name: custom-metrics-resource-reader
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: custom-metrics-resource-reader
+subjects:
+- kind: ServiceAccount
+  name: custom-metrics-apiserver
+  namespace: monitoring

+ 5 - 0
k8s-custom-hpa/custom-metrics-api/custom-metrics-apiserver-service-account.yaml

@@ -0,0 +1,5 @@
+kind: ServiceAccount
+apiVersion: v1
+metadata:
+  name: custom-metrics-apiserver
+  namespace: monitoring

+ 11 - 0
k8s-custom-hpa/custom-metrics-api/custom-metrics-apiserver-service.yaml

@@ -0,0 +1,11 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: custom-metrics-apiserver
+  namespace: monitoring
+spec:
+  ports:
+  - port: 443
+    targetPort: 6443
+  selector:
+    app: custom-metrics-apiserver

+ 13 - 0
k8s-custom-hpa/custom-metrics-api/custom-metrics-apiservice.yaml

@@ -0,0 +1,13 @@
+apiVersion: apiregistration.k8s.io/v1beta1
+kind: APIService
+metadata:
+  name: v1beta1.custom.metrics.k8s.io
+spec:
+  service:
+    name: custom-metrics-apiserver
+    namespace: monitoring
+  group: custom.metrics.k8s.io
+  version: v1beta1
+  insecureSkipTLSVerify: true
+  groupPriorityMinimum: 100
+  versionPriority: 100

+ 9 - 0
k8s-custom-hpa/custom-metrics-api/custom-metrics-cluster-role.yaml

@@ -0,0 +1,9 @@
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+  name: custom-metrics-server-resources
+rules:
+- apiGroups:
+  - custom.metrics.k8s.io
+  resources: ["*"]
+  verbs: ["*"]

+ 14 - 0
k8s-custom-hpa/custom-metrics-api/custom-metrics-resource-reader-cluster-role.yaml

@@ -0,0 +1,14 @@
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+  name: custom-metrics-resource-reader
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - namespaces
+  - pods
+  - services
+  verbs:
+  - get
+  - list

+ 12 - 0
k8s-custom-hpa/custom-metrics-api/hpa-custom-metrics-cluster-role-binding.yaml

@@ -0,0 +1,12 @@
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+  name: hpa-controller-custom-metrics
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: custom-metrics-server-resources
+subjects:
+- kind: ServiceAccount
+  name: horizontal-pod-autoscaler
+  namespace: kube-system

BIN
k8s-custom-hpa/diagrams/k8s-hpa-ms.png


BIN
k8s-custom-hpa/diagrams/k8s-hpa-prom.png


BIN
k8s-custom-hpa/diagrams/k8s-hpa.png


+ 22 - 0
k8s-custom-hpa/ingress/ingress-nginx-cfg.yaml

@@ -0,0 +1,22 @@
+---
+kind: ConfigMap
+apiVersion: v1
+data:
+  enable-vts-status: "true"
+metadata:
+  name: nginx-configuration
+  namespace: ingress-nginx
+  labels:
+    app: ingress-nginx
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+  name: tcp-services
+  namespace: ingress-nginx
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+  name: udp-services
+  namespace: ingress-nginx

+ 52 - 0
k8s-custom-hpa/ingress/ingress-nginx-default-backend.yaml

@@ -0,0 +1,52 @@
+---
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  name: default-http-backend
+  labels:
+    app: default-http-backend
+  namespace: ingress-nginx
+spec:
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: default-http-backend
+    spec:
+      terminationGracePeriodSeconds: 60
+      containers:
+      - name: default-http-backend
+        # Any image is permissible as long as:
+        # 1. It serves a 404 page at /
+        # 2. It serves 200 on a /healthz endpoint
+        image: gcr.io/google_containers/defaultbackend:1.4
+        livenessProbe:
+          httpGet:
+            path: /healthz
+            port: 8080
+            scheme: HTTP
+          initialDelaySeconds: 30
+          timeoutSeconds: 5
+        ports:
+        - containerPort: 8080
+        resources:
+          limits:
+            cpu: 10m
+            memory: 20Mi
+          requests:
+            cpu: 10m
+            memory: 20Mi
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: default-http-backend
+  namespace: ingress-nginx
+  labels:
+    app: default-http-backend
+spec:
+  ports:
+  - port: 80
+    targetPort: 8080
+  selector:
+    app: default-http-backend

+ 62 - 0
k8s-custom-hpa/ingress/ingress-nginx-dep.yaml

@@ -0,0 +1,62 @@
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  name: nginx-ingress-controller
+  namespace: ingress-nginx 
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: ingress-nginx
+  template:
+    metadata:
+      labels:
+        app: ingress-nginx
+      annotations:
+        prometheus.io/port: '10254'
+        prometheus.io/scrape: 'true'
+    spec:
+      serviceAccountName: nginx-ingress-serviceaccount
+      containers:
+        - name: nginx-ingress-controller
+          image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.9.0
+          args:
+            - /nginx-ingress-controller
+            - --default-backend-service=$(POD_NAMESPACE)/default-http-backend
+            - --configmap=$(POD_NAMESPACE)/nginx-configuration
+            - --tcp-services-configmap=$(POD_NAMESPACE)/tcp-services
+            - --udp-services-configmap=$(POD_NAMESPACE)/udp-services
+            - --annotations-prefix=nginx.ingress.kubernetes.io
+          env:
+            - name: POD_NAME
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.name
+            - name: POD_NAMESPACE
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
+          ports:
+          - name: http
+            containerPort: 80
+          - name: https
+            containerPort: 443
+          livenessProbe:
+            failureThreshold: 3
+            httpGet:
+              path: /healthz
+              port: 10254
+              scheme: HTTP
+            initialDelaySeconds: 10
+            periodSeconds: 10
+            successThreshold: 1
+            timeoutSeconds: 1
+          readinessProbe:
+            failureThreshold: 3
+            httpGet:
+              path: /healthz
+              port: 10254
+              scheme: HTTP
+            periodSeconds: 10
+            successThreshold: 1
+            timeoutSeconds: 1

+ 126 - 0
k8s-custom-hpa/ingress/ingress-nginx-rbac.yaml

@@ -0,0 +1,126 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: nginx-ingress-serviceaccount
+  namespace: ingress-nginx
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+  name: nginx-ingress-clusterrole
+rules:
+  - apiGroups:
+      - ""
+    resources:
+      - configmaps
+      - endpoints
+      - nodes
+      - pods
+      - secrets
+    verbs:
+      - list
+      - watch
+  - apiGroups:
+      - ""
+    resources:
+      - nodes
+    verbs:
+      - get
+  - apiGroups:
+      - ""
+    resources:
+      - services
+    verbs:
+      - get
+      - list
+      - watch
+  - apiGroups:
+      - "extensions"
+    resources:
+      - ingresses
+    verbs:
+      - get
+      - list
+      - watch
+  - apiGroups:
+      - ""
+    resources:
+        - events
+    verbs:
+        - create
+        - patch
+  - apiGroups:
+      - "extensions"
+    resources:
+      - ingresses/status
+    verbs:
+      - update
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: Role
+metadata:
+  name: nginx-ingress-role
+  namespace: ingress-nginx
+rules:
+  - apiGroups:
+      - ""
+    resources:
+      - configmaps
+      - pods
+      - secrets
+      - namespaces
+    verbs:
+      - get
+  - apiGroups:
+      - ""
+    resources:
+      - configmaps
+    resourceNames:
+      # Defaults to "<election-id>-<ingress-class>"
+      # Here: "<ingress-controller-leader>-<nginx>"
+      # This has to be adapted if you change either parameter
+      # when launching the nginx-ingress-controller.
+      - "ingress-controller-leader-nginx"
+    verbs:
+      - get
+      - update
+  - apiGroups:
+      - ""
+    resources:
+      - configmaps
+    verbs:
+      - create
+  - apiGroups:
+      - ""
+    resources:
+      - endpoints
+    verbs:
+      - get
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: RoleBinding
+metadata:
+  name: nginx-ingress-role-nisa-binding
+  namespace: ingress-nginx
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: nginx-ingress-role
+subjects:
+  - kind: ServiceAccount
+    name: nginx-ingress-serviceaccount
+    namespace: ingress-nginx
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+  name: nginx-ingress-clusterrole-nisa-binding
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: nginx-ingress-clusterrole
+subjects:
+  - kind: ServiceAccount
+    name: nginx-ingress-serviceaccount
+    namespace: ingress-nginx

+ 23 - 0
k8s-custom-hpa/ingress/ingress-nginx-svc-nodeport.yaml

@@ -0,0 +1,23 @@
+# edit /etc/kubernetes/manifests/kube-apiserver.yaml
+# add command argument - --service-node-port-range=80-32767
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: ingress-nginx
+  namespace: ingress-nginx
+spec:
+  type: NodePort
+  ports:
+  - name: http
+    port: 80
+    targetPort: 80
+    nodePort: 80
+    protocol: TCP
+  - name: https
+    port: 443
+    targetPort: 443
+    nodePort: 443
+    protocol: TCP
+  selector:
+    app: ingress-nginx

+ 4 - 0
k8s-custom-hpa/ingress/namespace.yaml

@@ -0,0 +1,4 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: ingress-nginx

+ 12 - 0
k8s-custom-hpa/metrics-server/auth-delegator.yaml

@@ -0,0 +1,12 @@
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+  name: metrics-server:system:auth-delegator
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: system:auth-delegator
+subjects:
+- kind: ServiceAccount
+  name: metrics-server
+  namespace: kube-system

+ 13 - 0
k8s-custom-hpa/metrics-server/auth-reader.yaml

@@ -0,0 +1,13 @@
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: RoleBinding
+metadata:
+  name: metrics-server-auth-reader
+  namespace: kube-system
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: extension-apiserver-authentication-reader
+subjects:
+- kind: ServiceAccount
+  name: metrics-server
+  namespace: kube-system

+ 13 - 0
k8s-custom-hpa/metrics-server/metrics-apiservice.yaml

@@ -0,0 +1,13 @@
+apiVersion: apiregistration.k8s.io/v1beta1
+kind: APIService
+metadata:
+  name: v1beta1.metrics.k8s.io
+spec:
+  service:
+    name: metrics-server
+    namespace: kube-system
+  group: metrics.k8s.io
+  version: v1beta1
+  insecureSkipTLSVerify: true
+  groupPriorityMinimum: 100
+  versionPriority: 100

+ 42 - 0
k8s-custom-hpa/metrics-server/metrics-server-deployment.yaml

@@ -0,0 +1,42 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: metrics-server
+  namespace: kube-system
+---
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  name: metrics-server
+  namespace: kube-system
+  labels:
+    k8s-app: metrics-server
+spec:
+  selector:
+    matchLabels:
+      k8s-app: metrics-server
+  template:
+    metadata:
+      name: metrics-server
+      labels:
+        k8s-app: metrics-server
+    spec:
+      serviceAccountName: metrics-server
+      volumes:
+      # mount in tmp so we can safely use from-scratch images and/or read-only containers
+      - name: tmp-dir
+        emptyDir: {}
+      containers:
+      - command:
+        - /metrics-server
+        - --kubelet-insecure-tls
+        - --kubelet-preferred-address-types=InternalIP
+        # - --source=kubernetes.summary_api:https://kubernetes.default?kubeletHttps=true&kubeletPort=10250&insecure=true
+        name: metrics-server
+        image: dotbalo/metrics-server-amd64:v0.3.1
+        imagePullPolicy: Always
+        volumeMounts:
+        - name: tmp-dir
+          mountPath: /tmp
+

+ 14 - 0
k8s-custom-hpa/metrics-server/metrics-server-service.yaml

@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: metrics-server
+  namespace: kube-system
+  labels:
+    kubernetes.io/name: "Metrics-server"
+spec:
+  selector:
+    k8s-app: metrics-server
+  ports:
+  - port: 443
+    protocol: TCP
+    targetPort: 443

+ 37 - 0
k8s-custom-hpa/metrics-server/resource-reader.yaml

@@ -0,0 +1,37 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: system:metrics-server
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - pods
+  - nodes
+  - nodes/stats
+  - namespaces
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - "extensions"
+  resources:
+  - deployments
+  verbs:
+  - get
+  - list
+  - watch
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: system:metrics-server
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: system:metrics-server
+subjects:
+- kind: ServiceAccount
+  name: metrics-server
+  namespace: kube-system

+ 7 - 0
k8s-custom-hpa/namespaces.yaml

@@ -0,0 +1,7 @@
+---
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: monitoring
+
+

+ 27 - 0
k8s-custom-hpa/output/apiserver-key.pem

@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEowIBAAKCAQEArpHOTIqP4Or/O9Bj1gujWfORzQM7Xa9lKL4IR50pFH57Qbix
+EE72I/DAcPEXqdDjH8kSfIm/mcmGcbxcpXfIrkUJtHD3CqAfsgpaVttFtAAJTwpr
+y0m3Qb5sYLKIjMZ56lYcpW65tNibfeU2DJUPD/WDTe9/Q4+RkEOKIsbuXDvU3V80
+KLzTkhtJJCv3V/4mZDKrTgkdUfUUakQ3iSRdZwPOvFpW06urJmeKMWMUBdKmU/zO
+Zn89xHRDtTv/a1pgz+dm6AIpdhYULwI6bcBP5pp0eGBB204KT1OE2lbkpkKzv7RV
+fV44ACbnVzx6qIYzLpRFZL2eTAuGVX3uK3KbMwIDAQABAoIBAQCF4cEMJsCNyOyS
+nRHUWPygwhKx3qarBRdaFdQXyEp3M0God9MkJMrVaZDNjbmOPWvTNsx8wtA1S79W
+fxW2uZUpQj1dK73rYj6stw/tySE+GUEKY32kXRa0zywjRQlAnf9mX0B9B9xY/D7U
+YPQA1ljE7pATN3itAfCraYEJhvPLu7nABGO+BRPWYeu3w8EJDHz0ftdYUI1lTS8b
+hMCU/a5cQlRXDXwMMSh/tYsHcInIxcNpzj6VIS++cbibYuBCiHZx5ZhNfB/1GzHs
+6VXO0gO/e1cjUdwL/e/7jHFYZK6kiAtvUkMmrGdoYrX3IxFWYrQ4iI9wkLh53C7i
+ek9WXpSRAoGBAOdvj1kN79t6BIlNR3o7C/zT0Wpa6b1aUIUmHF4aIfmzU4qMWpH0
+4HYwcN/tyBqBkSDDelHcy10A0szF7mNhze+EBAJYLI6abMXIyzO2qveYddDt5FM+
+x67YQaHo9WetKUzFPGFqB7Y1d0jQk9R3fYObJDkEe9oUs/v0L2ICpJ2XAoGBAMEZ
+HAXBBZOv88c3VPxXAxpKCtTK3ZS5gF/qTNi+A/OJjZvBPahHMLkl7p/EUMd+XtFI
+qks3q8PWB83h1DJs572Cyz/f0+gWNUu1n5kysFJBx/0EUjs0lKOZZYjT6SiUtOF/
+Ahy3JB0FizCqAEhDc31PD6RtDOxUAPXtVhOL3BrFAoGAMJ3PyA5Ub49vOoSAWl3j
+A8ZfPG9CoKNDPOUnmmD0+CZCi102fWiqakl8vJ2PpnnHSdGmdWHjWnMc1SVTEY9v
+GjxqNy/clEw+Q0IN5ZweyAo/3ZbLmu0aN1F4YBG204OdpGHecDw8yHrsxuAB3eUH
++chyUIPvnH0y3d+KwJT7jZsCgYAgGFlVB4EyzAMb8LF6g8frPbVRKyrrRm31A0+k
+kui7OPjFk7f42WRa0dg4y/uVPQbriEVt3SH6mf+QE+IeQ4W9NRktU7m4EMDpBZYy
+c39lHLPt9SLp3lJ6YsvCk15ktVrfRCoItdQV0s8Q1qDXYga3JEX4UXvdaBUEy18M
+Pqnl9QKBgHJvBTXi60QSs0JfapN4CpJ7C3uX8G8CRXzvWAGXM4MruWQSLKWU1BPI
+e97C4z1UbGfJ4qN1NW13eXlChaJ9dtDY6tqEtadfy5Krhxg5Medr+6UpQNZHO6rk
+5qjvhK+2KHOKRYchWmvWkpn3eKvmkRS6EfGXMNFkrZxhNBl7MRBs
+-----END RSA PRIVATE KEY-----

+ 18 - 0
k8s-custom-hpa/output/apiserver.csr

@@ -0,0 +1,18 @@
+-----BEGIN CERTIFICATE REQUEST-----
+MIIC0jCCAboCAQAwIzEhMB8GA1UEAxMYY3VzdG9tLW1ldHJpY3MtYXBpc2VydmVy
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArpHOTIqP4Or/O9Bj1guj
+WfORzQM7Xa9lKL4IR50pFH57QbixEE72I/DAcPEXqdDjH8kSfIm/mcmGcbxcpXfI
+rkUJtHD3CqAfsgpaVttFtAAJTwpry0m3Qb5sYLKIjMZ56lYcpW65tNibfeU2DJUP
+D/WDTe9/Q4+RkEOKIsbuXDvU3V80KLzTkhtJJCv3V/4mZDKrTgkdUfUUakQ3iSRd
+ZwPOvFpW06urJmeKMWMUBdKmU/zOZn89xHRDtTv/a1pgz+dm6AIpdhYULwI6bcBP
+5pp0eGBB204KT1OE2lbkpkKzv7RVfV44ACbnVzx6qIYzLpRFZL2eTAuGVX3uK3Kb
+MwIDAQABoGowaAYJKoZIhvcNAQkOMVswWTBXBgNVHREEUDBOgiNjdXN0b20tbWV0
+cmljcy1hcGlzZXJ2ZXIubW9uaXRvcmluZ4InY3VzdG9tLW1ldHJpY3MtYXBpc2Vy
+dmVyLm1vbml0b3Jpbmcuc3ZjMA0GCSqGSIb3DQEBCwUAA4IBAQA7WON+m/aX4rzm
+qAR6wE65VQQX2cJx2rpMXgHT+gvtkL+N5wtXrdxlbMlyTZHgMQEnsBZt1YIVIWS3
+1+mNlKCYUTgpnmf3TbT30Ucs56wcM8cIVoiwkgHxned4VsuK40b6y3VVDTwBWobR
+VQ2rvQRip4mI36YrWmDXUgViC+uIpmgzoaxQHcbEiDdaSdXyMtgLDuczxfwqDPQ0
+wUTgDY86Blb2CNjwDiiVf8VbmBln1UsDZUtQuNO8vuNEkIMVxk3qyMuy6VJMnM6+
+YFB52Ph6xDXbOEkAEnsBJRPUFov+wsK7N+rc2UYXEiKbGjKWXmclJxDiY7O4yHwt
+Qlf+dcso
+-----END CERTIFICATE REQUEST-----

+ 21 - 0
k8s-custom-hpa/output/apiserver.pem

@@ -0,0 +1,21 @@
+-----BEGIN CERTIFICATE-----
+MIIDeTCCAmGgAwIBAgIUaoFA3PnX+H96yncteE2Tmlx2tj4wDQYJKoZIhvcNAQEL
+BQAwDTELMAkGA1UEAwwCY2EwHhcNMTgxMjI1MDI0MjAwWhcNMjMxMjI0MDI0MjAw
+WjAjMSEwHwYDVQQDExhjdXN0b20tbWV0cmljcy1hcGlzZXJ2ZXIwggEiMA0GCSqG
+SIb3DQEBAQUAA4IBDwAwggEKAoIBAQCukc5Mio/g6v870GPWC6NZ85HNAztdr2Uo
+vghHnSkUfntBuLEQTvYj8MBw8Rep0OMfyRJ8ib+ZyYZxvFyld8iuRQm0cPcKoB+y
+ClpW20W0AAlPCmvLSbdBvmxgsoiMxnnqVhylbrm02Jt95TYMlQ8P9YNN739Dj5GQ
+Q4oixu5cO9TdXzQovNOSG0kkK/dX/iZkMqtOCR1R9RRqRDeJJF1nA868WlbTq6sm
+Z4oxYxQF0qZT/M5mfz3EdEO1O/9rWmDP52boAil2FhQvAjptwE/mmnR4YEHbTgpP
+U4TaVuSmQrO/tFV9XjgAJudXPHqohjMulEVkvZ5MC4ZVfe4rcpszAgMBAAGjgbow
+gbcwDgYDVR0PAQH/BAQDAgWgMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFAWw2RpX
+YUxOTm1HYvNP5RQC0kYXMB8GA1UdIwQYMBaAFMx2FHTDEVtPWMh+LnQNzEn1KGCe
+MFcGA1UdEQRQME6CI2N1c3RvbS1tZXRyaWNzLWFwaXNlcnZlci5tb25pdG9yaW5n
+gidjdXN0b20tbWV0cmljcy1hcGlzZXJ2ZXIubW9uaXRvcmluZy5zdmMwDQYJKoZI
+hvcNAQELBQADggEBAJsPY3+dOh7/pUjjIHspoaT8dT1aJaazUrrhnQ8kWYx6sU9i
+D3LUsPJL6fOmxqAUsEHLlnVn7N7XrBAqsjN3ekR6oNx03GYuh5r1eqZVUjTEyr6q
+qGK9nRDE5HBzBoMJQwgNAW0FVdHspOVisYhiIzi9kSoiV1fo5fEr1sh4+AjAa2xJ
+/WFbQhRXbBl4JIvvIUrmeWBOkzBGz/u9e2b5946FJNBlY17bFqbSjbn28WrGd7+K
+2G9RSF6lR1rb7rtRywlROKs9jfNFZyl6AzqhuLbcDteGjj/zG6ndIIjnVHf4IvM1
+NB0DBsOmsDBTOp/mYaP9lHYwOmA+73kRL1iysaY=
+-----END CERTIFICATE-----

+ 61 - 0
k8s-custom-hpa/podinfo/podinfo-dep.yaml

@@ -0,0 +1,61 @@
+---
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  name: podinfo
+spec:
+  replicas: 2
+  template:
+    metadata:
+      labels:
+        app: podinfo
+      annotations:
+        prometheus.io/scrape: 'true'
+    spec:
+      containers:
+      - name: podinfod
+        image: stefanprodan/podinfo:0.0.1
+        imagePullPolicy: Always
+        command:
+          - ./podinfo
+          - -port=9898
+          - -logtostderr=true
+          - -v=2
+        volumeMounts:
+          - name: metadata
+            mountPath: /etc/podinfod/metadata
+            readOnly: true
+        ports:
+        - containerPort: 9898
+          protocol: TCP
+        readinessProbe:
+          httpGet:
+            path: /readyz
+            port: 9898
+          initialDelaySeconds: 1
+          periodSeconds: 2
+          failureThreshold: 1
+        livenessProbe:
+          httpGet:
+            path: /healthz
+            port: 9898
+          initialDelaySeconds: 1
+          periodSeconds: 3
+          failureThreshold: 2
+        resources:
+          requests:
+            memory: "32Mi"
+            cpu: "1m"
+          limits:
+            memory: "256Mi"
+            cpu: "100m"
+      volumes:
+        - name: metadata
+          downwardAPI:
+            items:
+              - path: "labels"
+                fieldRef:
+                  fieldPath: metadata.labels
+              - path: "annotations"
+                fieldRef:
+                  fieldPath: metadata.annotations

+ 17 - 0
k8s-custom-hpa/podinfo/podinfo-hpa-custom.yaml

@@ -0,0 +1,17 @@
+---
+apiVersion: autoscaling/v2beta1
+kind: HorizontalPodAutoscaler
+metadata:
+  name: podinfo
+spec:
+  scaleTargetRef:
+    apiVersion: extensions/v1beta1
+    kind: Deployment
+    name: podinfo
+  minReplicas: 2
+  maxReplicas: 10
+  metrics:
+  - type: Pods
+    pods:
+      metricName: http_requests
+      targetAverageValue: 10

+ 21 - 0
k8s-custom-hpa/podinfo/podinfo-hpa.yaml

@@ -0,0 +1,21 @@
+---
+apiVersion: autoscaling/v2beta1
+kind: HorizontalPodAutoscaler
+metadata:
+  name: podinfo
+spec:
+  scaleTargetRef:
+    apiVersion: extensions/v1beta1
+    kind: Deployment
+    name: podinfo
+  minReplicas: 2
+  maxReplicas: 10
+  metrics:
+  - type: Resource
+    resource:
+      name: cpu
+      targetAverageUtilization: 80
+  - type: Resource
+    resource:
+      name: memory
+      targetAverageValue: 200Mi

+ 28 - 0
k8s-custom-hpa/podinfo/podinfo-ingress.yaml

@@ -0,0 +1,28 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: podinfo-service
+spec:
+  ports:
+  - port: 9898
+    targetPort: 9898
+    protocol: TCP
+  selector:
+    app: podinfo
+---
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+  name: podinfo
+  annotations:
+    kubernetes.io/ingress.class: nginx
+spec:
+  rules:
+  - host: podinfo.weavedx.com
+    http:
+      paths:
+      - path: "/"
+        backend:
+          serviceName: podinfo-service
+          servicePort: 9898

+ 16 - 0
k8s-custom-hpa/podinfo/podinfo-svc.yaml

@@ -0,0 +1,16 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: podinfo
+  labels:
+    app: podinfo
+spec:
+  type: NodePort
+  ports:
+    - port: 9898
+      targetPort: 9898
+      nodePort: 31198
+      protocol: TCP
+  selector:
+    app: podinfo

+ 236 - 0
k8s-custom-hpa/prometheus/prometheus-cfg.yaml

@@ -0,0 +1,236 @@
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+  labels:
+    app: prometheus
+  name: prometheus-config
+  namespace: monitoring
+data:
+  prometheus.yml: |
+    # A scrape configuration for running Prometheus on a Kubernetes cluster.
+    # This uses separate scrape configs for cluster components (i.e. API server, node)
+    # and services to allow each to use different authentication configs.
+    #
+    # Kubernetes labels will be added as Prometheus labels on metrics via the
+    # `labelmap` relabeling action.
+    #
+    # If you are using Kubernetes 1.7.2 or earlier, please take note of the comments
+    # for the kubernetes-cadvisor job; you will need to edit or remove this job.
+
+    # Scrape config for API servers.
+    #
+    # Kubernetes exposes API servers as endpoints to the default/kubernetes
+    # service so this uses `endpoints` role and uses relabelling to only keep
+    # the endpoints associated with the default/kubernetes service using the
+    # default named port `https`. This works for single API server deployments as
+    # well as HA API server deployments.
+    global:
+      scrape_interval: 15s
+      scrape_timeout: 10s
+      evaluation_interval: 1m
+
+    scrape_configs:
+    - job_name: 'kubernetes-apiservers'
+
+      kubernetes_sd_configs:
+      - role: endpoints
+
+      # Default to scraping over https. If required, just disable this or change to
+      # `http`.
+      scheme: https
+
+      # This TLS & bearer token file config is used to connect to the actual scrape
+      # endpoints for cluster components. This is separate to discovery auth
+      # configuration because discovery & scraping are two separate concerns in
+      # Prometheus. The discovery auth config is automatic if Prometheus runs inside
+      # the cluster. Otherwise, more config options have to be provided within the
+      # <kubernetes_sd_config>.
+      tls_config:
+        ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+        # If your node certificates are self-signed or use a different CA to the
+        # master CA, then disable certificate verification below. Note that
+        # certificate verification is an integral part of a secure infrastructure
+        # so this should only be disabled in a controlled environment. You can
+        # disable certificate verification by uncommenting the line below.
+        #
+        # insecure_skip_verify: true
+      bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
+
+      # Keep only the default/kubernetes service endpoints for the https port. This
+      # will add targets for each API server which Kubernetes adds an endpoint to
+      # the default/kubernetes service.
+      relabel_configs:
+      - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
+        action: keep
+        regex: default;kubernetes;https
+
+    # Scrape config for nodes (kubelet).
+    #
+    # Rather than connecting directly to the node, the scrape is proxied though the
+    # Kubernetes apiserver.  This means it will work if Prometheus is running out of
+    # cluster, or can't connect to nodes for some other reason (e.g. because of
+    # firewalling).
+    - job_name: 'kubernetes-nodes'
+
+      # Default to scraping over https. If required, just disable this or change to
+      # `http`.
+      scheme: https
+
+      # This TLS & bearer token file config is used to connect to the actual scrape
+      # endpoints for cluster components. This is separate to discovery auth
+      # configuration because discovery & scraping are two separate concerns in
+      # Prometheus. The discovery auth config is automatic if Prometheus runs inside
+      # the cluster. Otherwise, more config options have to be provided within the
+      # <kubernetes_sd_config>.
+      tls_config:
+        ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+      bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
+
+      kubernetes_sd_configs:
+      - role: node
+
+      relabel_configs:
+      - action: labelmap
+        regex: __meta_kubernetes_node_label_(.+)
+      - target_label: __address__
+        replacement: kubernetes.default.svc:443
+      - source_labels: [__meta_kubernetes_node_name]
+        regex: (.+)
+        target_label: __metrics_path__
+        replacement: /api/v1/nodes/${1}/proxy/metrics
+
+    # Scrape config for Kubelet cAdvisor.
+    #
+    # This is required for Kubernetes 1.7.3 and later, where cAdvisor metrics
+    # (those whose names begin with 'container_') have been removed from the
+    # Kubelet metrics endpoint.  This job scrapes the cAdvisor endpoint to
+    # retrieve those metrics.
+    #
+    # In Kubernetes 1.7.0-1.7.2, these metrics are only exposed on the cAdvisor
+    # HTTP endpoint; use "replacement: /api/v1/nodes/${1}:4194/proxy/metrics"
+    # in that case (and ensure cAdvisor's HTTP server hasn't been disabled with
+    # the --cadvisor-port=0 Kubelet flag).
+    #
+    # This job is not necessary and should be removed in Kubernetes 1.6 and
+    # earlier versions, or it will cause the metrics to be scraped twice.
+    - job_name: 'kubernetes-cadvisor'
+
+      # Default to scraping over https. If required, just disable this or change to
+      # `http`.
+      scheme: https
+
+      # This TLS & bearer token file config is used to connect to the actual scrape
+      # endpoints for cluster components. This is separate to discovery auth
+      # configuration because discovery & scraping are two separate concerns in
+      # Prometheus. The discovery auth config is automatic if Prometheus runs inside
+      # the cluster. Otherwise, more config options have to be provided within the
+      # <kubernetes_sd_config>.
+      tls_config:
+        ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+      bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
+
+      kubernetes_sd_configs:
+      - role: node
+
+      relabel_configs:
+      - action: labelmap
+        regex: __meta_kubernetes_node_label_(.+)
+      - target_label: __address__
+        replacement: kubernetes.default.svc:443
+      - source_labels: [__meta_kubernetes_node_name]
+        regex: (.+)
+        target_label: __metrics_path__
+        replacement: /api/v1/nodes/${1}/proxy/metrics/cadvisor
+
+    # Scrape config for service endpoints.
+    #
+    # The relabeling allows the actual service scrape endpoint to be configured
+    # via the following annotations:
+    #
+    # * `prometheus.io/scrape`: Only scrape services that have a value of `true`
+    # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need
+    # to set this to `https` & most likely set the `tls_config` of the scrape config.
+    # * `prometheus.io/path`: If the metrics path is not `/metrics` override this.
+    # * `prometheus.io/port`: If the metrics are exposed on a different port to the
+    # service then set this appropriately.
+    - job_name: 'kubernetes-service-endpoints'
+
+      kubernetes_sd_configs:
+      - role: endpoints
+
+      relabel_configs:
+      - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape]
+        action: keep
+        regex: true
+      - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme]
+        action: replace
+        target_label: __scheme__
+        regex: (https?)
+      - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path]
+        action: replace
+        target_label: __metrics_path__
+        regex: (.+)
+      - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port]
+        action: replace
+        target_label: __address__
+        regex: ([^:]+)(?::\d+)?;(\d+)
+        replacement: $1:$2
+      - action: labelmap
+        regex: __meta_kubernetes_service_label_(.+)
+      - source_labels: [__meta_kubernetes_namespace]
+        action: replace
+        target_label: kubernetes_namespace
+      - source_labels: [__meta_kubernetes_service_name]
+        action: replace
+        target_label: kubernetes_name
+
+    # Example scrape config for pods
+    #
+    # The relabeling allows the actual pod scrape endpoint to be configured via the
+    # following annotations:
+    #
+    # * `prometheus.io/scrape`: Only scrape pods that have a value of `true`
+    # * `prometheus.io/path`: If the metrics path is not `/metrics` override this.
+    # * `prometheus.io/port`: Scrape the pod on the indicated port instead of the
+    # pod's declared ports (default is a port-free target if none are declared).
+    - job_name: 'kubernetes-pods'
+      # if you want to use metrics on jobs, set the below field to
+      # true to prevent Prometheus from setting the `job` label
+      # automatically.
+      honor_labels: false
+      kubernetes_sd_configs:
+      - role: pod
+      # skip verification so you can do HTTPS to pods
+      tls_config:
+        insecure_skip_verify: true
+      # make sure your labels are in order
+      relabel_configs:
+      # these labels tell Prometheus to automatically attach source
+      # pod and namespace information to each collected sample, so
+      # that they'll be exposed in the custom metrics API automatically.
+      - source_labels: [__meta_kubernetes_namespace]
+        action: replace
+        target_label: namespace
+      - source_labels: [__meta_kubernetes_pod_name]
+        action: replace
+        target_label: pod
+      # these labels tell Prometheus to look for
+      # prometheus.io/{scrape,path,port} annotations to configure
+      # how to scrape
+      - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape]
+        action: keep
+        regex: true
+      - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path]
+        action: replace
+        target_label: __metrics_path__
+        regex: (.+)
+      - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
+        action: replace
+        regex: ([^:]+)(?::\d+)?;(\d+)
+        replacement: $1:$2
+        target_label: __address__
+      - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scheme]
+        action: replace
+        target_label: __scheme__
+        regex: (.+)

+ 45 - 0
k8s-custom-hpa/prometheus/prometheus-dep.yaml

@@ -0,0 +1,45 @@
+---
+apiVersion: apps/v1beta2
+kind: Deployment
+metadata:
+  name: prometheus
+  namespace: monitoring
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: prometheus
+  template:
+    metadata:
+      labels:
+        app: prometheus
+      annotations:
+        prometheus.io/scrape: 'false'
+    spec:
+      serviceAccountName: prometheus
+      containers:
+      - name: prometheus
+        image: prom/prometheus:v2.1.0
+        imagePullPolicy: Always
+        command:
+          - prometheus
+          - --config.file=/etc/prometheus/prometheus.yml
+          - --storage.tsdb.retention=1h
+        ports:
+        - containerPort: 9090
+          protocol: TCP
+        resources:
+          limits:
+            memory: 2Gi
+        volumeMounts:
+        - mountPath: /etc/prometheus/prometheus.yml
+          name: prometheus-config
+          subPath: prometheus.yml
+      volumes:
+        - name: prometheus-config
+          configMap:
+            name: prometheus-config
+            items:
+              - key: prometheus.yml
+                path: prometheus.yml
+                mode: 0644

+ 40 - 0
k8s-custom-hpa/prometheus/prometheus-rbac.yaml

@@ -0,0 +1,40 @@
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+  name: prometheus
+rules:
+- apiGroups: [""]
+  resources:
+  - nodes
+  - nodes/proxy
+  - services
+  - endpoints
+  - pods
+  verbs: ["get", "list", "watch"]
+- apiGroups:
+  - extensions
+  resources:
+  - ingresses
+  verbs: ["get", "list", "watch"]
+- nonResourceURLs: ["/metrics"]
+  verbs: ["get"]
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: prometheus
+  namespace: monitoring
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+  name: prometheus
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: prometheus
+subjects:
+- kind: ServiceAccount
+  name: prometheus
+  namespace: monitoring

+ 17 - 0
k8s-custom-hpa/prometheus/prometheus-svc.yaml

@@ -0,0 +1,17 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: prometheus
+  namespace: monitoring
+  labels:
+    app: prometheus
+spec:
+  type: NodePort
+  ports:
+    - port: 9090
+      targetPort: 9090
+      nodePort: 31190
+      protocol: TCP
+  selector:
+    app: prometheus

+ 1 - 1
prometheus-operator/alertmanager.yaml

@@ -7,7 +7,7 @@ global:
   smtp_smarthost: 'smtp.exmail.qq.com:25'
   smtp_from: 'dukuan@xxx.com'
   smtp_auth_username: 'dukuan@xxx.com'
-  smtp_auth_password: 'DKhxlc2018,./!@#'
+  smtp_auth_password: 'DKxxxxZ'
  # HipChat alert configuration
   # hipchat_auth_token: '123456789'
   # hipchat_auth_url: 'https://hipchat.foobar.org/'

Some files were not shown because too many files changed in this diff