
Automatic backup at Wed Sep 19 20:20:30 UTC 2018

Author: kube-backup
Commit: 6b1a370092
36 changed files with 1462 additions and 0 deletions
   1. default/kubernetes.svc.yaml (+16 -0)
   2. default/logger.py.configmap.yaml (+18 -0)
   3. default/webserver.deployment.yaml (+39 -0)
   4. default/webserver.svc.yaml (+18 -0)
   5. demo/httpdocs.configmap.yaml (+19 -0)
   6. demo/webserver.deployment.yaml (+47 -0)
   7. demo/webserver.svc.yaml (+18 -0)
   8. kube-public/cluster-info.configmap.yaml (+10 -0)
   9. kube-system/coredns.configmap.yaml (+10 -0)
  10. kube-system/coredns.deployment.yaml (+94 -0)
  11. kube-system/extension-apiserver-authentication.configmap.yaml (+80 -0)
  12. kube-system/kube-dns.svc.yaml (+25 -0)
  13. kube-system/kube-proxy.configmap.yaml (+24 -0)
  14. kube-system/kube-proxy.ds.yaml (+71 -0)
  15. kube-system/kube-state-backup.cronjob.yaml (+57 -0)
  16. kube-system/kubeadm-config.configmap.yaml (+50 -0)
  17. kube-system/kubelet-config-1.11.configmap.yaml (+27 -0)
  18. kube-system/kubernetes-dashboard-settings.configmap.yaml (+7 -0)
  19. kube-system/kubernetes-dashboard.deployment.yaml (+71 -0)
  20. kube-system/kubernetes-dashboard.svc.yaml (+16 -0)
  21. kube-system/tiller-deploy.deployment.yaml (+74 -0)
  22. kube-system/tiller-deploy.svc.yaml (+19 -0)
  23. kube-system/weave-net.configmap.yaml (+7 -0)
  24. kube-system/weave-net.ds.yaml (+135 -0)
  25. metallb-system/config.configmap.yaml (+9 -0)
  26. metallb-system/controller.deployment.yaml (+63 -0)
  27. metallb-system/speaker.ds.yaml (+74 -0)
  28. namespace.yaml (+68 -0)
  29. power-monitoring/grafana.deployment.yaml (+72 -0)
  30. power-monitoring/grafana.svc.yaml (+19 -0)
  31. power-monitoring/influxdb.deployment.yaml (+49 -0)
  32. power-monitoring/influxdb.svc.yaml (+17 -0)
  33. power-monitoring/logger.py.configmap.yaml (+36 -0)
  34. power-monitoring/power-monitoring.deployment.yaml (+69 -0)
  35. power-monitoring/power-monitoring.svc.yaml (+19 -0)
  36. storageclasses.yaml (+15 -0)

+ 16 - 0
default/kubernetes.svc.yaml

@@ -0,0 +1,16 @@
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    component: apiserver
+    provider: kubernetes
+  name: kubernetes
+  namespace: default
+spec:
+  ports:
+  - name: https
+    port: 443
+    protocol: TCP
+    targetPort: 6443
+  sessionAffinity: None
+  type: ClusterIP

+ 18 - 0
default/logger.py.configmap.yaml

@@ -0,0 +1,18 @@
+apiVersion: v1
+data:
+  logger.py: "import time\nfrom datetime import datetime\nimport requests\nimport\
+    \ traceback\n\nOUT = \"/data/log.csv\"\nURL = \"http://62.220.135.196:8080/sensor/{}?version=1.0&interval=minute&unit=watt\"\
+    \nSENSORS = (\n  \"34cde81adabfb1ce819eca8fea6949b6\",\n  \"b7755b5f3ec05fcdc67f449241a9912a\"\
+    ,\n  \"e67e0685f747b30d855108ab781abdfc\"\n)\n\nwhile True:\n  results = []\n\
+    \  for sensor in SENSORS:\n    try:\n      (timestamp, value) = requests.get(URL.format(sensor)).json()[-2]\n\
+    \      results.append((\",\".join((str(timestamp), sensor, str(value)))))\n  \
+    \  except Exception as e:\n      traceback.print_exc()\n\n  with open(OUT, 'a')\
+    \ as f:\n    f.write(\"\\n\".join(results) + \"\\n\")\n\n  sleeptime = 1000000\
+    \ - datetime.utcnow().microsecond\n  time.sleep(sleeptime/1000000.0)\n\n \n"
+kind: ConfigMap
+metadata:
+  annotations: {}
+  labels:
+    app: power-monitor
+  name: logger.py
+  namespace: default
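
For readability, the escaped logger.py payload above decodes to the following script (transcribed from the ConfigMap data; it polls each sensor's HTTP API roughly once per second and appends readings to /data/log.csv):

import time
from datetime import datetime
import requests
import traceback

OUT = "/data/log.csv"
URL = "http://62.220.135.196:8080/sensor/{}?version=1.0&interval=minute&unit=watt"
SENSORS = (
  "34cde81adabfb1ce819eca8fea6949b6",
  "b7755b5f3ec05fcdc67f449241a9912a",
  "e67e0685f747b30d855108ab781abdfc"
)

while True:
  results = []
  for sensor in SENSORS:
    try:
      (timestamp, value) = requests.get(URL.format(sensor)).json()[-2]
      results.append((",".join((str(timestamp), sensor, str(value)))))
    except Exception as e:
      traceback.print_exc()

  with open(OUT, 'a') as f:
    f.write("\n".join(results) + "\n")

  # Sleep until the start of the next whole second.
  sleeptime = 1000000 - datetime.utcnow().microsecond
  time.sleep(sleeptime/1000000.0)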

+ 39 - 0
default/webserver.deployment.yaml

@@ -0,0 +1,39 @@
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  annotations:
+    deployment.kubernetes.io/revision: '1'
+  labels:
+    run: webserver
+  name: webserver
+  namespace: default
+spec:
+  progressDeadlineSeconds: 600
+  replicas: 1
+  revisionHistoryLimit: 10
+  selector:
+    matchLabels:
+      run: webserver
+  strategy:
+    rollingUpdate:
+      maxSurge: 1
+      maxUnavailable: 1
+    type: RollingUpdate
+  template:
+    metadata:
+      creationTimestamp: null
+      labels:
+        run: webserver
+    spec:
+      containers:
+      - image: nginx
+        imagePullPolicy: Always
+        name: webserver
+        resources: {}
+        terminationMessagePath: /dev/termination-log
+        terminationMessagePolicy: File
+      dnsPolicy: ClusterFirst
+      restartPolicy: Always
+      schedulerName: default-scheduler
+      securityContext: {}
+      terminationGracePeriodSeconds: 30

+ 18 - 0
default/webserver.svc.yaml

@@ -0,0 +1,18 @@
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    run: webserver
+  name: webserver
+  namespace: default
+spec:
+  externalTrafficPolicy: Cluster
+  ports:
+  - nodePort: 30084
+    port: 80
+    protocol: TCP
+    targetPort: 80
+  selector:
+    run: webserver
+  sessionAffinity: None
+  type: LoadBalancer

+ 19 - 0
demo/httpdocs.configmap.yaml

@@ -0,0 +1,19 @@
+apiVersion: v1
+data:
+  index.html: '<!DOCTYPE html>
+
+    <html>
+
+    <body>
+
+    <h1>Hello World!</h1>
+
+    </body>
+
+    </html>
+
+    '
+kind: ConfigMap
+metadata:
+  name: httpdocs
+  namespace: demo

+ 47 - 0
demo/webserver.deployment.yaml

@@ -0,0 +1,47 @@
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  annotations:
+    deployment.kubernetes.io/revision: '1'
+  labels:
+    run: webserver
+  name: webserver
+  namespace: demo
+spec:
+  progressDeadlineSeconds: 600
+  replicas: 2
+  revisionHistoryLimit: 10
+  selector:
+    matchLabels:
+      run: webserver
+  strategy:
+    rollingUpdate:
+      maxSurge: 1
+      maxUnavailable: 1
+    type: RollingUpdate
+  template:
+    metadata:
+      creationTimestamp: null
+      labels:
+        run: webserver
+    spec:
+      containers:
+      - image: nginx
+        imagePullPolicy: Always
+        name: webserver
+        resources: {}
+        terminationMessagePath: /dev/termination-log
+        terminationMessagePolicy: File
+        volumeMounts:
+        - mountPath: /usr/share/nginx/html
+          name: html
+      dnsPolicy: ClusterFirst
+      restartPolicy: Always
+      schedulerName: default-scheduler
+      securityContext: {}
+      terminationGracePeriodSeconds: 30
+      volumes:
+      - configMap:
+          defaultMode: 420
+          name: httpdocs
+        name: html

+ 18 - 0
demo/webserver.svc.yaml

@@ -0,0 +1,18 @@
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    run: webserver
+  name: webserver
+  namespace: demo
+spec:
+  externalTrafficPolicy: Cluster
+  ports:
+  - nodePort: 30218
+    port: 80
+    protocol: TCP
+    targetPort: 80
+  selector:
+    run: webserver
+  sessionAffinity: None
+  type: LoadBalancer

+ 10 - 0
kube-public/cluster-info.configmap.yaml

File diff suppressed because it is too large

+ 10 - 0
kube-system/coredns.configmap.yaml

@@ -0,0 +1,10 @@
+apiVersion: v1
+data:
+  Corefile: ".:53 {\n    errors\n    health\n    kubernetes cluster.local in-addr.arpa\
+    \ ip6.arpa {\n       pods insecure\n       upstream\n       fallthrough in-addr.arpa\
+    \ ip6.arpa\n    }\n    prometheus :9153\n    proxy . /etc/resolv.conf\n    cache\
+    \ 30\n    reload\n}\n"
+kind: ConfigMap
+metadata:
+  name: coredns
+  namespace: kube-system
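
Decoded, the Corefile above reads:

.:53 {
    errors
    health
    kubernetes cluster.local in-addr.arpa ip6.arpa {
       pods insecure
       upstream
       fallthrough in-addr.arpa ip6.arpa
    }
    prometheus :9153
    proxy . /etc/resolv.conf
    cache 30
    reload
}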

+ 94 - 0
kube-system/coredns.deployment.yaml

@@ -0,0 +1,94 @@
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  annotations:
+    deployment.kubernetes.io/revision: '3'
+  labels:
+    k8s-app: kube-dns
+  name: coredns
+  namespace: kube-system
+spec:
+  progressDeadlineSeconds: 600
+  replicas: 2
+  revisionHistoryLimit: 10
+  selector:
+    matchLabels:
+      k8s-app: kube-dns
+  strategy:
+    rollingUpdate:
+      maxSurge: 25%
+      maxUnavailable: 1
+    type: RollingUpdate
+  template:
+    metadata:
+      creationTimestamp: null
+      labels:
+        k8s-app: kube-dns
+    spec:
+      containers:
+      - args:
+        - -conf
+        - /etc/coredns/Corefile
+        image: k8s.gcr.io/coredns:1.1.3
+        imagePullPolicy: IfNotPresent
+        livenessProbe:
+          failureThreshold: 5
+          httpGet:
+            path: /health
+            port: 8080
+            scheme: HTTP
+          initialDelaySeconds: 60
+          periodSeconds: 10
+          successThreshold: 1
+          timeoutSeconds: 5
+        name: coredns
+        ports:
+        - containerPort: 53
+          name: dns
+          protocol: UDP
+        - containerPort: 53
+          name: dns-tcp
+          protocol: TCP
+        - containerPort: 9153
+          name: metrics
+          protocol: TCP
+        resources:
+          limits:
+            memory: 170Mi
+          requests:
+            cpu: 100m
+            memory: 70Mi
+        securityContext:
+          allowPrivilegeEscalation: false
+          capabilities:
+            add:
+            - NET_BIND_SERVICE
+            drop:
+            - all
+          readOnlyRootFilesystem: true
+        terminationMessagePath: /dev/termination-log
+        terminationMessagePolicy: File
+        volumeMounts:
+        - mountPath: /etc/coredns
+          name: config-volume
+          readOnly: true
+      dnsPolicy: Default
+      restartPolicy: Always
+      schedulerName: default-scheduler
+      securityContext: {}
+      serviceAccount: coredns
+      serviceAccountName: coredns
+      terminationGracePeriodSeconds: 30
+      tolerations:
+      - key: CriticalAddonsOnly
+        operator: Exists
+      - effect: NoSchedule
+        key: node-role.kubernetes.io/master
+      volumes:
+      - configMap:
+          defaultMode: 420
+          items:
+          - key: Corefile
+            path: Corefile
+          name: coredns
+        name: config-volume

+ 80 - 0
kube-system/extension-apiserver-authentication.configmap.yaml

@@ -0,0 +1,80 @@
+apiVersion: v1
+data:
+  client-ca-file: '-----BEGIN CERTIFICATE-----
+
+    MIICyDCCAbCgAwIBAgIBADANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwprdWJl
+
+    cm5ldGVzMB4XDTE4MDUwNDIwMjcyNFoXDTI4MDUwMTIwMjcyNFowFTETMBEGA1UE
+
+    AxMKa3ViZXJuZXRlczCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMov
+
+    x8eq5SjPVrPXOLBww61WsSjdTL9+Pk5nS0jxCVPtRLjV+RoF2jF8AIadJTNUxN8O
+
+    i8pzmlAsWPi3SrhimL5ukPtWQSx82PT9bvXw5t8q8pyi1BNvfu+DfSJgUPcLw3gC
+
+    +S1ZKW0RHh1F48J5TJN2JXcYWjMcTeA1XU152jj/WsnvIHOfXwJHGjiTKo49s77M
+
+    Zk47WwA/RLgJy3/pIfDxuEhcyeTj1q1HpI/AZpqvu11YCzDtcRrQ8LWX9BM7hpIT
+
+    y6m2ubqMLC6ZjwGDvOIdfGOZMu1i3XmOsbL9r+hYwEqH/Mhj0/TEoKhK7gojdXym
+
+    gAf9kUl+1gaD3ESpBkMCAwEAAaMjMCEwDgYDVR0PAQH/BAQDAgKkMA8GA1UdEwEB
+
+    /wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAD/WE4QGBZZ57DHL0gLUCnGlRd1a
+
+    82nhfE03PRbN9kSh1URKAig24r4DJ7Mc5roa0pP9Y0zzviqbiKv4Pymf7gyGMflZ
+
+    0nNhT8uSpWOYjLf/5mI30B3zLveUIKh0Exceq65JmUbojyn4kjXHCLtkwTSMKlBE
+
+    GgSd3r0VZL2f7rDqEQ/5jM695Kz1W4JhkgWARKnXuFamfqvrfRwjPIvAjzECJISL
+
+    IkPqOUtw9QThLCJ8+B7FfnsZlnNTEu8/XIvBtvvvpJUIpJYGujnn6VCUuuTrSgUy
+
+    +xpcvw6GAsB+9OV0HDZxWuDf1U55izQZzsR2LO3HZbV38lFsvGl1Up4qjTo=
+
+    -----END CERTIFICATE-----
+
+    '
+  requestheader-allowed-names: '["front-proxy-client"]'
+  requestheader-client-ca-file: '-----BEGIN CERTIFICATE-----
+
+    MIICyDCCAbCgAwIBAgIBADANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwprdWJl
+
+    cm5ldGVzMB4XDTE4MDUwNDIwMjcyN1oXDTI4MDUwMTIwMjcyN1owFTETMBEGA1UE
+
+    AxMKa3ViZXJuZXRlczCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMB9
+
+    29bqZNaXjbOOYgcLzzMiyEvqiQdhyI3rSfErYoUbC0f62Rs41p6th6KlieLi+T8z
+
+    7UqNlKieFPyngr+Xz7szxV85SysbKMgcedMJ6LBhsA4o18OLJcYGw5OKGrlq+3N7
+
+    uMzC+j01vhi1/r5Kol1tGJv2f3dwnDurnIxTn3YtQJy6VOFTcYrM3NgRmpDxHj24
+
+    dCLCiRVLSh05NAgx3Izd+onBdTl7nJyMUnVN+TOPMYZ7RH9CdaoxUObW7QiaxYJm
+
+    sCDFqHMhBymNzcbVeuuPS+WCXojLfNa5t1b6If7u9ST7JZaUL6dNZqQ37DPDnmxI
+
+    orhK3X0gcuRxXHfAI6cCAwEAAaMjMCEwDgYDVR0PAQH/BAQDAgKkMA8GA1UdEwEB
+
+    /wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAEnPqCNCG2J7mEPkQnzEEwjTwW2D
+
+    hULcKQWFiiWGtgJQm7f3fY+bH+EoN1n7q+ehBk2HlyNCammnwp5+/jrlJPBC64hN
+
+    z1fCcIrNh/7ricKNltoBxCXKht3oR4bKcxI4VLlOlUVHCwArVP1IOCh+CtHI5gYM
+
+    Pt24fc5TC56VTIRA6rbVD8Fit/l6c63TFidTqXw5Tn2ivzSWmJwnAmFqRmTXk5nj
+
+    cd0SzOAmtjv2ND7MHBNGFFFQnlW7XhBy/Y9HSzaCtl6gOSTYMTEWEBYl4vb2JNf0
+
+    YUx8xfJOcBDRlc81sgEv5bhFox48hhmFBoCEOvh+/5RKkJqYkY3UAKIAqy4=
+
+    -----END CERTIFICATE-----
+
+    '
+  requestheader-extra-headers-prefix: '["X-Remote-Extra-"]'
+  requestheader-group-headers: '["X-Remote-Group"]'
+  requestheader-username-headers: '["X-Remote-User"]'
+kind: ConfigMap
+metadata:
+  name: extension-apiserver-authentication
+  namespace: kube-system

+ 25 - 0
kube-system/kube-dns.svc.yaml

@@ -0,0 +1,25 @@
+apiVersion: v1
+kind: Service
+metadata:
+  annotations:
+    prometheus.io/scrape: 'true'
+  labels:
+    k8s-app: kube-dns
+    kubernetes.io/cluster-service: 'true'
+    kubernetes.io/name: KubeDNS
+  name: kube-dns
+  namespace: kube-system
+spec:
+  ports:
+  - name: dns
+    port: 53
+    protocol: UDP
+    targetPort: 53
+  - name: dns-tcp
+    port: 53
+    protocol: TCP
+    targetPort: 53
+  selector:
+    k8s-app: kube-dns
+  sessionAffinity: None
+  type: ClusterIP

+ 24 - 0
kube-system/kube-proxy.configmap.yaml

@@ -0,0 +1,24 @@
+apiVersion: v1
+data:
+  config.conf: "apiVersion: kubeproxy.config.k8s.io/v1alpha1\nbindAddress: 0.0.0.0\n\
+    clientConnection:\n  acceptContentTypes: \"\"\n  burst: 10\n  contentType: application/vnd.kubernetes.protobuf\n\
+    \  kubeconfig: /var/lib/kube-proxy/kubeconfig.conf\n  qps: 5\nclusterCIDR: \"\"\
+    \nconfigSyncPeriod: 15m0s\nconntrack:\n  max: null\n  maxPerCore: 32768\n  min:\
+    \ 131072\n  tcpCloseWaitTimeout: 1h0m0s\n  tcpEstablishedTimeout: 24h0m0s\nenableProfiling:\
+    \ false\nhealthzBindAddress: 0.0.0.0:10256\nhostnameOverride: \"\"\niptables:\n\
+    \  masqueradeAll: false\n  masqueradeBit: 14\n  minSyncPeriod: 0s\n  syncPeriod:\
+    \ 30s\nipvs:\n  excludeCIDRs: null\n  minSyncPeriod: 0s\n  scheduler: \"\"\n \
+    \ syncPeriod: 30s\nkind: KubeProxyConfiguration\nmetricsBindAddress: 127.0.0.1:10249\n\
+    mode: \"\"\nnodePortAddresses: null\noomScoreAdj: -999\nportRange: \"\"\nresourceContainer:\
+    \ /kube-proxy\nudpIdleTimeout: 250ms"
+  kubeconfig.conf: "apiVersion: v1\nkind: Config\nclusters:\n- cluster:\n    certificate-authority:\
+    \ /var/run/secrets/kubernetes.io/serviceaccount/ca.crt\n    server: https://62.220.135.205:6443\n\
+    \  name: default\ncontexts:\n- context:\n    cluster: default\n    namespace:\
+    \ default\n    user: default\n  name: default\ncurrent-context: default\nusers:\n\
+    - name: default\n  user:\n    tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token"
+kind: ConfigMap
+metadata:
+  labels:
+    app: kube-proxy
+  name: kube-proxy
+  namespace: kube-system
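
Decoded, the kubeconfig.conf payload reads as follows (the config.conf key holds a KubeProxyConfiguration serialized in the same escaped form):

apiVersion: v1
kind: Config
clusters:
- cluster:
    certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
    server: https://62.220.135.205:6443
  name: default
contexts:
- context:
    cluster: default
    namespace: default
    user: default
  name: default
current-context: default
users:
- name: default
  user:
    tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token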

+ 71 - 0
kube-system/kube-proxy.ds.yaml

@@ -0,0 +1,71 @@
+apiVersion: extensions/v1beta1
+kind: DaemonSet
+metadata:
+  labels:
+    k8s-app: kube-proxy
+  name: kube-proxy
+  namespace: kube-system
+spec:
+  revisionHistoryLimit: 10
+  selector:
+    matchLabels:
+      k8s-app: kube-proxy
+  template:
+    metadata:
+      creationTimestamp: null
+      labels:
+        k8s-app: kube-proxy
+    spec:
+      containers:
+      - command:
+        - /usr/local/bin/kube-proxy
+        - --config=/var/lib/kube-proxy/config.conf
+        image: k8s.gcr.io/kube-proxy-amd64:v1.11.0
+        imagePullPolicy: IfNotPresent
+        name: kube-proxy
+        resources: {}
+        securityContext:
+          privileged: true
+        terminationMessagePath: /dev/termination-log
+        terminationMessagePolicy: File
+        volumeMounts:
+        - mountPath: /var/lib/kube-proxy
+          name: kube-proxy
+        - mountPath: /run/xtables.lock
+          name: xtables-lock
+        - mountPath: /lib/modules
+          name: lib-modules
+          readOnly: true
+      dnsPolicy: ClusterFirst
+      hostNetwork: true
+      nodeSelector:
+        beta.kubernetes.io/arch: amd64
+      restartPolicy: Always
+      schedulerName: default-scheduler
+      securityContext: {}
+      serviceAccount: kube-proxy
+      serviceAccountName: kube-proxy
+      terminationGracePeriodSeconds: 30
+      tolerations:
+      - key: CriticalAddonsOnly
+        operator: Exists
+      - effect: NoSchedule
+        key: node-role.kubernetes.io/master
+      volumes:
+      - configMap:
+          defaultMode: 420
+          name: kube-proxy
+        name: kube-proxy
+      - hostPath:
+          path: /run/xtables.lock
+          type: FileOrCreate
+        name: xtables-lock
+      - hostPath:
+          path: /lib/modules
+          type: ''
+        name: lib-modules
+  templateGeneration: 2
+  updateStrategy:
+    rollingUpdate:
+      maxUnavailable: 1
+    type: RollingUpdate

+ 57 - 0
kube-system/kube-state-backup.cronjob.yaml

@@ -0,0 +1,57 @@
+apiVersion: batch/v1beta1
+kind: CronJob
+metadata:
+  annotations: {}
+  labels:
+    app: kube-backup
+  name: kube-state-backup
+  namespace: kube-system
+spec:
+  concurrencyPolicy: Replace
+  failedJobsHistoryLimit: 3
+  jobTemplate:
+    metadata:
+      creationTimestamp: null
+    spec:
+      template:
+        metadata:
+          creationTimestamp: null
+          labels:
+            app: kube-backup
+          name: kube-backup
+        spec:
+          containers:
+          - env:
+            - name: GIT_REPO
+              value: ssh://gogs@git.fixme.ch:1337/kube-backup/fixme-kube-backup.git
+            - name: RESOURCETYPES
+              value: ingress deployment configmap svc rc ds thirdpartyresource networkpolicy
+                statefulset storageclass cronjob
+            image: ptlange/kube-backup:1.9.3-2
+            imagePullPolicy: Always
+            name: backup
+            resources: {}
+            terminationMessagePath: /dev/termination-log
+            terminationMessagePolicy: File
+            volumeMounts:
+            - mountPath: /backup/
+              name: cache
+            - mountPath: /backup/.ssh
+              name: sshkey
+          dnsPolicy: ClusterFirst
+          restartPolicy: OnFailure
+          schedulerName: default-scheduler
+          securityContext: {}
+          serviceAccount: kube-backup
+          serviceAccountName: kube-backup
+          terminationGracePeriodSeconds: 30
+          volumes:
+          - name: sshkey
+            secret:
+              defaultMode: 420
+              secretName: kube-backup-ssh
+          - emptyDir: {}
+            name: cache
+  schedule: '*/5 * * * *'
+  successfulJobsHistoryLimit: 3
+  suspend: false
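
This CronJob runs ptlange/kube-backup every five minutes, exporting the resource types listed in RESOURCETYPES and pushing the result to the Git repo over SSH — which is how the files in this commit were produced. Below is a hypothetical sketch of the equivalent manual export; the file layout is assumed from this repository, not taken from the kube-backup source:

#!/usr/bin/env python3
# Hypothetical sketch of the export step the CronJob automates (the real
# kube-backup image may differ): dump each namespaced resource to
# <namespace>/<name>.<type>.yaml, mirroring this repository's layout.
# Cluster-scoped types (e.g. storageclass) are left out of this sketch.
import subprocess
import pathlib

RESOURCETYPES = ["ingress", "deployment", "configmap", "svc", "rc", "ds",
                 "statefulset", "cronjob"]

namespaces = subprocess.check_output(
    ["kubectl", "get", "ns", "-o", "jsonpath={.items[*].metadata.name}"],
    text=True).split()

for ns in namespaces:
    for rtype in RESOURCETYPES:
        names = subprocess.check_output(
            ["kubectl", "-n", ns, "get", rtype,
             "-o", "jsonpath={.items[*].metadata.name}"], text=True).split()
        for name in names:
            manifest = subprocess.check_output(
                ["kubectl", "-n", ns, "get", rtype, name, "-o", "yaml"],
                text=True)
            out = pathlib.Path(ns) / "{}.{}.yaml".format(name, rtype)
            out.parent.mkdir(exist_ok=True)
            out.write_text(manifest)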

+ 50 - 0
kube-system/kubeadm-config.configmap.yaml

@@ -0,0 +1,50 @@
+apiVersion: v1
+data:
+  MasterConfiguration: "api:\n  advertiseAddress: 62.220.135.205\n  bindPort: 6443\n\
+    \  controlPlaneEndpoint: \"\"\napiServerExtraArgs:\n  authorization-mode: Node,RBAC\n\
+    apiVersion: kubeadm.k8s.io/v1alpha2\nauditPolicy:\n  logDir: /var/log/kubernetes/audit\n\
+    \  logMaxAge: 2\n  path: \"\"\ncertificatesDir: /etc/kubernetes/pki\nclusterName:\
+    \ kubernetes\netcd:\n  local:\n    dataDir: /var/lib/etcd\n    image: \"\"\nimageRepository:\
+    \ k8s.gcr.io\nkind: MasterConfiguration\nkubeProxy:\n  config:\n    bindAddress:\
+    \ 0.0.0.0\n    clientConnection:\n      acceptContentTypes: \"\"\n      burst:\
+    \ 10\n      contentType: application/vnd.kubernetes.protobuf\n      kubeconfig:\
+    \ /var/lib/kube-proxy/kubeconfig.conf\n      qps: 5\n    clusterCIDR: \"\"\n \
+    \   configSyncPeriod: 15m0s\n    conntrack:\n      max: null\n      maxPerCore:\
+    \ 32768\n      min: 131072\n      tcpCloseWaitTimeout: 1h0m0s\n      tcpEstablishedTimeout:\
+    \ 24h0m0s\n    enableProfiling: false\n    healthzBindAddress: 0.0.0.0:10256\n\
+    \    hostnameOverride: \"\"\n    iptables:\n      masqueradeAll: false\n     \
+    \ masqueradeBit: 14\n      minSyncPeriod: 0s\n      syncPeriod: 30s\n    ipvs:\n\
+    \      excludeCIDRs: null\n      minSyncPeriod: 0s\n      scheduler: \"\"\n  \
+    \    syncPeriod: 30s\n    metricsBindAddress: 127.0.0.1:10249\n    mode: \"\"\n\
+    \    nodePortAddresses: null\n    oomScoreAdj: -999\n    portRange: \"\"\n   \
+    \ resourceContainer: /kube-proxy\n    udpIdleTimeout: 250ms\nkubeletConfiguration:\n\
+    \  baseConfig:\n    address: 0.0.0.0\n    authentication:\n      anonymous:\n\
+    \        enabled: false\n      webhook:\n        cacheTTL: 2m0s\n        enabled:\
+    \ true\n      x509:\n        clientCAFile: /etc/kubernetes/pki/ca.crt\n    authorization:\n\
+    \      mode: Webhook\n      webhook:\n        cacheAuthorizedTTL: 5m0s\n     \
+    \   cacheUnauthorizedTTL: 30s\n    cgroupDriver: cgroupfs\n    cgroupsPerQOS:\
+    \ true\n    clusterDNS:\n    - 10.96.0.10\n    clusterDomain: cluster.local\n\
+    \    containerLogMaxFiles: 5\n    containerLogMaxSize: 10Mi\n    contentType:\
+    \ application/vnd.kubernetes.protobuf\n    cpuCFSQuota: true\n    cpuManagerPolicy:\
+    \ none\n    cpuManagerReconcilePeriod: 10s\n    enableControllerAttachDetach:\
+    \ true\n    enableDebuggingHandlers: true\n    enforceNodeAllocatable:\n    -\
+    \ pods\n    eventBurst: 10\n    eventRecordQPS: 5\n    evictionHard:\n      imagefs.available:\
+    \ 15%\n      memory.available: 100Mi\n      nodefs.available: 10%\n      nodefs.inodesFree:\
+    \ 5%\n    evictionPressureTransitionPeriod: 5m0s\n    failSwapOn: true\n    fileCheckFrequency:\
+    \ 20s\n    hairpinMode: promiscuous-bridge\n    healthzBindAddress: 127.0.0.1\n\
+    \    healthzPort: 10248\n    httpCheckFrequency: 20s\n    imageGCHighThresholdPercent:\
+    \ 85\n    imageGCLowThresholdPercent: 80\n    imageMinimumGCAge: 2m0s\n    iptablesDropBit:\
+    \ 15\n    iptablesMasqueradeBit: 14\n    kubeAPIBurst: 10\n    kubeAPIQPS: 5\n\
+    \    makeIPTablesUtilChains: true\n    maxOpenFiles: 1000000\n    maxPods: 110\n\
+    \    nodeStatusUpdateFrequency: 10s\n    oomScoreAdj: -999\n    podPidsLimit:\
+    \ -1\n    port: 10250\n    registryBurst: 10\n    registryPullQPS: 5\n    resolvConf:\
+    \ /etc/resolv.conf\n    rotateCertificates: true\n    runtimeRequestTimeout: 2m0s\n\
+    \    serializeImagePulls: true\n    staticPodPath: /etc/kubernetes/manifests\n\
+    \    streamingConnectionIdleTimeout: 4h0m0s\n    syncFrequency: 1m0s\n    volumeStatsAggPeriod:\
+    \ 1m0s\nkubernetesVersion: v1.11.0\nnetworking:\n  dnsDomain: cluster.local\n\
+    \  podSubnet: \"\"\n  serviceSubnet: 10.96.0.0/12\nnodeRegistration: {}\nunifiedControlPlaneImage:\
+    \ \"\"\n"
+kind: ConfigMap
+metadata:
+  name: kubeadm-config
+  namespace: kube-system
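
The MasterConfiguration blob decodes to the following. The kubeProxy.config and kubeletConfiguration.baseConfig sub-blocks are elided here; their fields match, field for field, the standalone kube-proxy and kubelet-config-1.11 ConfigMaps in this same commit:

api:
  advertiseAddress: 62.220.135.205
  bindPort: 6443
  controlPlaneEndpoint: ""
apiServerExtraArgs:
  authorization-mode: Node,RBAC
apiVersion: kubeadm.k8s.io/v1alpha2
auditPolicy:
  logDir: /var/log/kubernetes/audit
  logMaxAge: 2
  path: ""
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
etcd:
  local:
    dataDir: /var/lib/etcd
    image: ""
imageRepository: k8s.gcr.io
kind: MasterConfiguration
kubeProxy:
  config: {}  # elided: same fields as config.conf in kube-proxy.configmap.yaml
kubeletConfiguration:
  baseConfig: {}  # elided: same fields as kubelet-config-1.11.configmap.yaml
kubernetesVersion: v1.11.0
networking:
  dnsDomain: cluster.local
  podSubnet: ""
  serviceSubnet: 10.96.0.0/12
nodeRegistration: {}
unifiedControlPlaneImage: ""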

+ 27 - 0
kube-system/kubelet-config-1.11.configmap.yaml

@@ -0,0 +1,27 @@
+apiVersion: v1
+data:
+  kubelet: "address: 0.0.0.0\napiVersion: kubelet.config.k8s.io/v1beta1\nauthentication:\n\
+    \  anonymous:\n    enabled: false\n  webhook:\n    cacheTTL: 2m0s\n    enabled:\
+    \ true\n  x509:\n    clientCAFile: /etc/kubernetes/pki/ca.crt\nauthorization:\n\
+    \  mode: Webhook\n  webhook:\n    cacheAuthorizedTTL: 5m0s\n    cacheUnauthorizedTTL:\
+    \ 30s\ncgroupDriver: cgroupfs\ncgroupsPerQOS: true\nclusterDNS:\n- 10.96.0.10\n\
+    clusterDomain: cluster.local\ncontainerLogMaxFiles: 5\ncontainerLogMaxSize: 10Mi\n\
+    contentType: application/vnd.kubernetes.protobuf\ncpuCFSQuota: true\ncpuManagerPolicy:\
+    \ none\ncpuManagerReconcilePeriod: 10s\nenableControllerAttachDetach: true\nenableDebuggingHandlers:\
+    \ true\nenforceNodeAllocatable:\n- pods\neventBurst: 10\neventRecordQPS: 5\nevictionHard:\n\
+    \  imagefs.available: 15%\n  memory.available: 100Mi\n  nodefs.available: 10%\n\
+    \  nodefs.inodesFree: 5%\nevictionPressureTransitionPeriod: 5m0s\nfailSwapOn:\
+    \ true\nfileCheckFrequency: 20s\nhairpinMode: promiscuous-bridge\nhealthzBindAddress:\
+    \ 127.0.0.1\nhealthzPort: 10248\nhttpCheckFrequency: 20s\nimageGCHighThresholdPercent:\
+    \ 85\nimageGCLowThresholdPercent: 80\nimageMinimumGCAge: 2m0s\niptablesDropBit:\
+    \ 15\niptablesMasqueradeBit: 14\nkind: KubeletConfiguration\nkubeAPIBurst: 10\n\
+    kubeAPIQPS: 5\nmakeIPTablesUtilChains: true\nmaxOpenFiles: 1000000\nmaxPods: 110\n\
+    nodeStatusUpdateFrequency: 10s\noomScoreAdj: -999\npodPidsLimit: -1\nport: 10250\n\
+    registryBurst: 10\nregistryPullQPS: 5\nresolvConf: /etc/resolv.conf\nrotateCertificates:\
+    \ true\nruntimeRequestTimeout: 2m0s\nserializeImagePulls: true\nstaticPodPath:\
+    \ /etc/kubernetes/manifests\nstreamingConnectionIdleTimeout: 4h0m0s\nsyncFrequency:\
+    \ 1m0s\nvolumeStatsAggPeriod: 1m0s\n"
+kind: ConfigMap
+metadata:
+  name: kubelet-config-1.11
+  namespace: kube-system
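
Decoded, the kubelet key reads:

address: 0.0.0.0
apiVersion: kubelet.config.k8s.io/v1beta1
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 2m0s
    enabled: true
  x509:
    clientCAFile: /etc/kubernetes/pki/ca.crt
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 5m0s
    cacheUnauthorizedTTL: 30s
cgroupDriver: cgroupfs
cgroupsPerQOS: true
clusterDNS:
- 10.96.0.10
clusterDomain: cluster.local
containerLogMaxFiles: 5
containerLogMaxSize: 10Mi
contentType: application/vnd.kubernetes.protobuf
cpuCFSQuota: true
cpuManagerPolicy: none
cpuManagerReconcilePeriod: 10s
enableControllerAttachDetach: true
enableDebuggingHandlers: true
enforceNodeAllocatable:
- pods
eventBurst: 10
eventRecordQPS: 5
evictionHard:
  imagefs.available: 15%
  memory.available: 100Mi
  nodefs.available: 10%
  nodefs.inodesFree: 5%
evictionPressureTransitionPeriod: 5m0s
failSwapOn: true
fileCheckFrequency: 20s
hairpinMode: promiscuous-bridge
healthzBindAddress: 127.0.0.1
healthzPort: 10248
httpCheckFrequency: 20s
imageGCHighThresholdPercent: 85
imageGCLowThresholdPercent: 80
imageMinimumGCAge: 2m0s
iptablesDropBit: 15
iptablesMasqueradeBit: 14
kind: KubeletConfiguration
kubeAPIBurst: 10
kubeAPIQPS: 5
makeIPTablesUtilChains: true
maxOpenFiles: 1000000
maxPods: 110
nodeStatusUpdateFrequency: 10s
oomScoreAdj: -999
podPidsLimit: -1
port: 10250
registryBurst: 10
registryPullQPS: 5
resolvConf: /etc/resolv.conf
rotateCertificates: true
runtimeRequestTimeout: 2m0s
serializeImagePulls: true
staticPodPath: /etc/kubernetes/manifests
streamingConnectionIdleTimeout: 4h0m0s
syncFrequency: 1m0s
volumeStatsAggPeriod: 1m0s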

+ 7 - 0
kube-system/kubernetes-dashboard-settings.configmap.yaml

@@ -0,0 +1,7 @@
+apiVersion: v1
+data:
+  _global: '{"clusterName":"","itemsPerPage":10,"autoRefreshTimeInterval":5}'
+kind: ConfigMap
+metadata:
+  name: kubernetes-dashboard-settings
+  namespace: kube-system

+ 71 - 0
kube-system/kubernetes-dashboard.deployment.yaml

@@ -0,0 +1,71 @@
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  annotations:
+    deployment.kubernetes.io/revision: '1'
+  labels:
+    k8s-app: kubernetes-dashboard
+  name: kubernetes-dashboard
+  namespace: kube-system
+spec:
+  progressDeadlineSeconds: 600
+  replicas: 1
+  revisionHistoryLimit: 10
+  selector:
+    matchLabels:
+      k8s-app: kubernetes-dashboard
+  strategy:
+    rollingUpdate:
+      maxSurge: 25%
+      maxUnavailable: 25%
+    type: RollingUpdate
+  template:
+    metadata:
+      creationTimestamp: null
+      labels:
+        k8s-app: kubernetes-dashboard
+    spec:
+      containers:
+      - args:
+        - --auto-generate-certificates
+        image: k8s.gcr.io/kubernetes-dashboard-amd64:v1.10.0
+        imagePullPolicy: IfNotPresent
+        livenessProbe:
+          failureThreshold: 3
+          httpGet:
+            path: /
+            port: 8443
+            scheme: HTTPS
+          initialDelaySeconds: 30
+          periodSeconds: 10
+          successThreshold: 1
+          timeoutSeconds: 30
+        name: kubernetes-dashboard
+        ports:
+        - containerPort: 8443
+          protocol: TCP
+        resources: {}
+        terminationMessagePath: /dev/termination-log
+        terminationMessagePolicy: File
+        volumeMounts:
+        - mountPath: /certs
+          name: kubernetes-dashboard-certs
+        - mountPath: /tmp
+          name: tmp-volume
+      dnsPolicy: ClusterFirst
+      restartPolicy: Always
+      schedulerName: default-scheduler
+      securityContext: {}
+      serviceAccount: kubernetes-dashboard
+      serviceAccountName: kubernetes-dashboard
+      terminationGracePeriodSeconds: 30
+      tolerations:
+      - effect: NoSchedule
+        key: node-role.kubernetes.io/master
+      volumes:
+      - name: kubernetes-dashboard-certs
+        secret:
+          defaultMode: 420
+          secretName: kubernetes-dashboard-certs
+      - emptyDir: {}
+        name: tmp-volume

+ 16 - 0
kube-system/kubernetes-dashboard.svc.yaml

@@ -0,0 +1,16 @@
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    k8s-app: kubernetes-dashboard
+  name: kubernetes-dashboard
+  namespace: kube-system
+spec:
+  ports:
+  - port: 443
+    protocol: TCP
+    targetPort: 8443
+  selector:
+    k8s-app: kubernetes-dashboard
+  sessionAffinity: None
+  type: ClusterIP

+ 74 - 0
kube-system/tiller-deploy.deployment.yaml

@@ -0,0 +1,74 @@
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  annotations:
+    deployment.kubernetes.io/revision: '1'
+  labels:
+    app: helm
+    name: tiller
+  name: tiller-deploy
+  namespace: kube-system
+spec:
+  progressDeadlineSeconds: 600
+  replicas: 1
+  revisionHistoryLimit: 10
+  selector:
+    matchLabels:
+      app: helm
+      name: tiller
+  strategy:
+    rollingUpdate:
+      maxSurge: 1
+      maxUnavailable: 1
+    type: RollingUpdate
+  template:
+    metadata:
+      creationTimestamp: null
+      labels:
+        app: helm
+        name: tiller
+    spec:
+      containers:
+      - env:
+        - name: TILLER_NAMESPACE
+          value: kube-system
+        - name: TILLER_HISTORY_MAX
+          value: '0'
+        image: gcr.io/kubernetes-helm/tiller:v2.10.0
+        imagePullPolicy: IfNotPresent
+        livenessProbe:
+          failureThreshold: 3
+          httpGet:
+            path: /liveness
+            port: 44135
+            scheme: HTTP
+          initialDelaySeconds: 1
+          periodSeconds: 10
+          successThreshold: 1
+          timeoutSeconds: 1
+        name: tiller
+        ports:
+        - containerPort: 44134
+          name: tiller
+          protocol: TCP
+        - containerPort: 44135
+          name: http
+          protocol: TCP
+        readinessProbe:
+          failureThreshold: 3
+          httpGet:
+            path: /readiness
+            port: 44135
+            scheme: HTTP
+          initialDelaySeconds: 1
+          periodSeconds: 10
+          successThreshold: 1
+          timeoutSeconds: 1
+        resources: {}
+        terminationMessagePath: /dev/termination-log
+        terminationMessagePolicy: File
+      dnsPolicy: ClusterFirst
+      restartPolicy: Always
+      schedulerName: default-scheduler
+      securityContext: {}
+      terminationGracePeriodSeconds: 30

+ 19 - 0
kube-system/tiller-deploy.svc.yaml

@@ -0,0 +1,19 @@
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    app: helm
+    name: tiller
+  name: tiller-deploy
+  namespace: kube-system
+spec:
+  ports:
+  - name: tiller
+    port: 44134
+    protocol: TCP
+    targetPort: tiller
+  selector:
+    app: helm
+    name: tiller
+  sessionAffinity: None
+  type: ClusterIP

+ 7 - 0
kube-system/weave-net.configmap.yaml

@@ -0,0 +1,7 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  annotations:
+    kube-peers.weave.works/peers: '{"Peers":[{"PeerName":"d6:00:6a:e5:aa:39","NodeName":"k8s"}]}'
+  name: weave-net
+  namespace: kube-system

+ 135 - 0
kube-system/weave-net.ds.yaml

File diff suppressed because it is too large

+ 9 - 0
metallb-system/config.configmap.yaml

@@ -0,0 +1,9 @@
+apiVersion: v1
+data:
+  config: "address-pools:\n- name: default\n  protocol: arp\n  arp-network: 62.220.135.192/26\n\
+    \  cidr:\n  - 62.220.135.216/29\n"
+kind: ConfigMap
+metadata:
+  annotations: {}
+  name: config
+  namespace: metallb-system
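
Decoded, the MetalLB config reads (a single ARP pool handing out addresses from 62.220.135.216/29):

address-pools:
- name: default
  protocol: arp
  arp-network: 62.220.135.192/26
  cidr:
  - 62.220.135.216/29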

+ 63 - 0
metallb-system/controller.deployment.yaml

@@ -0,0 +1,63 @@
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  annotations:
+    deployment.kubernetes.io/revision: '1'
+    prometheus.io/port: '7472'
+    prometheus.io/scrape: 'true'
+  labels:
+    app: metallb
+    component: controller
+  name: controller
+  namespace: metallb-system
+spec:
+  progressDeadlineSeconds: 600
+  replicas: 1
+  revisionHistoryLimit: 3
+  selector:
+    matchLabels:
+      app: metallb
+      component: controller
+  strategy:
+    rollingUpdate:
+      maxSurge: 25%
+      maxUnavailable: 25%
+    type: RollingUpdate
+  template:
+    metadata:
+      creationTimestamp: null
+      labels:
+        app: metallb
+        component: controller
+    spec:
+      containers:
+      - args:
+        - --port=7472
+        image: metallb/controller:v0.4.5
+        imagePullPolicy: IfNotPresent
+        name: controller
+        ports:
+        - containerPort: 7472
+          name: monitoring
+          protocol: TCP
+        resources:
+          limits:
+            cpu: 100m
+            memory: 100Mi
+        securityContext:
+          allowPrivilegeEscalation: false
+          capabilities:
+            drop:
+            - all
+          readOnlyRootFilesystem: true
+        terminationMessagePath: /dev/termination-log
+        terminationMessagePolicy: File
+      dnsPolicy: ClusterFirst
+      restartPolicy: Always
+      schedulerName: default-scheduler
+      securityContext:
+        runAsNonRoot: true
+        runAsUser: 65534
+      serviceAccount: controller
+      serviceAccountName: controller
+      terminationGracePeriodSeconds: 0

+ 74 - 0
metallb-system/speaker.ds.yaml

@@ -0,0 +1,74 @@
+apiVersion: extensions/v1beta1
+kind: DaemonSet
+metadata:
+  annotations: {}
+  labels:
+    app: metallb
+    component: speaker
+  name: speaker
+  namespace: metallb-system
+spec:
+  revisionHistoryLimit: 10
+  selector:
+    matchLabels:
+      app: metallb
+      component: speaker
+  template:
+    metadata:
+      annotations:
+        prometheus.io/port: '7472'
+        prometheus.io/scrape: 'true'
+      creationTimestamp: null
+      labels:
+        app: metallb
+        component: speaker
+    spec:
+      containers:
+      - args:
+        - --port=7472
+        env:
+        - name: METALLB_NODE_IP
+          valueFrom:
+            fieldRef:
+              apiVersion: v1
+              fieldPath: status.hostIP
+        - name: METALLB_NODE_NAME
+          valueFrom:
+            fieldRef:
+              apiVersion: v1
+              fieldPath: spec.nodeName
+        image: metallb/speaker:v0.4.5
+        imagePullPolicy: IfNotPresent
+        name: speaker
+        ports:
+        - containerPort: 7472
+          hostPort: 7472
+          name: monitoring
+          protocol: TCP
+        resources:
+          limits:
+            cpu: 100m
+            memory: 100Mi
+        securityContext:
+          allowPrivilegeEscalation: false
+          capabilities:
+            add:
+            - net_raw
+            drop:
+            - all
+          readOnlyRootFilesystem: true
+        terminationMessagePath: /dev/termination-log
+        terminationMessagePolicy: File
+      dnsPolicy: ClusterFirst
+      hostNetwork: true
+      restartPolicy: Always
+      schedulerName: default-scheduler
+      securityContext: {}
+      serviceAccount: speaker
+      serviceAccountName: speaker
+      terminationGracePeriodSeconds: 0
+  templateGeneration: 1
+  updateStrategy:
+    rollingUpdate:
+      maxUnavailable: 1
+    type: RollingUpdate

+ 68 - 0
namespace.yaml

@@ -0,0 +1,68 @@
+apiVersion: v1
+items:
+- apiVersion: v1
+  kind: Namespace
+  metadata:
+    name: default
+    namespace: ''
+  spec:
+    finalizers:
+    - kubernetes
+  status:
+    phase: Active
+- apiVersion: v1
+  kind: Namespace
+  metadata:
+    name: demo
+    namespace: ''
+  spec:
+    finalizers:
+    - kubernetes
+  status:
+    phase: Active
+- apiVersion: v1
+  kind: Namespace
+  metadata:
+    name: kube-public
+    namespace: ''
+  spec:
+    finalizers:
+    - kubernetes
+  status:
+    phase: Active
+- apiVersion: v1
+  kind: Namespace
+  metadata:
+    name: kube-system
+    namespace: ''
+  spec:
+    finalizers:
+    - kubernetes
+  status:
+    phase: Active
+- apiVersion: v1
+  kind: Namespace
+  metadata:
+    annotations: {}
+    name: metallb-system
+    namespace: ''
+  spec:
+    finalizers:
+    - kubernetes
+  status:
+    phase: Active
+- apiVersion: v1
+  kind: Namespace
+  metadata:
+    annotations: {}
+    name: power-monitoring
+    namespace: ''
+  spec:
+    finalizers:
+    - kubernetes
+  status:
+    phase: Active
+kind: List
+metadata:
+  resourceVersion: ''
+  selfLink: ''

+ 72 - 0
power-monitoring/grafana.deployment.yaml

@@ -0,0 +1,72 @@
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  annotations:
+    deployment.kubernetes.io/revision: '4'
+  labels:
+    k8s-app: grafana
+    task: monitoring
+  name: grafana
+  namespace: power-monitoring
+spec:
+  progressDeadlineSeconds: 600
+  replicas: 1
+  revisionHistoryLimit: 10
+  selector:
+    matchLabels:
+      k8s-app: grafana
+      task: monitoring
+  strategy:
+    rollingUpdate:
+      maxSurge: 1
+      maxUnavailable: 1
+    type: RollingUpdate
+  template:
+    metadata:
+      creationTimestamp: null
+      labels:
+        k8s-app: grafana
+        task: monitoring
+    spec:
+      containers:
+      - env:
+        - name: INFLUXDB_HOST
+          value: influxdb
+        - name: GF_SERVER_HTTP_PORT
+          value: '3000'
+        - name: GF_AUTH_BASIC_ENABLED
+          value: 'false'
+        - name: GF_AUTH_ANONYMOUS_ENABLED
+          value: 'true'
+        - name: GF_AUTH_ANONYMOUS_ORG_ROLE
+          value: Viewer
+        - name: GF_SERVER_ROOT_URL
+          value: /
+        image: k8s.gcr.io/heapster-grafana-amd64:v4.4.3
+        imagePullPolicy: IfNotPresent
+        name: grafana
+        ports:
+        - containerPort: 3000
+          protocol: TCP
+        resources: {}
+        terminationMessagePath: /dev/termination-log
+        terminationMessagePolicy: File
+        volumeMounts:
+        - mountPath: /etc/ssl/certs
+          name: ca-certificates
+          readOnly: true
+        - mountPath: /var
+          name: grafana-storage
+      dnsPolicy: ClusterFirst
+      restartPolicy: Always
+      schedulerName: default-scheduler
+      securityContext: {}
+      terminationGracePeriodSeconds: 30
+      volumes:
+      - hostPath:
+          path: /etc/ssl/certs
+          type: ''
+        name: ca-certificates
+      - name: grafana-storage
+        persistentVolumeClaim:
+          claimName: grafana-data

+ 19 - 0
power-monitoring/grafana.svc.yaml

@@ -0,0 +1,19 @@
+apiVersion: v1
+kind: Service
+metadata:
+  annotations: {}
+  labels:
+    kubernetes.io/name: monitoring-grafana
+  name: grafana
+  namespace: power-monitoring
+spec:
+  externalTrafficPolicy: Cluster
+  ports:
+  - nodePort: 31146
+    port: 80
+    protocol: TCP
+    targetPort: 3000
+  selector:
+    k8s-app: grafana
+  sessionAffinity: None
+  type: LoadBalancer

+ 49 - 0
power-monitoring/influxdb.deployment.yaml

@@ -0,0 +1,49 @@
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  annotations:
+    deployment.kubernetes.io/revision: '1'
+  labels:
+    k8s-app: influxdb
+    task: monitoring
+  name: influxdb
+  namespace: power-monitoring
+spec:
+  progressDeadlineSeconds: 600
+  replicas: 1
+  revisionHistoryLimit: 10
+  selector:
+    matchLabels:
+      k8s-app: influxdb
+      task: monitoring
+  strategy:
+    rollingUpdate:
+      maxSurge: 1
+      maxUnavailable: 1
+    type: RollingUpdate
+  template:
+    metadata:
+      creationTimestamp: null
+      labels:
+        k8s-app: influxdb
+        task: monitoring
+    spec:
+      containers:
+      - image: k8s.gcr.io/heapster-influxdb-amd64:v1.3.3
+        imagePullPolicy: IfNotPresent
+        name: influxdb
+        resources: {}
+        terminationMessagePath: /dev/termination-log
+        terminationMessagePolicy: File
+        volumeMounts:
+        - mountPath: /data
+          name: influxdb-storage
+      dnsPolicy: ClusterFirst
+      restartPolicy: Always
+      schedulerName: default-scheduler
+      securityContext: {}
+      terminationGracePeriodSeconds: 30
+      volumes:
+      - name: influxdb-storage
+        persistentVolumeClaim:
+          claimName: power-monitoring-influxdb

+ 17 - 0
power-monitoring/influxdb.svc.yaml

@@ -0,0 +1,17 @@
+apiVersion: v1
+kind: Service
+metadata:
+  annotations: {}
+  labels:
+    task: monitoring
+  name: influxdb
+  namespace: power-monitoring
+spec:
+  ports:
+  - port: 8086
+    protocol: TCP
+    targetPort: 8086
+  selector:
+    k8s-app: influxdb
+  sessionAffinity: None
+  type: ClusterIP

+ 36 - 0
power-monitoring/logger.py.configmap.yaml

@@ -0,0 +1,36 @@
+apiVersion: v1
+data:
+  logger.py: "#!/usr/bin/env python3\n\nimport time\nfrom datetime import datetime\n\
+    import requests\nimport traceback\nfrom influxdb import InfluxDBClient\n\nOUT\
+    \ = \"/data/log.csv\"\nURL = \"http://62.220.135.196:8080/sensor/{}?version=1.0&interval=minute&unit=watt\"\
+    \nSENSORS = (\n  \"34cde81adabfb1ce819eca8fea6949b6\",\n  \"b7755b5f3ec05fcdc67f449241a9912a\"\
+    ,\n  \"e67e0685f747b30d855108ab781abdfc\"\n)\n\nprint(\"Starting logger\")\ndb\
+    \ = InfluxDBClient('influxdb', database='power')\nprint(\"Connected to %s\" %\
+    \ db)\n\nwhile True:\n  points = []\n  last_timestamps = {}\n\n  for sensor in\
+    \ SENSORS:\n    try:\n      last_timestamp = last_timestamps.get(sensor, 0)\n\
+    \      flukso = requests.get(URL.format(sensor)).json()\n      print(\"Before\
+    \ filtering: \" + str(len(flukso)))\n\n      # Remove NaN values\n      flukso\
+    \ = filter(lambda x: x[1] != 'nan', flukso)\n\n      # Remove values that were\
+    \ already seen before\n      flukso = list(filter(lambda x: x[0] >= last_timestamp,\
+    \ flukso))\n\n      if len(flukso) > 0:\n        last_timestamp = flukso[-1][0]\n\
+    \n      print(\"After filtering: \" + str(len(flukso)))\n\n      for timestamp,\
+    \ value in flukso:\n        points.append((timestamp, sensor, value))\n    except\
+    \ Exception as e:\n      traceback.print_exc()\n\n  try:\n    db.write_points([{\n\
+    \             \"measurement\": \"flukso\",\n             \"time\": datetime.fromtimestamp(timestamp).isoformat(),\n\
+    \             \"tags\": {\n                \"sensor\": sensor,\n             },\n\
+    \             \"fields\": { \"watts\": float(value) }\n           } for timestamp,\
+    \ sensor, value in points])\n    print(\"Wrote %d samples to influxdb\" % len(points))\n\
+    \  except Exception as e:\n    print(\"Write to influxdb failed\")\n    traceback.print_exc()\n\
+    \n  try:\n    with open(OUT, 'a') as f:\n      for timestamp, sensor, value in\
+    \ points:\n        f.write(\",\".join((str(timestamp), sensor, str(value))) +\
+    \ \"\\n\")\n    print(\"Wrote %d samples to csv\" % len(points))\n  except Exception\
+    \ as e:\n    print(\"Write to csv file failed\")\n    traceback.print_exc()\n\n\
+    \  sleeptime = 45 * 1000000 - datetime.utcnow().microsecond\n  print(\"Sleeping\
+    \ %d us\" % sleeptime)\n  time.sleep(sleeptime/1000000.0)\n"
+kind: ConfigMap
+metadata:
+  annotations: {}
+  labels:
+    app: power-monitor
+  name: logger.py
+  namespace: power-monitoring
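
The escaped logger.py here decodes to the script below, which writes each reading to both InfluxDB and a CSV file on a roughly 45-second cycle. Note that, as committed, last_timestamps is re-initialized on every loop iteration and the per-sensor last_timestamp is never written back to it, so the "already seen" filter never actually drops anything; it is transcribed verbatim:

#!/usr/bin/env python3

import time
from datetime import datetime
import requests
import traceback
from influxdb import InfluxDBClient

OUT = "/data/log.csv"
URL = "http://62.220.135.196:8080/sensor/{}?version=1.0&interval=minute&unit=watt"
SENSORS = (
  "34cde81adabfb1ce819eca8fea6949b6",
  "b7755b5f3ec05fcdc67f449241a9912a",
  "e67e0685f747b30d855108ab781abdfc"
)

print("Starting logger")
db = InfluxDBClient('influxdb', database='power')
print("Connected to %s" % db)

while True:
  points = []
  last_timestamps = {}

  for sensor in SENSORS:
    try:
      last_timestamp = last_timestamps.get(sensor, 0)
      flukso = requests.get(URL.format(sensor)).json()
      print("Before filtering: " + str(len(flukso)))

      # Remove NaN values
      flukso = filter(lambda x: x[1] != 'nan', flukso)

      # Remove values that were already seen before
      flukso = list(filter(lambda x: x[0] >= last_timestamp, flukso))

      if len(flukso) > 0:
        last_timestamp = flukso[-1][0]

      print("After filtering: " + str(len(flukso)))

      for timestamp, value in flukso:
        points.append((timestamp, sensor, value))
    except Exception as e:
      traceback.print_exc()

  try:
    db.write_points([{
             "measurement": "flukso",
             "time": datetime.fromtimestamp(timestamp).isoformat(),
             "tags": {
                "sensor": sensor,
             },
             "fields": { "watts": float(value) }
           } for timestamp, sensor, value in points])
    print("Wrote %d samples to influxdb" % len(points))
  except Exception as e:
    print("Write to influxdb failed")
    traceback.print_exc()

  try:
    with open(OUT, 'a') as f:
      for timestamp, sensor, value in points:
        f.write(",".join((str(timestamp), sensor, str(value))) + "\n")
    print("Wrote %d samples to csv" % len(points))
  except Exception as e:
    print("Write to csv file failed")
    traceback.print_exc()

  sleeptime = 45 * 1000000 - datetime.utcnow().microsecond
  print("Sleeping %d us" % sleeptime)
  time.sleep(sleeptime/1000000.0)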

+ 69 - 0
power-monitoring/power-monitoring.deployment.yaml

@@ -0,0 +1,69 @@
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  annotations:
+    deployment.kubernetes.io/revision: '5'
+  labels:
+    app: power-monitoring
+  name: power-monitoring
+  namespace: power-monitoring
+spec:
+  progressDeadlineSeconds: 600
+  replicas: 1
+  revisionHistoryLimit: 10
+  selector:
+    matchLabels:
+      app: power-monitoring
+  strategy:
+    rollingUpdate:
+      maxSurge: 0
+      maxUnavailable: 1
+    type: RollingUpdate
+  template:
+    metadata:
+      creationTimestamp: null
+      labels:
+        app: power-monitoring
+      name: power-monitoring
+    spec:
+      containers:
+      - command:
+        - sh
+        - -c
+        - pip install requests influxdb && python /code/logger.py
+        image: python:3
+        imagePullPolicy: IfNotPresent
+        name: data-logger
+        resources: {}
+        terminationMessagePath: /dev/termination-log
+        terminationMessagePolicy: File
+        volumeMounts:
+        - mountPath: /data
+          name: data
+        - mountPath: /code
+          name: code
+      - image: nginx
+        imagePullPolicy: Always
+        name: webserver
+        ports:
+        - containerPort: 80
+          protocol: TCP
+        resources: {}
+        terminationMessagePath: /dev/termination-log
+        terminationMessagePolicy: File
+        volumeMounts:
+        - mountPath: /usr/share/nginx/html
+          name: data
+      dnsPolicy: ClusterFirst
+      restartPolicy: Always
+      schedulerName: default-scheduler
+      securityContext: {}
+      terminationGracePeriodSeconds: 30
+      volumes:
+      - name: data
+        persistentVolumeClaim:
+          claimName: power-monitoring-data
+      - configMap:
+          defaultMode: 420
+          name: logger.py
+        name: code

+ 19 - 0
power-monitoring/power-monitoring.svc.yaml

@@ -0,0 +1,19 @@
+apiVersion: v1
+kind: Service
+metadata:
+  annotations: {}
+  labels:
+    app: power-monitoring
+  name: power-monitoring
+  namespace: power-monitoring
+spec:
+  externalTrafficPolicy: Cluster
+  ports:
+  - nodePort: 31165
+    port: 80
+    protocol: TCP
+    targetPort: 80
+  selector:
+    app: power-monitoring
+  sessionAffinity: None
+  type: LoadBalancer

+ 15 - 0
storageclasses.yaml

@@ -0,0 +1,15 @@
+apiVersion: v1
+items:
+- apiVersion: storage.k8s.io/v1
+  kind: StorageClass
+  metadata:
+    annotations: {}
+    name: local-storage
+    namespace: ''
+  provisioner: kubernetes.io/no-provisioner
+  reclaimPolicy: Delete
+  volumeBindingMode: WaitForFirstConsumer
+kind: List
+metadata:
+  resourceVersion: ''
+  selfLink: ''