Diffstat (limited to 'resources/templates/kubernetes')
-rw-r--r--  resources/templates/kubernetes/calico_v2.4.1.yaml                  387
-rw-r--r--  resources/templates/kubernetes/csit.yaml                             4
-rw-r--r--  resources/templates/kubernetes/etcd.yaml                            25
-rw-r--r--  resources/templates/kubernetes/eth-l2bdbase-eth-2memif-1vnf.yaml   206
-rw-r--r--  resources/templates/kubernetes/eth-l2xcbase-eth-2memif-1vnf.yaml   206
-rw-r--r--  resources/templates/kubernetes/kafka.yaml                            22
6 files changed, 850 insertions, 0 deletions
diff --git a/resources/templates/kubernetes/calico_v2.4.1.yaml b/resources/templates/kubernetes/calico_v2.4.1.yaml
new file mode 100644
index 0000000000..921e6923ca
--- /dev/null
+++ b/resources/templates/kubernetes/calico_v2.4.1.yaml
@@ -0,0 +1,387 @@
+# Calico Version v2.4.1
+# https://docs.projectcalico.org/v2.4/releases#v2.4.1
+# This manifest includes the following component versions:
+# calico/node:v2.4.1
+# calico/cni:v1.10.0
+# calico/kube-policy-controller:v0.7.0
+
+# This ConfigMap is used to configure a self-hosted Calico installation.
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: calico-config
+ namespace: kube-system
+data:
+ # The location of your etcd cluster. This uses the Service clusterIP
+ # defined below.
+ etcd_endpoints: "http://10.96.232.136:6666"
+
+ # Configure the Calico backend to use.
+ calico_backend: "bird"
+
+ # The CNI network configuration to install on each node.
+ cni_network_config: |-
+ {
+ "name": "k8s-pod-network",
+ "cniVersion": "0.1.0",
+ "type": "calico",
+ "etcd_endpoints": "__ETCD_ENDPOINTS__",
+ "log_level": "info",
+ "mtu": 1500,
+ "ipam": {
+ "type": "calico-ipam"
+ },
+ "policy": {
+ "type": "k8s",
+ "k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__",
+ "k8s_auth_token": "__SERVICEACCOUNT_TOKEN__"
+ },
+ "kubernetes": {
+ "kubeconfig": "/etc/cni/net.d/__KUBECONFIG_FILENAME__"
+ }
+ }
+
+---
+
+# This manifest installs the Calico etcd on the kubeadm master. This uses a DaemonSet
+# to force it to run on the master even when the master isn't schedulable, and uses
+# nodeSelector to ensure it only runs on the master.
+apiVersion: extensions/v1beta1
+kind: DaemonSet
+metadata:
+ name: calico-etcd
+ namespace: kube-system
+ labels:
+ k8s-app: calico-etcd
+spec:
+ template:
+ metadata:
+ labels:
+ k8s-app: calico-etcd
+ annotations:
+ # Mark this pod as a critical add-on; when enabled, the critical add-on scheduler
+ # reserves resources for critical add-on pods so that they can be rescheduled after
+ # a failure. This annotation works in tandem with the toleration below.
+ scheduler.alpha.kubernetes.io/critical-pod: ''
+ spec:
+ # Only run this pod on the master.
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ # Allow this pod to be rescheduled while the node is in "critical add-ons only" mode.
+ # This, along with the annotation above, marks this pod as a critical add-on.
+ - key: CriticalAddonsOnly
+ operator: Exists
+ nodeSelector:
+ node-role.kubernetes.io/master: ""
+ hostNetwork: true
+ containers:
+ - name: calico-etcd
+ image: quay.io/coreos/etcd:v3.1.10
+ env:
+ - name: CALICO_ETCD_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ command: ["/bin/sh","-c"]
+ args: ["/usr/local/bin/etcd --name=calico --data-dir=/var/etcd/calico-data --advertise-client-urls=http://$CALICO_ETCD_IP:6666 --listen-client-urls=http://0.0.0.0:6666 --listen-peer-urls=http://0.0.0.0:6667"]
+ volumeMounts:
+ - name: var-etcd
+ mountPath: /var/etcd
+ volumes:
+ - name: var-etcd
+ hostPath:
+ path: /var/etcd
+
+---
+
+# This manifest installs the Service which gets traffic to the Calico
+# etcd.
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ k8s-app: calico-etcd
+ name: calico-etcd
+ namespace: kube-system
+spec:
+ # Select the calico-etcd pod running on the master.
+ selector:
+ k8s-app: calico-etcd
+ # This ClusterIP needs to be known in advance, since we cannot rely
+ # on DNS to get access to etcd.
+ clusterIP: 10.96.232.136
+ ports:
+ - port: 6666
+
+---
+
+# This manifest installs the calico/node container, as well
+# as the Calico CNI plugins and network config on
+# each master and worker node in a Kubernetes cluster.
+kind: DaemonSet
+apiVersion: extensions/v1beta1
+metadata:
+ name: calico-node
+ namespace: kube-system
+ labels:
+ k8s-app: calico-node
+spec:
+ selector:
+ matchLabels:
+ k8s-app: calico-node
+ template:
+ metadata:
+ labels:
+ k8s-app: calico-node
+ annotations:
+ # Mark this pod as a critical add-on; when enabled, the critical add-on scheduler
+ # reserves resources for critical add-on pods so that they can be rescheduled after
+ # a failure. This annotation works in tandem with the toleration below.
+ scheduler.alpha.kubernetes.io/critical-pod: ''
+ spec:
+ hostNetwork: true
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ # Allow this pod to be rescheduled while the node is in "critical add-ons only" mode.
+ # This, along with the annotation above, marks this pod as a critical add-on.
+ - key: CriticalAddonsOnly
+ operator: Exists
+ serviceAccountName: calico-cni-plugin
+ containers:
+ # Runs calico/node container on each Kubernetes node. This
+ # container programs network policy and routes on each
+ # host.
+ - name: calico-node
+ image: quay.io/calico/node:v2.4.1
+ env:
+ # The location of the Calico etcd cluster.
+ - name: ETCD_ENDPOINTS
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: etcd_endpoints
+ # Enable BGP. Disable to enforce policy only.
+ - name: CALICO_NETWORKING_BACKEND
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: calico_backend
+ # Cluster type to identify the deployment type
+ - name: CLUSTER_TYPE
+ value: "kubeadm,bgp"
+ # Disable file logging so `kubectl logs` works.
+ - name: CALICO_DISABLE_FILE_LOGGING
+ value: "true"
+ # Set Felix endpoint to host default action to ACCEPT.
+ - name: FELIX_DEFAULTENDPOINTTOHOSTACTION
+ value: "ACCEPT"
+ # Configure the IP Pool from which Pod IPs will be chosen.
+ - name: CALICO_IPV4POOL_CIDR
+ value: "192.168.0.0/16"
+ - name: CALICO_IPV4POOL_IPIP
+ value: "always"
+ # Disable IPv6 on Kubernetes.
+ - name: FELIX_IPV6SUPPORT
+ value: "false"
+ # Set MTU for tunnel device used if ipip is enabled
+ - name: FELIX_IPINIPMTU
+ value: "1440"
+ # Set Felix logging to "info"
+ - name: FELIX_LOGSEVERITYSCREEN
+ value: "info"
+ - name: FELIX_HEALTHENABLED
+ value: "true"
+ # Auto-detect the BGP IP address.
+ - name: IP
+ value: ""
+ securityContext:
+ privileged: true
+ resources:
+ requests:
+ cpu: 250m
+ livenessProbe:
+ httpGet:
+ path: /liveness
+ port: 9099
+ periodSeconds: 10
+ initialDelaySeconds: 10
+ failureThreshold: 6
+ readinessProbe:
+ httpGet:
+ path: /readiness
+ port: 9099
+ periodSeconds: 10
+ volumeMounts:
+ - mountPath: /lib/modules
+ name: lib-modules
+ readOnly: true
+ - mountPath: /var/run/calico
+ name: var-run-calico
+ readOnly: false
+ # This container installs the Calico CNI binaries
+ # and CNI network config file on each node.
+ - name: install-cni
+ image: quay.io/calico/cni:v1.10.0
+ command: ["/install-cni.sh"]
+ env:
+ # The location of the Calico etcd cluster.
+ - name: ETCD_ENDPOINTS
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: etcd_endpoints
+ # The CNI network config to install on each node.
+ - name: CNI_NETWORK_CONFIG
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: cni_network_config
+ volumeMounts:
+ - mountPath: /host/opt/cni/bin
+ name: cni-bin-dir
+ - mountPath: /host/etc/cni/net.d
+ name: cni-net-dir
+ volumes:
+ # Used by calico/node.
+ - name: lib-modules
+ hostPath:
+ path: /lib/modules
+ - name: var-run-calico
+ hostPath:
+ path: /var/run/calico
+ # Used to install CNI.
+ - name: cni-bin-dir
+ hostPath:
+ path: /opt/cni/bin
+ - name: cni-net-dir
+ hostPath:
+ path: /etc/cni/net.d
+
+---
+
+# This manifest deploys the Calico policy controller on Kubernetes.
+# See https://github.com/projectcalico/k8s-policy
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ name: calico-policy-controller
+ namespace: kube-system
+ labels:
+ k8s-app: calico-policy
+spec:
+ # The policy controller can only have a single active instance.
+ replicas: 1
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: calico-policy-controller
+ namespace: kube-system
+ labels:
+ k8s-app: calico-policy-controller
+ annotations:
+ # Mark this pod as a critical add-on; when enabled, the critical add-on scheduler
+ # reserves resources for critical add-on pods so that they can be rescheduled after
+ # a failure. This annotation works in tandem with the toleration below.
+ scheduler.alpha.kubernetes.io/critical-pod: ''
+ spec:
+ # The policy controller must run in the host network namespace so that
+ # it isn't governed by policy that would prevent it from working.
+ hostNetwork: true
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ # Allow this pod to be rescheduled while the node is in "critical add-ons only" mode.
+ # This, along with the annotation above, marks this pod as a critical add-on.
+ - key: CriticalAddonsOnly
+ operator: Exists
+ serviceAccountName: calico-policy-controller
+ containers:
+ - name: calico-policy-controller
+ image: quay.io/calico/kube-policy-controller:v0.7.0
+ env:
+ # The location of the Calico etcd cluster.
+ - name: ETCD_ENDPOINTS
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: etcd_endpoints
+ # The location of the Kubernetes API. Use the default Kubernetes
+ # service for API access.
+ - name: K8S_API
+ value: "https://kubernetes.default:443"
+ # Since we're running in the host namespace and might not have KubeDNS
+ # access, configure the container's /etc/hosts to resolve
+ # kubernetes.default to the correct service clusterIP.
+ - name: CONFIGURE_ETC_HOSTS
+ value: "true"
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+ name: calico-cni-plugin
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: calico-cni-plugin
+subjects:
+- kind: ServiceAccount
+ name: calico-cni-plugin
+ namespace: kube-system
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: calico-cni-plugin
+ namespace: kube-system
+rules:
+ - apiGroups: [""]
+ resources:
+ - pods
+ - nodes
+ verbs:
+ - get
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: calico-cni-plugin
+ namespace: kube-system
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+ name: calico-policy-controller
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: calico-policy-controller
+subjects:
+- kind: ServiceAccount
+ name: calico-policy-controller
+ namespace: kube-system
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: calico-policy-controller
+ namespace: kube-system
+rules:
+ - apiGroups:
+ - ""
+ - extensions
+ resources:
+ - pods
+ - namespaces
+ - networkpolicies
+ verbs:
+ - watch
+ - list
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: calico-policy-controller
+ namespace: kube-system
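
The Calico manifest above bundles the etcd instance, the calico-node DaemonSet, the policy controller Deployment and the RBAC objects they use. A minimal sketch of applying and verifying it on a kubeadm master, assuming kubectl is already configured for the cluster:

    kubectl apply -f resources/templates/kubernetes/calico_v2.4.1.yaml
    kubectl get pods -n kube-system -l k8s-app=calico-node -o wide
    kubectl get pods -n kube-system -l k8s-app=calico-policy-controller

All calico-node pods should reach Running/Ready before the CSIT pods defined below are scheduled.
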
diff --git a/resources/templates/kubernetes/csit.yaml b/resources/templates/kubernetes/csit.yaml
new file mode 100644
index 0000000000..4ae72063e7
--- /dev/null
+++ b/resources/templates/kubernetes/csit.yaml
@@ -0,0 +1,4 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: csit
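
The csit namespace holds every test pod defined in the remaining templates. A quick check after applying it, assuming the same kubectl context:

    kubectl apply -f resources/templates/kubernetes/csit.yaml
    kubectl get namespace csit
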
diff --git a/resources/templates/kubernetes/etcd.yaml b/resources/templates/kubernetes/etcd.yaml
new file mode 100644
index 0000000000..66c1a57fad
--- /dev/null
+++ b/resources/templates/kubernetes/etcd.yaml
@@ -0,0 +1,25 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: etcdv3-server
+ namespace: csit
+spec:
+ hostNetwork: true
+ containers:
+ - image: quay.io/coreos/etcd:v3.0.16
+ name: etcdv3
+ command:
+ - /usr/local/bin/etcd
+ - --advertise-client-urls
+ - http://0.0.0.0:22379
+ - --listen-client-urls
+ - http://0.0.0.0:22379
+ - --listen-peer-urls
+ - http://0.0.0.0:22380
+ ports:
+ - containerPort: 22379
+ hostPort: 22379
+ name: serverport
+ env:
+ - name: ETCDCTL_API
+ value: "3"
diff --git a/resources/templates/kubernetes/eth-l2bdbase-eth-2memif-1vnf.yaml b/resources/templates/kubernetes/eth-l2bdbase-eth-2memif-1vnf.yaml
new file mode 100644
index 0000000000..726e9734ab
--- /dev/null
+++ b/resources/templates/kubernetes/eth-l2bdbase-eth-2memif-1vnf.yaml
@@ -0,0 +1,206 @@
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: sfc-controller-cfg
+ namespace: csit
+data:
+ etcd.conf: |
+ insecure-transport: true
+ dial-timeout: 1000000000
+ endpoints:
+ - "172.17.0.1:22379"
+
+ sfc.conf: |
+ sfc_controller_config_version: 1
+ description: $$TEST_NAME$$
+ host_entities:
+ - name: vswitch
+ sfc_entities:
+ - name: vswitch-vnf1
+ description: vswitch to VNF1 - memif
+ type: 3
+ elements:
+ - container: vswitch
+ port_label: $$VSWITCH_IF1$$
+ etcd_vpp_switch_key: vswitch
+ type: 5
+ - container: vnf1
+ port_label: port1
+ etcd_vpp_switch_key: vswitch
+ type: 2
+ - name: vnf1-vswitch
+ description: VNF1 to vswitch - memif
+ type: 3
+ elements:
+ - container: vswitch
+ port_label: $$VSWITCH_IF2$$
+ etcd_vpp_switch_key: vswitch
+ type: 5
+ - container: vnf1
+ port_label: port2
+ etcd_vpp_switch_key: vswitch
+ type: 2
+
+ vnf.conf: |
+ vnf_plugin_config_version: 1
+ description: VNF config
+ vnf_entities:
+ - name: vnf1
+ container: vnf1
+ l2xconnects:
+ - port_labels:
+ - port1
+ - port2
+
+---
+apiVersion: v1
+kind: Pod
+metadata:
+ name: sfc-controller
+ namespace: csit
+spec:
+ containers:
+ - name: "sfc-controller"
+ image: prod_sfc_controller
+ imagePullPolicy: IfNotPresent
+ command:
+ - /bin/sfc-controller
+ - -etcdv3-config=/opt/sfc-controller/dev/etcd.conf
+ - -sfc-config=/opt/sfc-controller/dev/sfc.conf
+ - -vnf-config=/opt/sfc-controller/dev/vnf.conf
+ volumeMounts:
+ - name: controller-config
+ mountPath: /opt/sfc-controller/dev
+ volumes:
+ - name: controller-config
+ configMap:
+ name: sfc-controller-cfg
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: vswitch-agent-cfg
+ namespace: csit
+data:
+ etcd.conf: |
+ insecure-transport: true
+ dial-timeout: 1000000000
+ endpoints:
+ - "172.17.0.1:22379"
+
+ kafka.conf: |
+ addrs:
+ - "172.17.0.1:9092"
+
+---
+apiVersion: v1
+kind: Pod
+metadata:
+ name: vswitch-vpp
+ namespace: csit
+spec:
+ hostNetwork: true
+ containers:
+ - name: "vswitch"
+ image: prod_vpp_agent_shrink
+ imagePullPolicy: IfNotPresent
+ securityContext:
+ privileged: true
+ ports:
+ - containerPort: 5002
+ - containerPort: 9191
+ readinessProbe:
+ httpGet:
+ path: /readiness
+ port: 9191
+ periodSeconds: 1
+ livenessProbe:
+ httpGet:
+ path: /liveness
+ port: 9191
+ periodSeconds: 1
+ initialDelaySeconds: 15
+ env:
+ - name: MICROSERVICE_LABEL
+ value: vswitch
+ volumeMounts:
+ - name: vpp-config
+ mountPath: /etc/vpp
+ - name: agent-config
+ mountPath: /opt/vpp-agent/dev
+ - name: memif-sockets
+ mountPath: /tmp
+ volumes:
+ - name: vpp-config
+ configMap:
+ name: vswitch-vpp-cfg
+ - name: agent-config
+ configMap:
+ name: vswitch-agent-cfg
+ - name: memif-sockets
+ hostPath:
+ path: /tmp
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: vnf-agent-cfg
+ namespace: csit
+data:
+ etcd.conf: |
+ insecure-transport: true
+ dial-timeout: 1000000000
+ endpoints:
+ - "172.17.0.1:22379"
+
+ kafka.conf: |
+ addrs:
+ - "172.17.0.1:9092"
+
+---
+apiVersion: v1
+kind: Pod
+metadata:
+ name: vnf-vpp
+ namespace: csit
+spec:
+ containers:
+ - name: "vnf"
+ image: prod_vpp_agent_shrink
+ imagePullPolicy: IfNotPresent
+ securityContext:
+ privileged: true
+ ports:
+ - containerPort: 5002
+ - containerPort: 9191
+ readinessProbe:
+ httpGet:
+ path: /readiness
+ port: 9191
+ periodSeconds: 1
+ livenessProbe:
+ httpGet:
+ path: /liveness
+ port: 9191
+ initialDelaySeconds: 15
+ env:
+ - name: MICROSERVICE_LABEL
+ value: vnf1
+ volumeMounts:
+ - name: vpp-config
+ mountPath: /etc/vpp
+ - name: agent-config
+ mountPath: /opt/vpp-agent/dev
+ - name: memif-sockets
+ mountPath: /tmp
+ volumes:
+ - name: vpp-config
+ configMap:
+ name: vnf-vpp-cfg
+ - name: agent-config
+ configMap:
+ name: vnf-agent-cfg
+ - name: memif-sockets
+ hostPath:
+ path: /tmp
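
The $$TEST_NAME$$, $$VSWITCH_IF1$$ and $$VSWITCH_IF2$$ markers are placeholders that the test framework substitutes before the manifest is applied. A shell sketch of an equivalent manual substitution; the interface names are illustrative values only:

    sed -e 's|\$\$TEST_NAME\$\$|eth-l2bdbase-eth-2memif-1vnf|' \
        -e 's|\$\$VSWITCH_IF1\$\$|GigabitEthernet0/8/0|' \
        -e 's|\$\$VSWITCH_IF2\$\$|GigabitEthernet0/9/0|' \
        resources/templates/kubernetes/eth-l2bdbase-eth-2memif-1vnf.yaml | kubectl apply -f -
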
diff --git a/resources/templates/kubernetes/eth-l2xcbase-eth-2memif-1vnf.yaml b/resources/templates/kubernetes/eth-l2xcbase-eth-2memif-1vnf.yaml
new file mode 100644
index 0000000000..7514eeb181
--- /dev/null
+++ b/resources/templates/kubernetes/eth-l2xcbase-eth-2memif-1vnf.yaml
@@ -0,0 +1,206 @@
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: sfc-controller-cfg
+ namespace: csit
+data:
+ etcd.conf: |
+ insecure-transport: true
+ dial-timeout: 1000000000
+ endpoints:
+ - "172.17.0.1:22379"
+
+ sfc.conf: |
+ sfc_controller_config_version: 1
+ description: $$TEST_NAME$$
+ host_entities:
+ - name: vswitch
+ sfc_entities:
+ - name: vswitch-vnf1
+ description: vswitch to VNF1 - memif
+ type: 4
+ elements:
+ - container: vswitch
+ port_label: $$VSWITCH_IF1$$
+ etcd_vpp_switch_key: vswitch
+ type: 5
+ - container: vnf1
+ port_label: port1
+ etcd_vpp_switch_key: vswitch
+ type: 2
+ - name: vnf1-vswitch
+ description: VNF1 to vswitch - memif
+ type: 4
+ elements:
+ - container: vswitch
+ port_label: $$VSWITCH_IF2$$
+ etcd_vpp_switch_key: vswitch
+ type: 5
+ - container: vnf1
+ port_label: port2
+ etcd_vpp_switch_key: vswitch
+ type: 2
+
+ vnf.conf: |
+ vnf_plugin_config_version: 1
+ description: VNF config
+ vnf_entities:
+ - name: vnf1
+ container: vnf1
+ l2xconnects:
+ - port_labels:
+ - port1
+ - port2
+
+---
+apiVersion: v1
+kind: Pod
+metadata:
+ name: sfc-controller
+ namespace: csit
+spec:
+ containers:
+ - name: "sfc-controller"
+ image: prod_sfc_controller
+ imagePullPolicy: IfNotPresent
+ command:
+ - /bin/sfc-controller
+ - -etcdv3-config=/opt/sfc-controller/dev/etcd.conf
+ - -sfc-config=/opt/sfc-controller/dev/sfc.conf
+ - -vnf-config=/opt/sfc-controller/dev/vnf.conf
+ volumeMounts:
+ - name: controller-config
+ mountPath: /opt/sfc-controller/dev
+ volumes:
+ - name: controller-config
+ configMap:
+ name: sfc-controller-cfg
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: vswitch-agent-cfg
+ namespace: csit
+data:
+ etcd.conf: |
+ insecure-transport: true
+ dial-timeout: 1000000000
+ endpoints:
+ - "172.17.0.1:22379"
+
+ kafka.conf: |
+ addrs:
+ - "172.17.0.1:9092"
+
+---
+apiVersion: v1
+kind: Pod
+metadata:
+ name: vswitch-vpp
+ namespace: csit
+spec:
+ hostNetwork: true
+ containers:
+ - name: "vswitch"
+ image: prod_vpp_agent_shrink
+ imagePullPolicy: IfNotPresent
+ securityContext:
+ privileged: true
+ ports:
+ - containerPort: 5002
+ - containerPort: 9191
+ readinessProbe:
+ httpGet:
+ path: /readiness
+ port: 9191
+ periodSeconds: 1
+ livenessProbe:
+ httpGet:
+ path: /liveness
+ port: 9191
+ periodSeconds: 1
+ initialDelaySeconds: 15
+ env:
+ - name: MICROSERVICE_LABEL
+ value: vswitch
+ volumeMounts:
+ - name: vpp-config
+ mountPath: /etc/vpp
+ - name: agent-config
+ mountPath: /opt/vpp-agent/dev
+ - name: memif-sockets
+ mountPath: /tmp
+ volumes:
+ - name: vpp-config
+ configMap:
+ name: vswitch-vpp-cfg
+ - name: agent-config
+ configMap:
+ name: vswitch-agent-cfg
+ - name: memif-sockets
+ hostPath:
+ path: /tmp
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: vnf-agent-cfg
+ namespace: csit
+data:
+ etcd.conf: |
+ insecure-transport: true
+ dial-timeout: 1000000000
+ endpoints:
+ - "172.17.0.1:22379"
+
+ kafka.conf: |
+ addrs:
+ - "172.17.0.1:9092"
+
+---
+apiVersion: v1
+kind: Pod
+metadata:
+ name: vnf-vpp
+ namespace: csit
+spec:
+ containers:
+ - name: "vnf"
+ image: prod_vpp_agent_shrink
+ imagePullPolicy: IfNotPresent
+ securityContext:
+ privileged: true
+ ports:
+ - containerPort: 5002
+ - containerPort: 9191
+ readinessProbe:
+ httpGet:
+ path: /readiness
+ port: 9191
+ periodSeconds: 1
+ livenessProbe:
+ httpGet:
+ path: /liveness
+ port: 9191
+ initialDelaySeconds: 15
+ env:
+ - name: MICROSERVICE_LABEL
+ value: vnf1
+ volumeMounts:
+ - name: vpp-config
+ mountPath: /etc/vpp
+ - name: agent-config
+ mountPath: /opt/vpp-agent/dev
+ - name: memif-sockets
+ mountPath: /tmp
+ volumes:
+ - name: vpp-config
+ configMap:
+ name: vnf-vpp-cfg
+ - name: agent-config
+ configMap:
+ name: vnf-agent-cfg
+ - name: memif-sockets
+ hostPath:
+ path: /tmp
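
This template differs from the eth-l2bdbase variant above only in the sfc_entities type value (4 instead of 3), selecting the L2 cross-connect rather than the bridge-domain service chain the file names indicate; the pods are otherwise identical. The vpp-agent containers expose the probe endpoints declared above (/readiness and /liveness on port 9191), so the topology can be watched coming up and the hostNetwork vswitch agent probed directly from the node; a hedged example:

    kubectl get pods -n csit -w
    curl -s http://localhost:9191/readiness
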
diff --git a/resources/templates/kubernetes/kafka.yaml b/resources/templates/kubernetes/kafka.yaml
new file mode 100644
index 0000000000..55d165f31b
--- /dev/null
+++ b/resources/templates/kubernetes/kafka.yaml
@@ -0,0 +1,22 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: kafka-server
+ namespace: csit
+spec:
+ hostNetwork: true
+ containers:
+ - image: spotify/kafka
+ name: kafka
+ ports:
+ - containerPort: 2181
+ hostPort: 2181
+ name: zookeeper
+ - containerPort: 9092
+ hostPort: 9092
+ name: kafka
+ env:
+ - name: ADVERTISED_HOST
+ value: "172.17.0.1"
+ - name: ADVERTISED_PORT
+ value: "9092"