Deploying a Layer-4 Ingress on Kubernetes

The ingress.yaml manifest below deploys the nginx-ingress-controller as a hostNetwork DaemonSet on labeled edge nodes, together with a default backend and the tcp-services / udp-services ConfigMaps used for Layer-4 (TCP/UDP) forwarding:

# cat ingress.yaml
---
apiVersion: v1
kind: Namespace
metadata:
  name: ingress-nginx

---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: nginx-ingress-controller
  namespace: ingress-nginx
spec:
  selector:
    matchLabels:
      app: ingress-nginx
  template:
    metadata:
      labels:
        app: ingress-nginx
      annotations:
        prometheus.io/port: '10254'
        prometheus.io/scrape: 'true'
    spec:
      serviceAccountName: nginx-ingress-serviceaccount
      hostNetwork: true
      nodeSelector:
        edgenode: 'true'
      containers:
        - name: nginx-ingress-controller
          image: registry.cn-hangzhou.aliyuncs.com/google_containers/nginx-ingress-controller:0.20.0
          args:
            - /nginx-ingress-controller
            - --default-backend-service=$(POD_NAMESPACE)/default-http-backend
            - --configmap=$(POD_NAMESPACE)/nginx-configuration
            - --tcp-services-configmap=$(POD_NAMESPACE)/tcp-services
            - --udp-services-configmap=$(POD_NAMESPACE)/udp-services
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          ports:
          - name: http
            containerPort: 80
          - name: https
            containerPort: 443
          livenessProbe:
            failureThreshold: 3
            httpGet:
              path: /healthz
              port: 10254
              scheme: HTTP
            initialDelaySeconds: 10
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 1
          readinessProbe:
            failureThreshold: 3
            httpGet:
              path: /healthz
              port: 10254
              scheme: HTTP
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 1

---
apiVersion: v1
kind: ConfigMap
metadata:
  name: nginx-configuration
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx

---

apiVersion: apps/v1
kind: Deployment
metadata:
  name: default-http-backend
  labels:
    app: default-http-backend
  namespace: ingress-nginx
spec:
  selector:
    matchLabels:
      app: default-http-backend
  replicas: 1
  template:
    metadata:
      labels:
        app: default-http-backend
    spec:
      terminationGracePeriodSeconds: 60
      containers:
      - name: default-http-backend
        # Any image is permissible as long as:
        # 1. It serves a 404 page at /
        # 2. It serves 200 on a /healthz endpoint
        image: registry.cn-hangzhou.aliyuncs.com/google_containers/defaultbackend:1.4
        livenessProbe:
          httpGet:
            path: /healthz
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 30
          timeoutSeconds: 5
        ports:
        - containerPort: 8080
        resources:
          limits:
            cpu: 10m
            memory: 20Mi
          requests:
            cpu: 10m
            memory: 20Mi
---

apiVersion: v1
kind: Service
metadata:
  name: default-http-backend
  namespace: ingress-nginx
  labels:
    app: default-http-backend
spec:
  ports:
  - port: 80
    targetPort: 8080
  selector:
    app: default-http-backend

---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nginx-ingress-serviceaccount
  namespace: ingress-nginx

---

apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: nginx-ingress-clusterrole
rules:
  - apiGroups:
      - ""
    resources:
      - configmaps
      - endpoints
      - nodes
      - pods
      - secrets
    verbs:
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - nodes
    verbs:
      - get
  - apiGroups:
      - ""
    resources:
      - services
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - "extensions"
    resources:
      - ingresses
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - ""
    resources:
        - events
    verbs:
        - create
        - patch
  - apiGroups:
      - "extensions"
    resources:
      - ingresses/status
    verbs:
      - update

---

apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:
  name: nginx-ingress-role
  namespace: ingress-nginx
rules:
  - apiGroups:
      - ""
    resources:
      - configmaps
      - pods
      - secrets
      - namespaces
    verbs:
      - get
  - apiGroups:
      - ""
    resources:
      - configmaps
    resourceNames:
      # Defaults to "<election-id>-<ingress-class>"
      # Here: "<ingress-controller-leader>-<nginx>"
      # This has to be adapted if you change either parameter
      # when launching the nginx-ingress-controller.
      - "ingress-controller-leader-nginx"
    verbs:
      - get
      - update
  - apiGroups:
      - ""
    resources:
      - configmaps
    verbs:
      - create
  - apiGroups:
      - ""
    resources:
      - endpoints
    verbs:
      - get

---

apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
  name: nginx-ingress-role-nisa-binding
  namespace: ingress-nginx
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: nginx-ingress-role
subjects:
  - kind: ServiceAccount
    name: nginx-ingress-serviceaccount
    namespace: ingress-nginx

---

apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: nginx-ingress-clusterrole-nisa-binding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: nginx-ingress-clusterrole
subjects:
  - kind: ServiceAccount
    name: nginx-ingress-serviceaccount
    namespace: ingress-nginx

---
kind: ConfigMap
apiVersion: v1
metadata:
  name: tcp-services
  namespace: ingress-nginx

---
kind: ConfigMap
apiVersion: v1
metadata:
  name: udp-services
  namespace: ingress-nginx

Since the ingress controller serves as the edge entry point and does not need to run on every machine, label the designated nodes so the DaemonSet's nodeSelector pins it to them:

[root@k8s-master-01 ingress]# kubectl label nodes 192.168.9.28 edgenode=true
node/192.168.9.28 labeled

[root@k8s-master-01 ingress]# kubectl label nodes 192.168.9.29 edgenode=true
node/192.168.9.29 labeled


[root@k8s-master-01 ingress]# kubectl get nodes 192.168.9.28 --show-labels
NAME           STATUS   ROLES    AGE   VERSION    LABELS
192.168.9.28   Ready    <none>   22h   v1.15.10   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,edgenode=true,kubernetes.io/arch=amd64,kubernetes.io/hostname=192.168.9.28,kubernetes.io/os=linux

[root@k8s-master-01 ingress]# kubectl get nodes 192.168.9.29 --show-labels
NAME           STATUS   ROLES    AGE   VERSION    LABELS
192.168.9.29   Ready    <none>   22h   v1.15.10   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,edgenode=true,kubernetes.io/arch=amd64,kubernetes.io/hostname=192.168.9.29,kubernetes.io/os=linux

Apply the manifest:

[root@k8s-master-01 ingress]# kubectl apply -f ingress.yaml
namespace/ingress-nginx created
daemonset.apps/nginx-ingress-controller created
configmap/nginx-configuration created
deployment.apps/default-http-backend created
service/default-http-backend created
serviceaccount/nginx-ingress-serviceaccount created
clusterrole.rbac.authorization.k8s.io/nginx-ingress-clusterrole created
role.rbac.authorization.k8s.io/nginx-ingress-role created
rolebinding.rbac.authorization.k8s.io/nginx-ingress-role-nisa-binding created
clusterrolebinding.rbac.authorization.k8s.io/nginx-ingress-clusterrole-nisa-binding created
configmap/tcp-services created
configmap/udp-services created
[root@k8s-master-01 ingress]# kubectl get rs -n ingress-nginx
NAME                              DESIRED   CURRENT   READY   AGE
default-http-backend-84f5f6598b   1         1         1       17s

[root@k8s-master-01 ingress]# kubectl get deployment -n ingress-nginx
NAME                   READY   UP-TO-DATE   AVAILABLE   AGE
default-http-backend   1/1     1            1           24s

[root@k8s-master-01 ingress]# kubectl get ds -n ingress-nginx   # ds is short for daemonset
NAME                       DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR   AGE
nginx-ingress-controller   2         2         2       2            2           edgenode=true   52m

[root@k8s-master-01 ingress]# kubectl get pod -n ingress-nginx
NAME                                    READY   STATUS    RESTARTS   AGE
default-http-backend-84f5f6598b-6h2hf   1/1     Running   0          30s

The two images required by this YAML:
registry.cn-hangzhou.aliyuncs.com/google_containers/defaultbackend:1.4
registry.cn-hangzhou.aliyuncs.com/google_containers/nginx-ingress-controller:0.20.0
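If the labeled edge nodes cannot pull these quickly at deploy time, you can pre-pull them manually on each edge node (a minimal sketch, assuming Docker is the container runtime):

docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/nginx-ingress-controller:0.20.0
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/defaultbackend:1.4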

Verification
[root@k8s-master-01 ~]# kubectl run nginx --image=nginx:latest --replicas=2
deployment.apps/nginx created

[root@k8s-master-01 ~]# kubectl get pods
NAME                     READY   STATUS         RESTARTS   AGE
nginx-64cccc97fb-hb77d   0/1     ErrImagePull   0          31s
nginx-64cccc97fb-r8wzq   0/1     ErrImagePull   0          31s

[root@k8s-master-01 ~]# kubectl get deployment
NAME    READY   UP-TO-DATE   AVAILABLE   AGE
nginx   0/2     2            0           61s

[root@k8s-master-01 ~]# kubectl edit deployment nginx
deployment.extensions/nginx edited
# Change `imagePullPolicy: Always` to `imagePullPolicy: IfNotPresent`.
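Equivalently, the same change can be applied non-interactively with a JSON patch (a sketch; the container index 0 assumes a single-container pod spec):

kubectl patch deployment nginx --type='json' \
  -p='[{"op":"replace","path":"/spec/template/spec/containers/0/imagePullPolicy","value":"IfNotPresent"}]'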

[root@k8s-master-01 ~]# kubectl get pods
NAME                     READY   STATUS    RESTARTS   AGE
nginx-6cddc97554-8nm4t   1/1     Running   0          7s
nginx-6cddc97554-kwztv   1/1     Running   0          5s

[root@k8s-master-01 ~]# kubectl get svc
NAME         TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
kubernetes   ClusterIP   10.10.10.1   <none>        443/TCP   22h

[root@k8s-master-01 ~]# kubectl expose deployment nginx --port=80 --target-port=80
service/nginx exposed

[root@k8s-master-01 ~]# kubectl get service
NAME         TYPE        CLUSTER-IP    EXTERNAL-IP   PORT(S)   AGE
kubernetes   ClusterIP   10.10.10.1    <none>        443/TCP   22h
nginx        ClusterIP   10.10.10.50   <none>        80/TCP    3s
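For reference, a declarative Service manifest roughly equivalent to the kubectl expose command above (a sketch, assuming the run=nginx label that kubectl run applied to the deployment):

apiVersion: v1
kind: Service
metadata:
  name: nginx
  namespace: default
spec:
  selector:
    run: nginx
  ports:
  - port: 80
    targetPort: 80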

The nginx image used in this test:
nginx:latest

Write the TCP ingress configuration for nginx:

[root@k8s-master-01 nginx]# cat tcp-service.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: tcp-services
  namespace: ingress-nginx
data:
  88: "default/nginx:80"


# The first 88 is the port exposed by the ingress controller on the edge nodes
# default is the namespace the application lives in
# nginx is the application's Service name (see kubectl get svc)
# the trailing 80 is the Service port (which here equals the container port)
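The udp-services ConfigMap created by ingress.yaml works the same way, using the same namespace/service:port format. For example, forwarding DNS over UDP might look like this (a hypothetical entry, assuming a kube-dns Service exists in kube-system):

apiVersion: v1
kind: ConfigMap
metadata:
  name: udp-services
  namespace: ingress-nginx
data:
  53: "kube-system/kube-dns:53"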
Apply and verify:
[root@k8s-master-01 nginx]# kubectl apply -f tcp-service.yaml
configmap/tcp-services configured

[root@k8s-master-01 nginx]# kubectl get cm -n ingress-nginx    # cm is short for configmap
NAME                  DATA   AGE
nginx-configuration   0      42m
tcp-services          1      42m
udp-services          0      42m

[root@k8s-master-01 nginx]# kubectl describe cm tcp-services -n ingress-nginx
Name:         tcp-services
Namespace:    ingress-nginx
Labels:       <none>
Annotations:  kubectl.kubernetes.io/last-applied-configuration:
                {"apiVersion":"v1","data":{"88":"default/nginx:80"},"kind":"ConfigMap","metadata":{"annotations":{},"name":"tcp-services","namespace":"ing...

Data
====
88:
----
default/nginx:80
Events:
  Type    Reason  Age   From                      Message
  ----    ------  ----  ----                      -------
  Normal  CREATE  33s   nginx-ingress-controller  ConfigMap ingress-nginx/tcp-services
  Normal  CREATE  33s   nginx-ingress-controller  ConfigMap ingress-nginx/tcp-services
 
Testing
[root@k8s-master-01 ~]# kubectl get pods -o wide
NAME                     READY   STATUS    RESTARTS   AGE   IP            NODE           NOMINATED NODE   READINESS GATES
nginx-6cddc97554-8nm4t   1/1     Running   0          31m   172.16.56.3   192.168.9.29   <none>           <none>
nginx-6cddc97554-kwztv   1/1     Running   0          31m   172.16.62.5   192.168.9.28   <none>           <none>

[root@k8s-master-01 ~]# curl -I 172.16.56.3:80
HTTP/1.1 200 OK
Server: nginx/1.19.3
Date: Tue, 03 Nov 2020 12:19:26 GMT
Content-Type: text/html
Content-Length: 612
Last-Modified: Tue, 29 Sep 2020 14:12:31 GMT
Connection: keep-alive
ETag: "5f7340cf-264"
Accept-Ranges: bytes

[root@k8s-master-01 ~]# curl -I 192.168.9.29:88
HTTP/1.1 200 OK
Server: nginx/1.19.3
Date: Tue, 03 Nov 2020 12:19:33 GMT
Content-Type: text/html
Content-Length: 612
Last-Modified: Tue, 29 Sep 2020 14:12:31 GMT
Connection: keep-alive
ETag: "5f7340cf-264"
Accept-Ranges: bytes


Both requests return the default "Welcome to nginx!" page, so the Layer-4 forwarding works as expected.
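Because the controller runs as a hostNetwork DaemonSet, every labeled edge node exposes the forwarded port. To double-check, something like the following can be used (a sketch; run ss on the edge node itself):

ss -lntp | grep ':88'
curl -I 192.168.9.28:88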

For reference, here is the Layer-4 ingress YAML configuration we run in production:

# cat tcp-service.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: tcp-services
  namespace: ingress-nginx
data:
  50005: "private/blended-learning-celery-beat:50005"
  50004: "private/blended-learning-celery-cam:50004"
  50002: "private/blended-learning-celery-download-worker:50002"
  50001: "private/blended-learning-celery-pdf-worker:50001"
  50000: "private/blended-learning-celery-worker:50000"
  39154: "private/blended-learning-rain-django:39154"
  50006: "private/courseware27-celery-beat:50006"
  50003: "private/courseware27-celery-worker:50003"
  39176: "private/courseware27-gunicorn:39176"
  50021: "private/examination-beat:50021"
  50022: "private/examination-default-worker:50022"
  39889: "private/examination-gunicorn:39889"
  50019: "private/examination-proctor:50019"
  50023: "private/examination-submit-worker:50023"
  39178: "private/forum:39178"
  33576: "private/import-gdufe-data:33576"
  39199: "private/mobile-api:39199"
  50007: "private/new-xuetangx-beat:50007"
  39177: "private/new-xuetangx-gunicorn:39177"
  50008: "private/new-xuetangx-worker:50008"
  9897: "private/platform-cron:9897"
  9898: "private/platform-search:9898"
  50035: "private/problemparser:50035"
  42125: "private/rain-docx:42125"
  43045: "private/rain-node-drop:43045"
  32010: "private/rain-node-eggweb:32010"
  31841: "private/rain-node-logqueue:31841"
  45733: "private/rain-node-messenger-http:45733"
  31895: "private/rain-node-messenger-websockets:31895"
  10086: "private/rain-node-oplat-http:10086"
  39126: "private/rain-node-oplat-websockets:39126"
  49344: "private/rain-node-pipe:49344"
  45398: "private/rain-node-wepushqueue:45398"
  50015: "private/score-celery-beat:50015"
  50017: "private/score-celery-receive:50017"
  50014: "private/score-celery-worker:50014"
  39299: "private/score-gunicornserver:39299"
  50011: "private/xuetangx-video-log-celery-beat:50011"
  50012: "private/xuetangx-video-log-celery-worker:50012"
  39180: "private/xuetangx-video-log-gunicorn:39180"

How to list these Services:

[root@k8s-master-01 service]# kubectl get svc -n private
NAME                                               TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)           AGE
blended-learning-celery-beat                       ClusterIP   10.10.10.96    <none>        50005/TCP         137m
blended-learning-celery-beat-nodeport              NodePort    10.10.10.213   <none>        50005:42881/TCP   137m
blended-learning-celery-cam                        ClusterIP   10.10.10.6     <none>        50004/TCP         137m
blended-learning-celery-cam-nodeport               NodePort    10.10.10.32    <none>        50004:46254/TCP   137m
blended-learning-celery-download-worker            ClusterIP   10.10.10.226   <none>        50002/TCP         137m
blended-learning-celery-download-worker-nodeport   NodePort    10.10.10.236   <none>        50002:37396/TCP   137m
blended-learning-celery-pdf-worker                 ClusterIP   10.10.10.28    <none>        50001/TCP         137m
blended-learning-celery-pdf-worker-nodeport        NodePort    10.10.10.63    <none>        50001:32272/TCP   137m
blended-learning-celery-worker                     ClusterIP   10.10.10.187   <none>        50000/TCP         137m
blended-learning-celery-worker-nodeport            NodePort    10.10.10.188   <none>        50000:36212/TCP   137m
blended-learning-rain-django                       ClusterIP   10.10.10.51    <none>        39154/TCP         137m
blended-learning-rain-django-nodeport              NodePort    10.10.10.30    <none>        39154:38752/TCP   137m
courseware27-celery-beat                           ClusterIP   10.10.10.212   <none>        50006/TCP         137m
courseware27-celery-beat-nodeport                  NodePort    10.10.10.90    <none>        50006:39031/TCP   137m
courseware27-celery-worker                         ClusterIP   10.10.10.252   <none>        50003/TCP         137m
courseware27-celery-worker-nodeport                NodePort    10.10.10.55    <none>        50003:32391/TCP   137m
courseware27-gunicorn                              ClusterIP   10.10.10.197   <none>        39176/TCP         137m
courseware27-gunicorn-nodeport                     NodePort    10.10.10.194   <none>        39176:30959/TCP   137m
examination-beat                                   ClusterIP   10.10.10.79    <none>        50021/TCP         137m
examination-beat-nodeport                          NodePort    10.10.10.180   <none>        50021:40748/TCP   137m
examination-default-worker                         ClusterIP   10.10.10.171   <none>        50022/TCP         137m
examination-default-worker-nodeport                NodePort    10.10.10.11    <none>        50022:36629/TCP   137m
examination-gunicorn                               ClusterIP   10.10.10.237   <none>        39889/TCP         137m
examination-gunicorn-nodeport                      NodePort    10.10.10.10    <none>        39889:38060/TCP   137m
examination-proctor                                ClusterIP   10.10.10.83    <none>        50019/TCP         137m
examination-proctor-nodeport                       NodePort    10.10.10.214   <none>        50019:38350/TCP   137m
examination-submit-worker                          ClusterIP   10.10.10.243   <none>        50023/TCP         137m
examination-submit-worker-nodeport                 NodePort    10.10.10.198   <none>        50023:49237/TCP   137m
forum                                              ClusterIP   10.10.10.34    <none>        39178/TCP         137m
forum-nodeport                                     NodePort    10.10.10.86    <none>        39178:35895/TCP   137m
import-gdufe-data                                  ClusterIP   10.10.10.148   <none>        33576/TCP         137m
import-gdufe-data-nodeport                         NodePort    10.10.10.254   <none>        33576:34493/TCP   137m
mobile-api                                         ClusterIP   10.10.10.234   <none>        39199/TCP         137m
mobile-api-nodeport                                NodePort    10.10.10.238   <none>        39199:40253/TCP   137m
new-xuetangx-beat                                  ClusterIP   10.10.10.163   <none>        50007/TCP         137m
new-xuetangx-beat-nodeport                         NodePort    10.10.10.133   <none>        50007:34136/TCP   137m
new-xuetangx-gunicorn                              ClusterIP   10.10.10.48    <none>        39177/TCP         137m
new-xuetangx-gunicorn-nodeport                     NodePort    10.10.10.244   <none>        39177:35713/TCP   137m
new-xuetangx-worker                                ClusterIP   10.10.10.221   <none>        50008/TCP         137m
new-xuetangx-worker-nodeport                       NodePort    10.10.10.57    <none>        50008:42883/TCP   137m
platform-cron                                      ClusterIP   10.10.10.241   <none>        9897/TCP          137m
platform-cron-nodeport                             NodePort    10.10.10.59    <none>        9897:30089/TCP    137m
platform-search                                    ClusterIP   10.10.10.13    <none>        9898/TCP          137m
platform-search-nodeport                           NodePort    10.10.10.88    <none>        9898:45143/TCP    137m
problemparser                                      ClusterIP   10.10.10.102   <none>        50035/TCP         137m
problemparser-nodeport                             NodePort    10.10.10.199   <none>        50035:49730/TCP   137m
rain-docx                                          ClusterIP   10.10.10.46    <none>        42125/TCP         137m
rain-docx-nodeport                                 NodePort    10.10.10.31    <none>        42125:42207/TCP   137m
rain-node-drop                                     ClusterIP   10.10.10.123   <none>        43045/TCP         137m
rain-node-drop-nodeport                            NodePort    10.10.10.60    <none>        43045:32413/TCP   137m
rain-node-eggweb                                   ClusterIP   10.10.10.124   <none>        32010/TCP         137m
rain-node-eggweb-nodeport                          NodePort    10.10.10.82    <none>        32010:43800/TCP   137m
rain-node-logqueue                                 ClusterIP   10.10.10.64    <none>        31841/TCP         137m
rain-node-logqueue-nodeport                        NodePort    10.10.10.137   <none>        31841:46274/TCP   137m
rain-node-messenger-http                           ClusterIP   10.10.10.39    <none>        45733/TCP         137m
rain-node-messenger-http-nodeport                  NodePort    10.10.10.27    <none>        45733:38857/TCP   137m
rain-node-messenger-websockets                     ClusterIP   10.10.10.156   <none>        31895/TCP         137m
rain-node-messenger-websockets-nodeport            NodePort    10.10.10.215   <none>        31895:42927/TCP   137m
rain-node-oplat-http                               ClusterIP   10.10.10.200   <none>        10086/TCP         137m
rain-node-oplat-http-nodeport                      NodePort    10.10.10.12    <none>        10086:34478/TCP   137m
rain-node-oplat-websockets                         ClusterIP   10.10.10.185   <none>        39126/TCP         137m
rain-node-oplat-websockets-nodeport                NodePort    10.10.10.167   <none>        39126:47557/TCP   137m
rain-node-pipe                                     ClusterIP   10.10.10.42    <none>        49344/TCP         137m
rain-node-pipe-nodeport                            NodePort    10.10.10.14    <none>        49344:48790/TCP   137m
rain-node-wepushqueue                              ClusterIP   10.10.10.135   <none>        45398/TCP         137m
rain-node-wepushqueue-nodeport                     NodePort    10.10.10.125   <none>        45398:40877/TCP   137m
score-celery-beat                                  ClusterIP   10.10.10.201   <none>        50015/TCP         137m
score-celery-beat-nodeport                         NodePort    10.10.10.7     <none>        50015:45094/TCP   137m
score-celery-receive                               ClusterIP   10.10.10.56    <none>        50017/TCP         137m
score-celery-receive-nodeport                      NodePort    10.10.10.131   <none>        50017:30934/TCP   137m
score-celery-worker                                ClusterIP   10.10.10.164   <none>        50014/TCP         137m
score-celery-worker-nodeport                       NodePort    10.10.10.75    <none>        50014:49868/TCP   137m
score-gunicornserver                               ClusterIP   10.10.10.189   <none>        39299/TCP         137m
score-gunicornserver-nodeport                      NodePort    10.10.10.93    <none>        39299:49457/TCP   137m
xuetangx-video-log-celery-beat                     ClusterIP   10.10.10.77    <none>        50011/TCP         137m
xuetangx-video-log-celery-beat-nodeport            NodePort    10.10.10.227   <none>        50011:49067/TCP   137m
xuetangx-video-log-celery-worker                   ClusterIP   10.10.10.61    <none>        50012/TCP         137m
xuetangx-video-log-celery-worker-nodeport          NodePort    10.10.10.17    <none>        50012:38042/TCP   137m
xuetangx-video-log-gunicorn                        ClusterIP   10.10.10.70    <none>        39180/TCP         137m
xuetangx-video-log-gunicorn-nodeport               NodePort    10.10.10.235   <none>        39180:30810/TCP   137m

Extract the port-to-Service mappings and add them to the YAML file:

[root@k8s-master-01 service]# kubectl get svc -n private|grep -v NodePort|awk '{print $1":"$5}'|grep -v NAME|awk -F '/' '{print $1}'|awk -F ':' '{print $2": \"private/"$1":"$2"\""}'
50005: "private/blended-learning-celery-beat:50005"
50004: "private/blended-learning-celery-cam:50004"
50002: "private/blended-learning-celery-download-worker:50002"
50001: "private/blended-learning-celery-pdf-worker:50001"
50000: "private/blended-learning-celery-worker:50000"
39154: "private/blended-learning-rain-django:39154"
50006: "private/courseware27-celery-beat:50006"
50003: "private/courseware27-celery-worker:50003"
39176: "private/courseware27-gunicorn:39176"
50021: "private/examination-beat:50021"
50022: "private/examination-default-worker:50022"
39889: "private/examination-gunicorn:39889"
50019: "private/examination-proctor:50019"
50023: "private/examination-submit-worker:50023"
39178: "private/forum:39178"
33576: "private/import-gdufe-data:33576"
39199: "private/mobile-api:39199"
50007: "private/new-xuetangx-beat:50007"
39177: "private/new-xuetangx-gunicorn:39177"
50008: "private/new-xuetangx-worker:50008"
9897: "private/platform-cron:9897"
9898: "private/platform-search:9898"
50035: "private/problemparser:50035"
42125: "private/rain-docx:42125"
43045: "private/rain-node-drop:43045"
32010: "private/rain-node-eggweb:32010"
31841: "private/rain-node-logqueue:31841"
45733: "private/rain-node-messenger-http:45733"
31895: "private/rain-node-messenger-websockets:31895"
10086: "private/rain-node-oplat-http:10086"
39126: "private/rain-node-oplat-websockets:39126"
49344: "private/rain-node-pipe:49344"
45398: "private/rain-node-wepushqueue:45398"
50015: "private/score-celery-beat:50015"
50017: "private/score-celery-receive:50017"
50014: "private/score-celery-worker:50014"
39299: "private/score-gunicornserver:39299"
50011: "private/xuetangx-video-log-celery-beat:50011"
50012: "private/xuetangx-video-log-celery-worker:50012"
39180: "private/xuetangx-video-log-gunicorn:39180"
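A possible way to regenerate the whole ConfigMap from that pipeline and apply it in one step (a sketch; the awk field positions assume the default kubectl get svc column layout shown above):

{
  cat <<'EOF'
apiVersion: v1
kind: ConfigMap
metadata:
  name: tcp-services
  namespace: ingress-nginx
data:
EOF
  kubectl get svc -n private --no-headers | grep -v NodePort | \
    awk '{split($5, p, "/"); print "  " p[1] ": \"private/" $1 ":" p[1] "\""}'
} > tcp-service.yaml
kubectl apply -f tcp-service.yaml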