Deploy a GKE Cluster with Portworx

Let’s deploy a GKE cluster with a customized Portworx deployment that uses Portworx’s security capabilities and encrypted volumes.

You will need a GCP account. Install the gcloud CLI and authenticate from your laptop.
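
If you have not authenticated yet, a minimal example (YOUR_PROJECT_ID is a placeholder for your own project):

gcloud auth login
gcloud config set project YOUR_PROJECT_ID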

gcloud container clusters create carlos-lab01 \
    --zone us-east1-b \
    --disk-type=pd-ssd \
    --disk-size=50GB \
    --labels=portworx=gke \
    --machine-type=n1-highcpu-8 \
    --num-nodes=5 \
    --image-type ubuntu \
    --scopes compute-rw,storage-ro,cloud-platform \
    --enable-autoscaling --max-nodes=6 --min-nodes=5

Wait until your cluster is available.
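
You can check the cluster and fetch kubectl credentials with, for example:

gcloud container clusters list
gcloud container clusters get-credentials carlos-lab01 --zone us-east1-b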

Then install Portworx using the operator. Two manifests are needed: operator.yaml deploys the operator itself, and px-enterprisecluster.yaml defines the StorageCluster.

operator.yaml

# SOURCE: https://install.portworx.com/?comp=pxoperator
apiVersion: v1
kind: ServiceAccount
metadata:
  name: portworx-operator
  namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: portworx-operator
rules:
  - apiGroups: ["*"]
    resources: ["*"]
    verbs: ["*"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: portworx-operator
subjects:
- kind: ServiceAccount
  name: portworx-operator
  namespace: kube-system
roleRef:
  kind: ClusterRole
  name: portworx-operator
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: portworx-operator
  namespace: kube-system
spec:
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 1
    type: RollingUpdate
  replicas: 1
  selector:
    matchLabels:
      name: portworx-operator
  template:
    metadata:
      labels:
        name: portworx-operator
    spec:
      containers:
      - name: portworx-operator
        imagePullPolicy: Always
        image: portworx/px-operator:1.4.5
        command:
        - /operator
        - --verbose
        - --driver=portworx
        - --leader-elect=true
        env:
        - name: OPERATOR_NAME
          value: portworx-operator
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: "name"
                    operator: In
                    values:
                    - portworx-operator
              topologyKey: "kubernetes.io/hostname"
      serviceAccountName: portworx-operator

px-enterprisecluster.yaml

# SOURCE: https://install.portworx.com/?operator=true&mc=false&kbver=1.18.17&b=true&s=%22type%3Dpd-ssd%2Csize%3D150%22&j=auto&kd=type%3Dpd-standard%2Csize%3D150&c=px-cluster-2d05a36f-a282-4f71-9dca-7b0f25a11604&gke=true&stork=true&st=k8s
kind: StorageCluster
apiVersion: core.libopenstorage.org/v1
metadata:
  name: px-cluster-2d05a36f-a282-4f71-9dca-7b0f25a11604
  namespace: kube-system
  annotations:
    portworx.io/install-source: "https://install.portworx.com/?operator=true&mc=false&kbver=1.18.17&b=true&s=%22type%3Dpd-ssd%2Csize%3D150%22&j=auto&kd=type%3Dpd-standard%2Csize%3D150&c=px-cluster-2d05a36f-a282-4f71-9dca-7b0f25a11604&gke=true&stork=true&st=k8s"
    portworx.io/is-gke: "true"
    portworx.io/misc-args: "-kvdb_cluster_size=5"
spec:
  image: portworx/oci-monitor:2.7.1
  imagePullPolicy: Always
  kvdb:
    internal: true
  cloudStorage:
    deviceSpecs:
    - type=pd-ssd,size=150
    journalDeviceSpec: auto
    kvdbDeviceSpec: type=pd-standard,size=150
  secretsProvider: k8s
  stork:
    enabled: true
    args:
      webhook-controller: "false"
  autopilot:
    enabled: true

Before applying these manifests, grant your GCP user cluster-admin permissions; GKE requires this to create the operator's RBAC objects:

kubectl create clusterrolebinding myname-cluster-admin-binding \
    --clusterrole=cluster-admin --user=`gcloud info --format='value(config.account)'`

kubectl apply -f operator.yaml

kubectl apply -f px-enterprisecluster.yaml
kubectl get all -n kube-system
NAME                                                        READY   STATUS    RESTARTS   AGE
pod/autopilot-5b45f566bf-4ldr7                              1/1     Running   0          30m
pod/portworx-api-brtfh                                      1/1     Running   0          30m
pod/portworx-api-dwmr8                                      1/1     Running   0          30m
pod/portworx-api-qg6kd                                      1/1     Running   0          30m
pod/portworx-api-r2thk                                      1/1     Running   0          30m
pod/portworx-api-rx7bs                                      1/1     Running   0          30m
pod/portworx-api-vr649                                      1/1     Running   0          30m
pod/portworx-kvdb-cw4t4                                     1/1     Running   0          28m
pod/portworx-kvdb-f9jpw                                     1/1     Running   0          28m
pod/portworx-kvdb-ng8gm                                     1/1     Running   0          28m
pod/portworx-kvdb-njv9l                                     1/1     Running   0          28m
pod/portworx-kvdb-v22kt                                     1/1     Running   0          28m
pod/portworx-pvc-controller-78d55dfd8c-gcxcb                1/1     Running   0          30m
pod/portworx-pvc-controller-78d55dfd8c-tm44l                1/1     Running   0          30m
pod/portworx-pvc-controller-78d55dfd8c-xpdgx                1/1     Running   0          30m
pod/px-cluster-2d05a36f-a282-4f71-9dca-7b0f25a11604-2xnwh   1/1     Running   0          30m
pod/px-cluster-2d05a36f-a282-4f71-9dca-7b0f25a11604-fpc5s   1/1     Running   0          30m
pod/px-cluster-2d05a36f-a282-4f71-9dca-7b0f25a11604-g7kkj   1/1     Running   0          30m
pod/px-cluster-2d05a36f-a282-4f71-9dca-7b0f25a11604-jplpl   1/1     Running   0          30m
pod/px-cluster-2d05a36f-a282-4f71-9dca-7b0f25a11604-mtr5w   1/1     Running   0          30m
pod/px-cluster-2d05a36f-a282-4f71-9dca-7b0f25a11604-p5f47   1/1     Running   0          30m
pod/stork-7cc5dd57c4-mqfh5                                  1/1     Running   0          30m
pod/stork-7cc5dd57c4-r95dq                                  1/1     Running   0          30m
pod/stork-7cc5dd57c4-t9b5z                                  1/1     Running   0          30m
pod/stork-scheduler-76c7f84b86-6bdtp                        1/1     Running   0          30m
pod/stork-scheduler-76c7f84b86-br6b5                        1/1     Running   0          30m
pod/stork-scheduler-76c7f84b86-ngss6                        1/1     Running   0          30m

NAME                       TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)                               AGE
service/portworx-api       ClusterIP   10.3.254.226   <none>        9001/TCP,9020/TCP,9021/TCP            30m
service/portworx-service   ClusterIP   10.3.252.179   <none>        9001/TCP,9019/TCP,9020/TCP,9021/TCP   30m
service/stork-service      ClusterIP   10.3.247.203   <none>        8099/TCP,443/TCP                      30m

NAME                          DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR   AGE
daemonset.apps/portworx-api   6         6         6       6            6           <none>          30m

NAME                                      READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/autopilot                 1/1     1            1           30m
deployment.apps/portworx-pvc-controller   3/3     3            3           30m
deployment.apps/stork                     3/3     3            3           30m
deployment.apps/stork-scheduler           3/3     3            3           30m

NAME                                                 DESIRED   CURRENT   READY   AGE
replicaset.apps/autopilot-5b45f566bf                 1         1         1       30m
replicaset.apps/portworx-pvc-controller-78d55dfd8c   3         3         3       30m
replicaset.apps/stork-7cc5dd57c4                     3         3         3       30m
replicaset.apps/stork-scheduler-76c7f84b86           3         3         3       30m
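
Once everything is running, you can also check the Portworx storage cluster itself from inside one of its pods (using the name=portworx pod label, as we do again later):

PX_POD=$(kubectl get pods -l name=portworx -n kube-system -o jsonpath='{.items[0].metadata.name}')
kubectl exec $PX_POD -n kube-system -- /opt/pwx/bin/pxctl status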

Later we will test Portworx by deploying a StatefulSet application. First, take a look at the StorageClasses that are now available:

kubectl get sc
NAME                             PROVISIONER                     RECLAIMPOLICY   VOLUMEBINDINGMODE      ALLOWVOLUMEEXPANSION   AGE
premium-rwo                      pd.csi.storage.gke.io           Delete          WaitForFirstConsumer   true                   29h
px-db                            kubernetes.io/portworx-volume   Delete          Immediate              true                   29h
px-db-cloud-snapshot             kubernetes.io/portworx-volume   Delete          Immediate              true                   29h
px-db-cloud-snapshot-encrypted   kubernetes.io/portworx-volume   Delete          Immediate              true                   29h
px-db-encrypted                  kubernetes.io/portworx-volume   Delete          Immediate              true                   29h
px-db-local-snapshot             kubernetes.io/portworx-volume   Delete          Immediate              true                   29h
px-db-local-snapshot-encrypted   kubernetes.io/portworx-volume   Delete          Immediate              true                   29h
px-replicated                    kubernetes.io/portworx-volume   Delete          Immediate              true                   29h
px-replicated-encrypted          kubernetes.io/portworx-volume   Delete          Immediate              true                   29h
px-secure-sc                     kubernetes.io/portworx-volume   Delete          Immediate              false                  28h
standard (default)               kubernetes.io/gce-pd            Delete          Immediate              true                   29h
standard-rwo                     pd.csi.storage.gke.io           Delete          WaitForFirstConsumer   true                   29h
stork-snapshot-sc                stork-snapshot                  Delete          Immediate              true                   37m

To use the encrypted StorageClasses (such as px-db-encrypted), we first need to create a cluster-wide secret key:

YOUR_SECRET_KEY=this-is-gonna-be-your-secret-key

kubectl -n kube-system create secret generic px-vol-encryption \
  --from-literal=cluster-wide-secret-key=$YOUR_SECRET_KEY
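
A quick check that the secret exists:

kubectl -n kube-system get secret px-vol-encryption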

Then set it as the cluster-wide secret key in Portworx:

PX_POD=$(kubectl get pods -l name=portworx -n kube-system -o jsonpath='{.items[0].metadata.name}')
kubectl exec $PX_POD -n kube-system -- /opt/pwx/bin/pxctl secrets set-cluster-key \
  --secret cluster-wide-secret-key

With the cluster-wide secret in place, you can enable security on the StorageCluster by editing the object:

kubectl edit storagecluster -n kube-system

...
spec:
  security:
    enabled: true
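
Saving this change makes the operator redeploy the Portworx pods; you can watch the rollout with, for example:

kubectl -n kube-system get pods -l name=portworx -w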

Once the Portworx pods have been redeployed, pxctl requires authentication. To get access to your PX cluster, fetch the admin token and create a context inside the pods:

PORTWORX_ADMIN_TOKEN=$(kubectl -n kube-system get secret px-admin-token -o json \
    | jq -r '.data."auth-token"' \
    | base64 -d)
    
PX_POD=$(kubectl get pods -l name=portworx -n kube-system -o jsonpath='{.items[0].metadata.name}')
kubectl exec -it $PX_POD -n kube-system -- /opt/pwx/bin/pxctl context create admin --token=$PORTWORX_ADMIN_TOKEN    

PX_POD=$(kubectl get pods -l name=portworx -n kube-system -o jsonpath='{.items[1].metadata.name}')
kubectl exec -it $PX_POD -n kube-system -- /opt/pwx/bin/pxctl context create admin --token=$PORTWORX_ADMIN_TOKEN    

PX_POD=$(kubectl get pods -l name=portworx -n kube-system -o jsonpath='{.items[2].metadata.name}')
kubectl exec -it $PX_POD -n kube-system -- /opt/pwx/bin/pxctl context create admin --token=$PORTWORX_ADMIN_TOKEN 
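
If you prefer, here is a small sketch that creates the context on every Portworx pod in a single loop (same name=portworx label and admin token as above):

for PX_POD in $(kubectl get pods -l name=portworx -n kube-system -o jsonpath='{.items[*].metadata.name}'); do
  kubectl exec -it "$PX_POD" -n kube-system -- /opt/pwx/bin/pxctl context create admin --token=$PORTWORX_ADMIN_TOKEN
done

Finally, log Portworx into the Kubernetes secrets provider (the StorageCluster spec uses secretsProvider: k8s):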


kubectl exec $PX_POD -n kube-system -- /opt/pwx/bin/pxctl secrets k8s login

Test the cluster with a StatefulSet

kubectl create namespace cassandra

Label three of your nodes with app=cassandra, since the StatefulSet below uses that label in its node affinity rule.

kubectl label nodes <node01> <node02> <node03> app=cassandra
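
If you need the node names, you can list them first and pick any three:

kubectl get nodes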

cassandra.yaml

apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: cassandra
  namespace: cassandra
  labels:
    app: cassandra
spec:
  serviceName: cassandra
  replicas: 3
  selector:
    matchLabels:
      app: cassandra
  template:
    metadata:
      labels:
        app: cassandra
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: app
                operator: In
                values:
                - cassandra
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: app
                operator: In
                values:
                - cassandra
            topologyKey: kubernetes.io/hostname
      terminationGracePeriodSeconds: 1800
      containers:
      - name: cassandra
        image: cassandra:3.11
        imagePullPolicy: Always
        ports:
        - containerPort: 7000
          name: intra-node
        - containerPort: 7001
          name: tls-intra-node
        - containerPort: 7199
          name: jmx
        - containerPort: 9042
          name: cql
        resources:
          limits:
            cpu: "500m"
            memory: 1Gi
          requests:
            cpu: "500m"
            memory: 1Gi
        securityContext:
          capabilities:
            add:
              - IPC_LOCK
        lifecycle:
          preStop:
            exec:
              command: 
              - /bin/sh
              - -c
              - nodetool drain
        env:
          - name: MAX_HEAP_SIZE
            value: 512M
          - name: HEAP_NEWSIZE
            value: 100M
          - name: CASSANDRA_SEEDS
            value: "cassandra-0.cassandra.cassandra.svc.cluster.local"
          - name: CASSANDRA_CLUSTER_NAME
            value: "K8Demo"
          - name: CASSANDRA_DC
            value: "DC1-K8Demo"
          - name: CASSANDRA_RACK
            value: "Rack1-K8Demo"
          - name: POD_IP
            valueFrom:
              fieldRef:
                fieldPath: status.podIP
        readinessProbe:
          tcpSocket:
            port: 9042
          initialDelaySeconds: 30
          timeoutSeconds: 7
        volumeMounts:
        - name: cassandra-data
          mountPath: /var/lib/cassandra
  volumeClaimTemplates:
  - metadata:
      name: cassandra-data
    spec:
      accessModes: [ "ReadWriteOnce" ]
      storageClassName: px-db-encrypted
      resources:
        requests:
          storage: 2Gi
---
apiVersion: v1
kind: Service
metadata:
  name: cassandra
  namespace: cassandra
spec:
  clusterIP: None
  selector:
    app: cassandra
  ports:
    - protocol: TCP
      name: port9042k8s
      port: 9042
      targetPort: 9042

Apply the manifest and check that the PVCs get bound:

kubectl apply -f cassandra.yaml
kubectl get pvc -n cassandra                                
NAME                         STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS      AGE
cassandra-data-cassandra-0   Bound    pvc-81e11ede-e78a-4fd5-ae64-1ca451d8c8f9   2Gi        RWO            px-db-encrypted   116m
cassandra-data-cassandra-1   Bound    pvc-a83b23ee-1426-4b78-ae29-f6a562701e68   2Gi        RWO            px-db-encrypted   113m
cassandra-data-cassandra-2   Bound    pvc-326a2419-cf50-41d3-93d0-63dbecffbcdd   2Gi        RWO            px-db-encrypted   111m
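
To confirm the volumes really are encrypted, you can inspect one of them with pxctl. A quick sketch (it relies on the admin context created earlier, and assumes the Portworx volume name matches the PV name, as it does for the portworx-volume provisioner):

VOL=$(kubectl get pvc cassandra-data-cassandra-0 -n cassandra -o jsonpath='{.spec.volumeName}')
PX_POD=$(kubectl get pods -l name=portworx -n kube-system -o jsonpath='{.items[0].metadata.name}')
kubectl exec $PX_POD -n kube-system -- /opt/pwx/bin/pxctl volume inspect $VOL

The inspect output should report the volume as encrypted.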
