3. Playing with Containers
- Scaling your containers
- Updating live containers
- Forwarding container ports
- Ensuring flexible usage of your containers
- Submitting Jobs on Kubernetes
- Working with configuration files
Scaling your containers
cat << EOF > 3-1-1_deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-nginx
spec:
  replicas: 2
  selector:
    matchLabels:
      service: nginx
  template:
    metadata:
      labels:
        service: nginx
    spec:
      containers:
      - name: my-container
        image: nginx
---
apiVersion: v1
kind: Service
metadata:
  name: my-nginx
spec:
  ports:
    - protocol: TCP
      port: 80
      nodePort: 30080
  type: NodePort
  selector:
    service: nginx
EOF
kubectl create -f 3-1-1_deployment.yaml
deployment.apps "my-nginx" created
service "my-nginx" created
kubectl get pods
NAME                       READY     STATUS    RESTARTS   AGE
my-nginx-f465f5465-nsdxg   1/1       Running   0          1m
my-nginx-f465f5465-qm8xf   1/1       Running   0          1m
kubectl get services
NAME         TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)        AGE
kubernetes   ClusterIP   10.43.0.1      <none>        443/TCP        57m
my-nginx     NodePort    10.43.52.172   <none>        80:30080/TCP   1m
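Before bringing in any automation, the Deployment can also be scaled by hand. The following is only a quick sketch (the replica count of 4 is an arbitrary example, not part of the recipe): kubectl scale adjusts the number of Pods, and the label selector lets us watch only the nginx Pods.
kubectl scale deployment my-nginx --replicas=4
kubectl get pods -l service=nginx
Scaling back down works the same way, for example --replicas=2 to return to the original size.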
Horizontal Pod Autoscaler (HPA)
An HPA periodically queries its metrics source, and the HPA controller decides whether scaling is required based on the metrics it receives; roughly speaking, it drives the Deployment toward desiredReplicas = ceil(currentReplicas * currentMetricValue / targetMetricValue). There are two types of metrics source it can fetch from: one is Heapster, the other is custom metrics exposed through a RESTful API. In the following example, we'll show you how to use Heapster to monitor Pods and expose the metrics to an HPA.
kubectl create -f https://raw.githubusercontent.com/kubernetes/kops/master/addons/monitoring-standalone/v1.7.0.yaml
deployment.extensions "heapster" created
service "heapster" created
serviceaccount "heapster" created
clusterrolebinding.rbac.authorization.k8s.io "heapster" created
role.rbac.authorization.k8s.io "system:pod-nanny" created
rolebinding.rbac.authorization.k8s.io "heapster-binding" created
kubectl get pods --all-namespaces | grep heapster
kube-system heapster-574b644764-f9wxk 2/2 Running 0 21s
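Heapster needs a minute or two before it starts reporting usage. On clusters of this vintage, kubectl top reads from Heapster, so it is a convenient spot check that the metrics pipeline is working (a sketch, not part of the original recipe):
kubectl top pods
kubectl top nodes
If these return per-Pod and per-node CPU/memory figures, the HPA will be able to see the same numbers.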
kubectl get deployments
NAME       DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
my-nginx   2         2         2            2           1m
kubectl autoscale deployment my-nginx --cpu-percent=50 --min=2 --max=5
deployment.apps "my-nginx" autoscaled
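Alternatively, the same autoscaler can be declared in a file, as shown in the next listing. If you have already created it with kubectl autoscale above, delete that HPA first; otherwise creating a second one with the same name will fail with an AlreadyExists error:
kubectl delete hpa my-nginx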
cat << EOF > 3-1-2_hpa.yaml
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
  name: my-nginx
spec:
  scaleTargetRef:
    kind: Deployment
    name: my-nginx
  minReplicas: 2
  maxReplicas: 5
  targetCPUUtilizationPercentage: 50
EOF
kubectl create -f 3-1-2_hpa.yaml
horizontalpodautoscaler.autoscaling "my-nginx" created
kubectl get hpa
NAME       REFERENCE             TARGETS         MINPODS   MAXPODS   REPLICAS   AGE
my-nginx   Deployment/my-nginx   <unknown>/50%   2         5         0          1m
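The <unknown> target means the HPA has not received CPU metrics for the Pods yet; note that a CPU-percentage target also needs a CPU request (resources.requests.cpu) on the container, so that the controller has a baseline to compute the percentage against. To watch the autoscaler react, you can generate some load against the Service from a throwaway Pod. The following is only a rough sketch; the load-generator name and the busybox image are arbitrary choices, not part of the recipe:
kubectl run -i --tty load-generator --rm --image=busybox --restart=Never -- /bin/sh
# inside the container, hit the nginx Service in a loop
while true; do wget -q -O- http://my-nginx > /dev/null; done
After a few minutes, kubectl get hpa should show the CPU percentage rising and the REPLICAS column climbing toward the maximum of 5; once the load stops, it falls back to the minimum of 2.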