- etcd
- A container runner, one of:
  - docker
  - rkt
- Kubernetes
  - kubelet
  - kube-proxy
  - kube-apiserver
  - kube-controller-manager
  - kube-scheduler
docker pull quay.io/coreos/etcd
docker tag
docker rmi -f quay.io/coreos/etcd
docker run -tid --restart=always
Note: this lab uses an etcd cluster running outside of containers.
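Before moving on, it is worth checking that the external etcd cluster is healthy. A minimal sketch, assuming the etcd v2 API and that etcdctl is available on the node; the endpoints are the same ones configured for flanneld below:
etcdctl --endpoints http://192.168.8.101:2379,http://192.168.8.102:2379,http://192.168.8.103:2379 cluster-health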
[root@node1 ~]# ifconfig
eth0: flags=4163
flannel.1: flags=4163
iv. Managing flanneld with systemd
1. Create flanneld.service
cat >/lib/systemd/system/flanneld.service <<'HERE'
[Unit]
Description=Flannel Server
After=network.target
After=network-online.target
Wants=network-online.target
[Service]
Type=notify
EnvironmentFile=/etc/flanneld.conf
ExecStart=/usr/local/bin/flanneld -subnet-file=/etc/profile.d/flanneld.env -etcd-endpoints=${FLANNELD_ETCD_ENDPOINTS}
Restart=on-failure
LimitNOFILE=1000000
[Install]
WantedBy=multi-user.target
HERE
2. Create the main configuration file flanneld.conf
cat >/etc/flanneld.conf <<HERE
FLANNELD_ETCD_ENDPOINTS=http://192.168.8.101:2379,http://192.168.8.102:2379,http://192.168.8.103:2379
HERE
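flanneld also expects its network configuration to already exist in etcd (this may have been done when the etcd cluster was built). A minimal sketch, assuming the default /coreos.com/network key and the 10.1.0.0/16 range that the subnets seen later fall into:
etcdctl --endpoints http://192.168.8.101:2379 set /coreos.com/network/config '{"Network":"10.1.0.0/16"}'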
3. Test starting flanneld via systemd
[root@node4 ~]# systemctl enable flanneld
Created symlink from /etc/systemd/system/multi-user.target.wants/flanneld.service to /usr/lib/systemd/system/flanneld.service.
[root@node4 ~]# systemctl restart flanneld
[root@node4 ~]# systemctl status flanneld
● flanneld.service - Flannel Server
Aug 31 14:12:13 node4.example.com flanneld[2449]: I0831 14:12:13.759557 02449 local_manager.go...ng
Aug 31 14:12:13 node4.example.com flanneld[2449]: I0831 14:12:13.771634 02449 manager.go:246] ...24
Aug 31 14:12:13 node4.example.com systemd[1]: Started Flannel Server.
Aug 31 14:12:13 node4.example.com flanneld[2449]: I0831 14:12:13.772516 02449 network.go:58] W...es
Aug 31 14:12:13 node4.example.com flanneld[2449]: I0831 14:12:13.772545 02449 network.go:66] W...es
Aug 31 14:12:13 node4.example.com flanneld[2449]: I0831 14:12:13.789447 02449 network.go:153] ...ts
Aug 31 14:12:13 node4.example.com flanneld[2449]: I0831 14:12:13.789467 02449 device.go:163] c... 3
Aug 31 14:12:13 node4.example.com flanneld[2449]: I0831 14:12:13.789578 02449 network.go:160] ...4b
Aug 31 14:12:13 node4.example.com flanneld[2449]: I0831 14:12:13.789615 02449 network.go:160] ...01
Aug 31 14:12:13 node4.example.com flanneld[2449]: I0831 14:12:13.789620 02449 network.go:160] ...0c
Hint: Some lines were ellipsized, use -l to show in full.
Next, make docker use the flannel subnet: point the docker systemd unit at the environment file that flanneld writes, and pass the subnet and MTU on to dockerd:
EnvironmentFile=/etc/profile.d/flanneld.env
ExecStart=/usr/bin/dockerd --registry-mirror http://192.168.8.254:5000 --insecure-registry 192.168.8.254:5000 -H tcp://0.0.0.0:2375 -H unix:///var/run/docker.sock --bip=${FLANNEL_SUBNET} --mtu=${FLANNEL_MTU}
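After changing the docker unit, reload systemd and restart docker so the new --bip/--mtu values take effect; a short sketch:
systemctl daemon-reload
systemctl restart docker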
[root@node1 ~]# ifconfig docker0
docker0: flags=4099
[root@node2 ~]# ifconfig docker0
docker0: flags=4099
[root@node3 ~]# ifconfig docker0
docker0: flags=4099
[root@node3 ~]# ping 10.1.68.1
PING 10.1.68.1 (10.1.68.1) 56(84) bytes of data.
64 bytes from 10.1.68.1: icmp_seq=1 ttl=64 time=1.82 ms
64 bytes from 10.1.68.1: icmp_seq=2 ttl=64 time=0.733 ms
tar xvf kubernetes.tar.gz
cp kubernetes/platforms/linux/amd64/kubectl /usr/bin
chmod +x /usr/bin/kubectl
tar xvf
[root@node1 ~]# cd /opt/kubernetes/server/bin/
[root@node1 bin]# ls
federation-apiserver
federation-apiserver.docker_tag
federation-apiserver.tar
federation-controller-manager
federation-controller-manager.docker_tag
federation-controller-manager.tar
hyperkube
kube-apiserver
kube-apiserver.docker_tag
kube-apiserver.tar
kube-controller-manager
kube-controller-manager.docker_tag
Note: the Kubernetes binary release ships Docker images for kube-apiserver, kube-controller-manager, and kube-scheduler; just load them with docker load and they are ready to use.
docker load -i kube-apiserver.tar
docker load -i kube-controller-manager.tar
docker load -i kube-scheduler.tar
[root@node1 bin]# docker images
REPOSITORY
etcd
gcr.io/google_containers/kube-apiserver
gcr.io/google_containers/kube-controller-manager
gcr.io/google_containers/kube-scheduler
curl -sSL
tar -xvf go1.6.2.linux-amd64.tar.gz -C /opt
sudo cat >>/etc/profile <<'HERE'
export
export
export
HERE
source /etc/profile
Note: this mainly sets GOROOT (the Go installation path) and GOPATH (where your Go projects live; user-defined).
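The three export lines in the profile snippet above are left blank in the original. A minimal sketch of what they might look like, assuming Go was unpacked to /opt/go (which the tar command above produces) and a user-chosen GOPATH of /opt/gopath:
export GOROOT=/opt/go          # Go install path (tarball extracted under /opt)
export GOPATH=/opt/gopath      # assumed, user-defined location for Go projects
export PATH=$PATH:$GOROOT/bin:$GOPATH/bin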
root@router:~# go version
go version go1.6.2 linux/amd64
2. Install from source
Note: SSL and other related options:
--secure-port=443
--admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota
--service-account-key-file=/etc/kubernetes/ssl/apiserver-key.pem
--tls-private-key-file=/etc/kubernetes/ssl/apiserver-key.pem
--tls-cert-file=/etc/kubernetes/ssl/apiserver.pem
--client-ca-file=/etc/kubernetes/ssl/ca.pem
(startup log output: I0831 15:29:17.507564 through 15:29:17.530187; the log messages are truncated in the original)
Resolution:
After checking, this turned out to be a false positive. Why?
[root@node1 ~]# uname -r
3.10.0-327.el7.x86_64
[root@node1 ~]# grep 'CONFIG_BRIDGE_NETFILTER' /boot/config-3.10.0-327.el7.x86_64
CONFIG_BRIDGE_NETFILTER=y
[root@node1 ~]# sysctl -a|grep 'nf-call-iptables'
net.bridge.bridge-nf-call-iptables
Because, per http://ebtables.netfilter.org/documentation/bridge-nf.html:
Since Linux kernel 3.18-rc1, you have to modprobe br_netfilter to enable bridge-netfilter.
However, CentOS 7.3 compiles br_netfilter directly into the kernel, and the br-nf-call-iptables feature is enabled. The upstream Kubernetes issue tracker also discusses this; see
https://github.com/kubernetes/kubernetes/issues/23385
However, kubelet prints a warning highlighting the absence of br-netfilter:
1:58.462930 18042 proxier.go:205] missing br-netfilter module or unset br-nf-call-iptables; proxy may not work as intended
This warning seems to be incorrect. The check that triggers the warning is in NewProxier, located in proxier.go:
if _, err := os.Stat("/sys/module/br_netfilter"); os.IsNotExist(err) {
    warnBrNetfilter = true
}
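Because br_netfilter is built into the CentOS 7.3 kernel rather than loaded as a module, /sys/module/br_netfilter does not exist even though the functionality is active, which is exactly what trips the check above. A quick way to confirm this on a node (a sketch):
ls /sys/module/ | grep br_netfilter        # no output: not present as a loadable module here
sysctl net.bridge.bridge-nf-call-iptables  # prints 1, i.e. the feature is on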
[root@node4 ~]# kubectl version
Client Version: version.Info{Major:"1", Minor:"4", GitVersion:"v1.4.1", GitCommit:"a16c0a7f71a6f93c7e0f222d
Server Version: version.Info{Major:"1", Minor:"4", GitVersion:"v1.4.1", GitCommit:"a16c0a7f71a6f93c7e0f222d
[root@node4 ~]# kubectl get componentstatuses
NAME
controller-manager
scheduler
etcd-1
etcd-0
etcd-2
[root@node4 ~]# kubectl get nodes
NAME
node1.example.com
node2.example.com
node3.example.com
Note: if the node status shows NotReady here, check for misconfigured parameters. Mine stayed NotReady for a long time because, without knowing exactly what it meant, I had mistakenly set --configure-cbr0 (default false) to true.
[root@node4 ~]# curl -s http://192.168.8.201:8080/api
{
}
4. Running containers (pods)
Method 1: directly from the command line
[root@node4 ~]# kubectl run web --image=python3 --replicas=5 "python3 -m http.server 8080"
deployment "web" created
[root@node4 ~]# kubectl get pods
NAME
web-799709087-2dzex
web-799709087-8uyir
web-799709087-9hqiw
web-799709087-joh1u
web-799709087-zwczj
[root@node4 ~]# kubectl get deployment
NAME
web
[root@node4 ~]# kubectl describe pods web-799709087-2dzex
Name: web-799709087-2dzex
Namespace: default
Node: node3.example.com/192.168.8.103
Start Time: Sun, 28 Aug 2016 17:42:43 +0800
Labels: pod-template-hash=799709087
run=web
Status: Pending
IP:
Controllers: ReplicaSet/web-799709087
Containers:
Conditions:
No volumes.
QoS Tier: BestEffort
Events:
Note: the run above got stuck at ContainerCreating, even though I had already pulled the python3 image from the local registry onto all minion nodes beforehand. The reason is that k8s also depends on the pause image gcr.io/google_containers/pause-amd64:3.0, and different k8s versions depend on different pause versions.
Workarounds:
A. VPN: get across the firewall to gcr.io on your own.
B. Masquerade the image (on all minion nodes)
docker pull docker.io/kubernetes/pause
docker tag
docker rmi -f
The idea is simple: stage the required pause image locally on each minion node beforehand, keeping its name and tag identical to what this k8s version expects.
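For example, a sketch of the masquerade for this k8s version (the target name gcr.io/google_containers/pause-amd64:3.0 comes from the note above; adjust it to whatever your version expects):
docker pull docker.io/kubernetes/pause
docker tag docker.io/kubernetes/pause gcr.io/google_containers/pause-amd64:3.0
docker rmi -f docker.io/kubernetes/pause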
Redeploy, this time with nginx
[root@node4 ~]# kubectl delete deployment web
deployment "web" deleted
[root@node4 ~]# kubectl run nginx --image=nginx --replicas=2
deployment "nginx" created
[root@node4 ~]# kubectl get pods
NAME
nginx-3137573019-tza59
nginx-3137573019-xro4m
[root@node4 ~]# kubectl get pods -o wide
NAME
nginx-3137573019-0yuta
nginx-3137573019-fun4v
OK, the nginx containers are now running successfully.
Method 2: configuration files (YAML, JSON)
Note: for easier long-term maintenance and tracking, it is recommended to run pods from configuration files.
http://kubernetes.io/docs/user-guide/deployments/
cat >nginx.yaml <<HERE
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
spec:
HERE
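The metadata/spec bodies are omitted above. A minimal sketch of what nginx.yaml might contain, consistent with the three nginx-deployment replicas seen below; the image, labels and port are assumptions:
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 3
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx
        ports:
        - containerPort: 80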
[root@node4 ~]# kubectl create -f nginx.yaml
deployment "nginx-deployment" created
[root@node4 ~]# kubectl rollout status deployments nginx-deployment
deployment nginx-deployment successfully rolled out
[root@node4 ~]# kubectl get deployment
NAME
nginx
nginx-deployment
[root@node4 ~]# kubectl get pods
NAME
nginx-3137573019-0yuta
nginx-3137573019-fun4v
nginx-deployment-2445923563-az8ma
nginx-deployment-2445923563-bqlwd
nginx-deployment-2445923563-vz9l3
[root@node4 ~]# kubectl get rs
NAME
nginx-3137573019
nginx-deployment-2445923563
Rollback
http://kubernetes.io/docs/user-guide/rolling-updates/
[root@node4 ~]# kubectl rollout undo deployment/nginx-deployment
deployment "nginx-deployment" skipped rollback (DeploymentRollbackRevisi
[root@node4 ~]# kubectl rollout undo deployment/nginx-deployment --to-revision=2
deployment "nginx-deployment" skipped rollback (DeploymentRollbackRevisi
Update
To scale the pods out to 5, just change replicas: in the configuration file and apply it again:
[root@node4 ~]# kubectl replace -f nginx.yaml
deployment "nginx-deployment" replaced
[root@node4 ~]# kubectl get deployment
NAME
nginx
nginx-deployment
[root@node4 ~]# kubectl get pods
NAME
nginx-3137573019-0yuta
nginx-3137573019-fun4v
nginx-deployment-2445923563-az8ma
nginx-deployment-2445923563-bqlwd
nginx-deployment-2445923563-dr9dx
nginx-deployment-2445923563-s9vpy
nginx-deployment-2445923563-vz9l3
Zero-downtime online maintenance of a minion host (cordon, drain, uncordon)
Scenario: node1.example.com needs maintenance, but containers are currently running on it.
[root@node4 ~]# kubectl get pods -o wide
NAME
nginx-3137573019-0yuta
nginx-3137573019-fun4v
nginx-deployment-2445923563-4f2p6
nginx-deployment-2445923563-az8ma
nginx-deployment-2445923563-g0wkh
nginx-deployment-2445923563-mf0kf
nginx-deployment-2445923563-vz9l3
[root@node4 ~]# kubectl get nodes
NAME
node1.example.com
node2.example.com
node3.example.com
1. Mark the minion node to be maintained as SchedulingDisabled
New workloads will not be scheduled onto this minion node while the mark is in place.
[root@node4 ~]# kubectl cordon node1.example.com
node "node1.example.com" cordoned
[root@node4 ~]# kubectl get nodes
NAME
node1.example.com
node2.example.com
node3.example.com
2. Evacuate the containers from the minion node that needs maintenance
[root@node4 ~]# kubectl drain node1.example.com
node "node1.example.com" already cordoned
pod "nginx-deployment-2445923563-4f2p6" deleted
pod "nginx-deployment-2445923563-az8ma" deleted
pod "kubernetes-dashboard-3825951078-a9o82" deleted
pod "busybox-49452825-ldfpw" deleted
node "node1.example.com" drained
[root@node4 ~]# kubectl get pods -o wide
NAME
nginx-3137573019-0yuta
nginx-3137573019-fun4v
nginx-deployment-2445923563-3pwle
nginx-deployment-2445923563-41jqn
nginx-deployment-2445923563-g0wkh
nginx-deployment-2445923563-mf0kf
nginx-deployment-2445923563-vz9l3
3. After maintenance is complete, remove the SchedulingDisabled mark
[root@node4 ~]# kubectl uncordon node1.example.com
node "node1.example.com" uncordoned
[root@node4 ~]# kubectl get nodes
NAME
node1.example.com
node2.example.com
node3.example.com
Creating an rc (ReplicationController)
http://kubernetes.io/docs/user-guide/replicasets/
http://kubernetes.io/docs/user-guide/replication-controller/
Note: rs (ReplicaSet) is the next generation of rc. A Deployment is backed by an rs by default, which is why kubectl get rc shows nothing in many examples.
Replica Set is the next-generation Replication Controller. The only difference between a …
cat >rc-nginx.yaml <<HERE
apiVersion: v1
kind: ReplicationController
metadata:
spec:
HERE
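The metadata/spec bodies are again omitted. A minimal sketch of rc-nginx.yaml consistent with the describe output below (selector app=nginx, 3 replicas, image nginx); the exact field layout is an assumption:
apiVersion: v1
kind: ReplicationController
metadata:
  name: rc-nginx
spec:
  replicas: 3
  selector:
    app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx
        ports:
        - containerPort: 80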
[root@node4 ~]# kubectl create -f rc-nginx.yaml
replicationcontroller "rc-nginx" created
[root@node4 ~]# kubectl get rc
NAME
rc-nginx
[root@node4 ~]# kubectl describe rc
Name: rc-nginx
Namespace: default
Image(s): nginx
Selector: app=nginx
Labels: app=nginx
Replicas: 3 current / 3 desired
Pods Status: 3 Running / 0 Waiting / 0 Succeeded / 0 Failed
No volumes.
Events:
Now create redis pods as well
cat >redis.yaml <<HERE
apiVersion:
kind:
metadata:
spec:
HERE
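redis.yaml is likewise left empty above. A minimal sketch consistent with the rs/pod names below (a Deployment named redis with 3 replicas); the image and port are assumptions:
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: redis
spec:
  replicas: 3
  template:
    metadata:
      labels:
        app: redis
    spec:
      containers:
      - name: redis
        image: redis
        ports:
        - containerPort: 6379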
[root@node4 ~]# kubectl create -f redis.yaml
deployment "redis" created
[root@node4 ~]# kubectl get pods
NAME
rc-nginx-5lcku
rc-nginx-ffzu1
rc-nginx-mcaxg
redis-3972576797-3s64o
redis-3972576797-q7b0k
redis-3972576797-qc9xf
[root@node4 ~]# kubectl get rs
NAME
redis-3972576797
[root@node4 ~]# kubectl get rc
NAME
rc-nginx
Deleting the rc
[root@node4 ~]# kubectl delete rc rc-nginx
replicationcontroller "rc-nginx" deleted
or
[root@node4 ~]# kubectl delete rc/rc-nginx
replicationcontroller "rc-nginx" deleted
5. Defining a Service
http://kubernetes.io/docs/user-guide/services/
cat >nginx-service.json <<HERE
{
}
HERE
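The JSON body is omitted above. A minimal sketch of nginx-service.json consistent with the describe output below (ClusterIP service, selector app=nginx, ports http 80 and https 443); the targetPort values are assumptions:
{
  "kind": "Service",
  "apiVersion": "v1",
  "metadata": {
    "name": "nginx-service"
  },
  "spec": {
    "selector": {
      "app": "nginx"
    },
    "ports": [
      { "name": "http",  "port": 80,  "targetPort": 80 },
      { "name": "https", "port": 443, "targetPort": 443 }
    ]
  }
}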
[root@node4 ~]# kubectl create -f nginx-service.json
service "nginx-service" created
[root@node4 ~]# kubectl describe svc nginx-service
Name: nginx-service
Namespace: default
Labels:
Selector: app=nginx
Type: ClusterIP
IP: 10.1.177.130
Port: http 80/TCP
Endpoints: 10.1.43.4:80,10.1.56.3:80,10.1.79.2:80
Port: https 443/TCP
Endpoints: 10.1.43.4:443,10.1.56.3:443,10.1.79.2:443
Session Affinity: None
No events.
[root@node4 ~]# kubectl get svc nginx-service
NAME
nginx-service
[root@node4 ~]# kubectl get ep nginx-service
NAME
nginx-service
Note: a Service can have multiple pods serving it at the same time, similar to load balancing; when the Service is accessed, the request is redirected to one of the pods. However, k8s currently implements this with iptables port mapping, whereas Docker's latest swarm mode (docker-engine >= 1.12.0) raises the bar by using LVS for load balancing; on scheduling algorithms alone, iptables comes off much worse.
As shown above, 10.1.177.130 is the Service's virtual address; the mapping is as follows:
10.1.177.130:80 -> 10.1.43.4:80, 10.1.56.3:80, 10.1.79.2:80
10.1.177.130:443 -> 10.1.43.4:443, 10.1.56.3:443, 10.1.79.2:443
Note: the Service's ClusterIP cannot be pinged, but the corresponding resources are reachable on ports 80/443 from within the minion bridge network. You can also specify a valid ClusterIP within the --service-cluster-ip-range=10.1.0.0/16 range explicitly when creating the Service.
In actual testing I ran into this problem: when a pod runs on a particular minion, it cannot be reached via the ClusterIP from that same minion.
For example, when nginx has a single pod and it runs on Minion2, the resource is reachable via the ClusterIP from Minion1 and Minion3, but not from Minion2 itself. I do not know whether others have hit the same issue; it needs further investigation.
[root@node1 ~]# curl -I 10.1.177.130
HTTP/1.1 200 OK
Server: nginx/1.11.3
Date: Wed, 31 Aug 2016 08:57:39 GMT
Content-Type: text/html
Content-Length: 612
Last-Modified: Tue, 26 Jul 2016 14:54:48 GMT
Connection: keep-alive
ETag: "579779b8-264"
Accept-Ranges: bytes
[root@node1 ~]# iptables -t nat -S|grep 10.1.177.130
-A KUBE-SERVICES -d 10.1.177.130/32 -p tcp -m comment --comment "default/my-service:http cluster IP" -m tcp --dport 80 -j KUBE-SVC-I37Z43XJW6BD4TLV
-A KUBE-SERVICES -d 10.1.177.130/32 -p tcp -m comment --comment "default/my-service:https cluster IP" -m tcp --dport 443 -j KUBE-SVC-CKR3WBBWMIGA5XGG
When a new Service is defined, the corresponding iptables entries are generated on every minion node.
Deploying kubernetes-dashboard
1. Prepare kubernetes-dashboard.yaml (a Deployment plus a Service):
kind: Deployment
apiVersion: extensions/v1beta1
metadata:
spec:
---
kind: Service
apiVersion: v1
metadata:
spec:
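The metadata/spec bodies above are omitted. A minimal sketch of kubernetes-dashboard.yaml consistent with what is observed below (namespace kube-system, two replicas, container port 9090, a NodePort service); the image tag and the --apiserver-host argument are assumptions:
kind: Deployment
apiVersion: extensions/v1beta1
metadata:
  name: kubernetes-dashboard
  namespace: kube-system
  labels:
    app: kubernetes-dashboard
spec:
  replicas: 2
  template:
    metadata:
      labels:
        app: kubernetes-dashboard
    spec:
      containers:
      - name: kubernetes-dashboard
        image: gcr.io/google_containers/kubernetes-dashboard-amd64:v1.4.0   # assumed tag
        args:
        - --apiserver-host=http://192.168.8.201:8080                        # assumed; points at the master
        ports:
        - containerPort: 9090
---
kind: Service
apiVersion: v1
metadata:
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  type: NodePort
  selector:
    app: kubernetes-dashboard
  ports:
  - port: 80
    targetPort: 9090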
2. Create the deployment and service
[root@node4 ~]# kubectl create -f kubernetes-dashboard.yaml
deployment "kubernetes-dashboard" created
You have exposed your service on an external port on all nodes in your cluster.
If you want to expose this service to the external internet, you may need to set up firewall rules for the service port(s) (tcp:31653) to serve traffic.
See http://releases.k8s.io/release-1.3/docs/user-guide/services-firewalls.md for more details.
service "kubernetes-dashboard" created
3. Check the kubernetes-dashboard status
[root@node4 ~]# kubectl get pods --namespace=kube-system
NAME
kubernetes-dashboard-2950980434-1d82j
kubernetes-dashboard-2950980434-3v5lz
To delete kubernetes-dashboard:
[root@node4 ~]# kubectl delete deployment kubernetes-dashboard --namespace=kube-system
deployment "kubernetes-dashboard" deleted
[root@node4 ~]# kubectl delete service kubernetes-dashboard --namespace=kube-system
service "kubernetes-dashboard" deleted
4. Access kubernetes-dashboard
i. Via the Master's API proxy
Short URL: http://192.168.8.201:8080/ui
Long URL: http://192.168.8.201:8080/api/v1/proxy/namespaces/kube-system/services/kubernetes-dashboard/
Note: this method requires the Master to be able to reach the minion bridge network; if the route is missing, the error shown above is reported. The simplest fix is to run flanneld on the Master as well:
flanneld -iface=eth0 -subnet-file=/etc/profile.d/flannel.env -etcd-endpoints=http://192.168.8.101:2379,http://192.168.8.102:2379,http://192.168.8.103:2379
ii. Via the ClusterIP: 10.1.124.152
[root@node4 ~]# kubectl get svc --namespace=kube-system
NAME
kubernetes-dashboard
iii. Via the pod IPs
[root@node4 ~]# kubectl get ep --namespace=kube-system
NAME                   ENDPOINTS
kubernetes-dashboard   10.1.43.2:9090,10.1.56.2:9090