# Deploy the Heapster + InfluxDB + Grafana monitoring stack.
git clone https://github.com/kubernetes/heapster.git
kubectl apply -f heapster/deploy/kube-config/influxdb/
kubectl apply -f heapster/deploy/kube-config/rbac/heapster-rbac.yaml
# After applying the YAML above, image pulls from k8s.gcr.io may fail;
# find equivalent images (via `docker search`) and re-tag them locally as k8s.gcr.io/...
# Change the monitoring-grafana Service Type to NodePort, or create an Ingress,
# so the dashboard is reachable from outside the cluster.
# NOTE: the Heapster manifests create everything in the kube-system namespace,
# so the namespace flag is required here.
kubectl edit svc monitoring-grafana -n kube-system
遇到的问题
1、heapster pod中报如下错误(10255端口不通,k8s默认使用10250作为kubelet端口)
E0730 11:24:05.015741 1 manager.go:101] Error in scraping containers from kubelet:172.31.2.131:10255: failed to get all container stats from Kubelet URL "http://172.31.2.131:10255/stats/container/": Post http://172.31.2.131:10255/stats/container/: dial tcp 172.31.2.131:10255: getsockopt: connection refused
解决:
# The heapster Deployment lives in kube-system; without -n the edit
# would look in the default namespace and fail.
kubectl edit deployment heapster -n kube-system
修改
  - --source=kubernetes:https://kubernetes.default
为
  - --source=kubernetes:https://kubernetes.default?useServiceAccount=true&kubeletHttps=true&kubeletPort=10250&insecure=true
2. heapster pod中报如下错误(rbac权限不够,但是看了heapster这个clusterrolebinding,发现是有足够的权限get list watch pod的)
E0823 02:26:05.018478 1 kubelet.go:239] error while getting containers from Kubelet 172.16.0.12:10250: failed to get all container stats from Kubelet URL "https://172.16
.0.12:10250/stats/container/": request failed - "403 Forbidden", response: "Forbidden (user=system:serviceaccount:kube-system:heapster, verb=create, resource=nodes, subresourc
e=stats)"
临时解决:先删除原有的 clusterrolebinding:kubectl delete clusterrolebinding heapster
然后新建一个绑定更高权限 ClusterRole 的 ClusterRoleBinding 并重启 heapster pod(注意:cluster-admin 权限过大,生产环境建议自建一个仅授予 nodes/stats create 权限的 ClusterRole)
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: heapster
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin   # cluster-admin的权限
subjects:
- kind: ServiceAccount
  name: heapster
  namespace: kube-system
验证
通过使用nodeIp:NodePort访问服务,可以看到默认的Cluster和Pod统计图
使用kubectl top node如下
[root@node1 kubelet.service.d]# kc top node
NAME CPU(cores) CPU% MEMORY(bytes) MEMORY%
node1.com 175m 8% 2285Mi 61%
node2.com 97m 4% 2094Mi 56%
[root@node1 kubelet.service.d]# kc top pod
NAME CPU(cores) MEMORY(bytes)
altered-horse-mysql-5fd7bcc98c-9sc7n 2m 241Mi
austere-llama-mariadb-7f578657c6-9rd84 3m 143Mi
austere-llama-wordpress-75cbff959-tz469 3m 223Mi
coredns-78fcdf6894-5bwc7 1m 11Mi
coredns-78fcdf6894-x79ww 1m 12Mi
default-http-backend-784cf5f7b5-p9mcd 0m 2Mi
etcd-node1.com 16m 186Mi
heapster-798c4b6988-xmnzp 2m 51Mi
kube-apiserver-node1.com 25m 306Mi
kube-controller-manager-node1.com 28m 64Mi
kube-flannel-ds-8zcwd 1m 16Mi
kube-flannel-ds-gldqx 1m 18Mi
kube-proxy-szb9p 3m 15Mi
kube-proxy-tq7d7 3m 15Mi
kube-scheduler-node1.com 8m 19Mi
monitoring-grafana-77fbf95454-4k752 0m 15Mi
monitoring-influxdb-56b597d5c5-55kpf 2m 65Mi
nginx-ingress-controller-84b8b6df66-46848 3m 138Mi
tiller-deploy-7b5cb97898-dnmc2 0m 15Mi