前提环境
nfs环境和k8s环境已经安装好,这里不再演示。
搭建nfs client、storageclass
mkdir -p /nfs/nfs-share
mkdir -p /nfs/nacos-mysql-master
mkdir -p /nfs/nacos-mysql-slave
vi rbac.yml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: default
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: default
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io
创建该资源:
kubectl apply -f rbac.yml
创建:client-provisioner-deployment.yml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-client-provisioner
  labels:
    app: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nfs-client-provisioner
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          # NOTE(review): this legacy image depends on the selfLink API field,
          # which was removed in Kubernetes v1.20 — on newer clusters use
          # the nfs-subdir-external-provisioner image instead; confirm your
          # cluster version before deploying.
          image: quay.io/external_storage/nfs-client-provisioner:latest
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: com.wj/nfs  # any name you like; must match the StorageClass "provisioner" field
            - name: NFS_SERVER
              value: "192.168.1.53"  # NFS server IP
            - name: NFS_PATH
              value: /nfs/nfs-share  # exported NFS directory
      volumes:
        - name: nfs-client-root
          nfs:
            server: 192.168.1.53  # NFS server IP
            path: /nfs/nfs-share  # exported NFS directory
kubectl apply -f client-provisioner-deployment.yml
定义storageclass.yml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: managed-nfs-storage  # StorageClass name; any name you like
provisioner: com.wj/nfs  # must match PROVISIONER_NAME in client-provisioner-deployment.yml
parameters:
  archiveOnDelete: "false"  # delete (don't archive) data when the PVC is removed
搭建mysql主从
mysql主数据库
vi nacos-mysql-configmap.yml
apiVersion: v1
kind: ConfigMap
metadata:
  name: nacos-mysql-cm
data:
  nacos.mysql.master.root.password: "nacos_root"  # master DB root password
  nacos.mysql.master.db: "febs_nacos"             # master DB schema name
  nacos.mysql.master.user: "nacos"                # master DB user
  nacos.mysql.master.password: "nacos"            # master DB user password
  nacos.mysql.master.port: "3306"                 # master DB port
  nacos.mysql.replication.user: "nacos_ru"        # replication account
  nacos.mysql.replication.password: "nacos_ru"    # replication account password
  nacos.mysql.slave.root.password: "nacos_root"   # slave DB root password
  nacos.mysql.slave.port: "3306"                  # slave DB port
创建configmap:
kubectl apply -f nacos-mysql-configmap.yml
部署nacos-mysql-master.yml:
apiVersion: v1
kind: ReplicationController
metadata:
  name: nacos-mysql-master
  labels:
    name: nacos-mysql-master
spec:
  replicas: 1
  selector:
    name: nacos-mysql-master
  template:
    metadata:
      labels:
        name: nacos-mysql-master
    spec:
      containers:
        - name: master
          image: nacos/nacos-mysql-master:latest  # nacos-mysql-master image
          ports:
            - containerPort: 3306
          volumeMounts:
            - mountPath: /var/lib/mysql
              name: nacos-mysql-master-data
          # environment loaded from the nacos-mysql-cm ConfigMap
          env:
            - name: MYSQL_ROOT_PASSWORD
              valueFrom:
                configMapKeyRef:
                  name: nacos-mysql-cm
                  key: nacos.mysql.master.root.password
            - name: MYSQL_DATABASE
              valueFrom:
                configMapKeyRef:
                  name: nacos-mysql-cm
                  key: nacos.mysql.master.db
            - name: MYSQL_USER
              valueFrom:
                configMapKeyRef:
                  name: nacos-mysql-cm
                  key: nacos.mysql.master.user
            - name: MYSQL_PASSWORD
              valueFrom:
                configMapKeyRef:
                  name: nacos-mysql-cm
                  key: nacos.mysql.master.password
            - name: MYSQL_REPLICATION_USER
              valueFrom:
                configMapKeyRef:
                  name: nacos-mysql-cm
                  key: nacos.mysql.replication.user
            - name: MYSQL_REPLICATION_PASSWORD
              valueFrom:
                configMapKeyRef:
                  name: nacos-mysql-cm
                  key: nacos.mysql.replication.password
      # NFS-backed data volume
      volumes:
        - name: nacos-mysql-master-data
          nfs:
            path: /nfs/nacos-mysql-master
            server: 192.168.1.53
---
# Service for the master DB, used by the nacos server
apiVersion: v1
kind: Service
metadata:
  # must be named mysql-master: the nacos server resolves the DB by this name
  name: mysql-master
  labels:
    name: mysql-master
spec:
  ports:
    - port: 3306
      targetPort: 3306
  selector:
    name: nacos-mysql-master
kubectl apply -f nacos-mysql-master.yml
查看对应Pod状态:
kubectl get pods -w | grep nacos
要等到running状态后,再执行下一步
进入容器内部:
kubectl exec -it pod/nacos-mysql-master-nbw8r -- sh
查看数据库是否初始化:在容器内执行 mysql -uroot -pnacos_root 登录数据库,再执行 show databases; 确认 febs_nacos 库已创建。
exit退出数据库后,exit再退出容器,查看文件挂载:
ll /nfs/nacos-mysql-master
mysql从数据库
nacos-mysql-slave.yml:
apiVersion: v1
kind: ReplicationController
metadata:
  name: nacos-mysql-slave
  labels:
    name: nacos-mysql-slave
spec:
  replicas: 1
  selector:
    name: nacos-mysql-slave
  template:
    metadata:
      labels:
        name: nacos-mysql-slave
    spec:
      containers:
        - name: slave
          image: nacos/nacos-mysql-slave:latest
          ports:
            - containerPort: 3306
          volumeMounts:
            - mountPath: /var/lib/mysql
              name: nacos-mysql-slave-data
          # environment loaded from the nacos-mysql-cm ConfigMap
          env:
            - name: MYSQL_ROOT_PASSWORD
              valueFrom:
                configMapKeyRef:
                  name: nacos-mysql-cm
                  key: nacos.mysql.slave.root.password
            - name: MYSQL_REPLICATION_USER
              valueFrom:
                configMapKeyRef:
                  name: nacos-mysql-cm
                  key: nacos.mysql.replication.user
            - name: MYSQL_REPLICATION_PASSWORD
              valueFrom:
                configMapKeyRef:
                  name: nacos-mysql-cm
                  key: nacos.mysql.replication.password
      # NFS-backed data volume
      volumes:
        - name: nacos-mysql-slave-data
          nfs:
            path: /nfs/nacos-mysql-slave
            server: 192.168.1.53
---
apiVersion: v1
kind: Service
metadata:
  # must be named mysql-slave: the nacos server resolves the DB by this name
  name: mysql-slave
  labels:
    name: mysql-slave
spec:
  ports:
    - port: 3306
      targetPort: 3306
  selector:
    name: nacos-mysql-slave
kubectl apply -f nacos-mysql-slave.yml
创建后,观察Pod运行情况:
kubectl get pods -w | grep nacos
部署Nacos
nacos-server.yml:
---
# Headless service used by nacos for peer discovery
apiVersion: v1
kind: Service
metadata:
  name: nacos-headless
  labels:
    app: nacos
  annotations:
    service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
spec:
  ports:
    - port: 8848
      name: server
      targetPort: 8848
  clusterIP: None
  selector:
    app: nacos
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: nacos
spec:
  serviceName: nacos-headless
  replicas: 2
  # spec.selector is required for apps/v1 StatefulSet and must match the
  # pod template labels (it was dangling at the end of the original spec)
  selector:
    matchLabels:
      app: nacos
  template:
    metadata:
      labels:
        app: nacos
      annotations:
        pod.alpha.kubernetes.io/initialized: "true"
    spec:
      # spread nacos pods across nodes: at most one per hostname
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: "app"
                    operator: In
                    values:
                      - nacos
              topologyKey: "kubernetes.io/hostname"
      serviceAccountName: nfs-client-provisioner
      initContainers:
        - name: peer-finder-plugin-install
          image: nacos/nacos-peer-finder-plugin:latest
          imagePullPolicy: Always
          volumeMounts:
            - mountPath: "/home/nacos/plugins/peer-finder"
              name: plugindir
      containers:
        - name: nacos
          imagePullPolicy: IfNotPresent
          image: nacos/nacos-server:1.1.4
          resources:
            requests:
              # the official example requests 2Gi; lowered to 1.5Gi due to VM
              # memory pressure — raise it in production to avoid OOM
              memory: "1.5Gi"
              cpu: "500m"
            limits:
              memory: "2Gi"
              cpu: "1000m"
          ports:
            - containerPort: 8848
              name: client-port
          env:
            - name: NACOS_REPLICAS
              value: "2"  # only 2 nacos server instances here, again due to VM memory pressure
            - name: SERVICE_NAME
              value: "nacos-headless"  # instance discovery goes through the headless service
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.namespace
            - name: MYSQL_MASTER_SERVICE_DB_NAME
              valueFrom:
                configMapKeyRef:
                  name: nacos-mysql-cm
                  key: nacos.mysql.master.db
            - name: MYSQL_MASTER_SERVICE_PORT
              valueFrom:
                configMapKeyRef:
                  name: nacos-mysql-cm
                  key: nacos.mysql.master.port
            - name: MYSQL_SLAVE_SERVICE_PORT
              valueFrom:
                configMapKeyRef:
                  name: nacos-mysql-cm
                  key: nacos.mysql.slave.port
            - name: MYSQL_MASTER_SERVICE_USER
              valueFrom:
                configMapKeyRef:
                  name: nacos-mysql-cm
                  key: nacos.mysql.master.user
            - name: MYSQL_MASTER_SERVICE_PASSWORD
              valueFrom:
                configMapKeyRef:
                  name: nacos-mysql-cm
                  key: nacos.mysql.master.password
            - name: NACOS_SERVER_PORT
              value: "8848"
            - name: PREFER_HOST_MODE
              value: "hostname"
          readinessProbe:
            httpGet:
              port: client-port
              path: /nacos/v1/console/health/readiness
            initialDelaySeconds: 60
            timeoutSeconds: 3
          livenessProbe:
            httpGet:
              port: client-port
              path: /nacos/v1/console/health/liveness
            initialDelaySeconds: 60
            timeoutSeconds: 3
          volumeMounts:
            - name: plugindir
              mountPath: /home/nacos/plugins/peer-finder
            - name: datadir
              mountPath: /home/nacos/data
            - name: logdir
              mountPath: /home/nacos/logs
  # PVs/PVCs are provisioned automatically via the StorageClass
  volumeClaimTemplates:
    - metadata:
        name: plugindir
      spec:
        storageClassName: managed-nfs-storage
        accessModes: ["ReadWriteMany"]
        resources:
          requests:
            storage: 5Gi
    - metadata:
        name: datadir
      spec:
        storageClassName: managed-nfs-storage
        accessModes: ["ReadWriteMany"]
        resources:
          requests:
            storage: 5Gi
    - metadata:
        name: logdir
      spec:
        storageClassName: managed-nfs-storage
        accessModes: ["ReadWriteMany"]
        resources:
          requests:
            storage: 5Gi
---
# NodePort service so the nacos web console is reachable from outside the
# cluster; an Ingress would also work
apiVersion: v1
kind: Service
metadata:
  name: nacos-service
spec:
  type: NodePort
  ports:
    - port: 8001       # in-cluster port
      targetPort: 8848
      nodePort: 30000  # external port
  selector:
    app: nacos
查看Pod创建情况
kubectl get pods -w | grep nacos
等待全部变成running状态
进入容器内部观察:
kubectl exec -it pods/nacos-0 -- sh
cat /home/nacos/logs/start.out
日志内容如下:
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl kubectl exec [POD] -- [COMMAND] instead.
sh-4.2# cat /home/nacos/logs/start.out
,--.
,--.'|
,--,: : | Nacos 1.1.4
,`--.'`| ' : ,---. Running in cluster mode, All function modules
| : : | | ' ,' .--.--. Port: 8848
: | | : ,--.--. ,---. / / | / / ' Pid: 35
| : ' '; | / / . ; ,. :| : /`./ Console: http://nacos-0.nacos-headless.default.svc.cluster.local:8848/nacos/index.html
' ' ;. ;.--. .-. | / / '' | |: :| : ;_
| | | | \__/: . .. ' / ' | .; : `. https://nacos.io
' : | ; .' ," .--.; |' ; :__| : | `----.
| | '`--' / / ,. |' | '.'| / / /`--' /
' : | ; : .' : : `----' '--'. /
; |.' | , .-./ / `--'---'
'---' `--`---' `----'
2021-05-12 15:10:08,115 INFO The server IP list of Nacos is [nacos-0.nacos-headless.default.svc.cluster.local:8848, nacos-1.nacos-headless.default.svc.cluster.local:8848]
2021-05-12 15:10:09,188 INFO Nacos is starting...
2021-05-12 15:10:10,196 INFO Nacos is starting...
2021-05-12 15:10:11,200 INFO Nacos is starting...
2021-05-12 15:10:12,204 INFO Nacos is starting...
2021-05-12 15:10:13,207 INFO Nacos is starting...
2021-05-12 15:10:14,276 INFO Nacos is starting...
2021-05-12 15:10:15,279 INFO Nacos is starting...
2021-05-12 15:10:16,282 INFO Nacos is starting...
2021-05-12 15:10:17,286 INFO Nacos is starting...
2021-05-12 15:10:18,292 INFO Nacos is starting...
2021-05-12 15:10:19,295 INFO Nacos is starting...
2021-05-12 15:10:20,298 INFO Nacos is starting...
2021-05-12 15:10:21,303 INFO Nacos is starting...
2021-05-12 15:10:22,314 INFO Nacos is starting...
2021-05-12 15:10:23,322 INFO Nacos is starting...
2021-05-12 15:10:24,378 INFO Nacos is starting...
2021-05-12 15:10:25,382 INFO Nacos is starting...
2021-05-12 15:10:26,384 INFO Nacos is starting...
2021-05-12 15:10:27,384 INFO Nacos is starting...
2021-05-12 15:10:28,387 INFO Nacos is starting...
2021-05-12 15:10:29,389 INFO Nacos is starting...
2021-05-12 15:10:30,392 INFO Nacos is starting...
2021-05-12 15:10:31,413 INFO Nacos is starting...
2021-05-12 15:10:32,415 INFO Nacos is starting...
2021-05-12 15:10:33,417 INFO Nacos is starting...
2021-05-12 15:10:34,421 INFO Nacos is starting...
2021-05-12 15:10:35,423 INFO Nacos is starting...
2021-05-12 15:10:36,298 INFO Nacos Log files: /home/nacos/logs/
2021-05-12 15:10:36,299 INFO Nacos Conf files: /home/nacos/conf/
2021-05-12 15:10:36,299 INFO Nacos Data files: /home/nacos/data/
2021-05-12 15:10:36,299 INFO Nacos started successfully in cluster mode.
查看nacos-service:
kubectl describe service/nacos-service
绑定了两个端点。
访问nacos页面:http://192.168.1.53:30000/nacos
账号密码都是nacos
集群信息:
测试服务注册
<dependencies>
    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter</artifactId>
    </dependency>
    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-web</artifactId>
    </dependency>
    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-test</artifactId>
        <scope>test</scope>
    </dependency>
    <!-- https://mvnrepository.com/artifact/com.alibaba.cloud/spring-cloud-starter-alibaba-nacos-discovery -->
    <dependency>
        <groupId>com.alibaba.cloud</groupId>
        <artifactId>spring-cloud-starter-alibaba-nacos-discovery</artifactId>
        <version>2.2.1.RELEASE</version>
    </dependency>
</dependencies>
配置文件:
# application port
server.port=83
# service name shown in the nacos service list
spring.application.name=myapp
# nacos server address (NodePort of nacos-service)
spring.cloud.nacos.discovery.server-addr=192.168.1.53:30000
启动类:
/**
 * Spring Boot entry point; @EnableDiscoveryClient registers the
 * application with nacos using the discovery settings in
 * application.properties.
 */
@EnableDiscoveryClient
@SpringBootApplication
public class BootCloudApplication {

    public static void main(String[] args) {
        SpringApplication.run(BootCloudApplication.class, args);
    }
}
启动后:服务已经注册到nacos中。
配置中心的功能自行测试,这里不再演示。