Switch to your own working directory; in this article the files live under /home/ubuntu:
cd /home/ubuntu
vim redis.conf
bind 0.0.0.0
protected-mode yes
port 6379
requirepass qwe123456
tcp-backlog 511
timeout 0
tcp-keepalive 300
daemonize no
pidfile /var/run/redis_6379.pid
loglevel notice
logfile "/tmp/redis.log"
databases 16
always-show-logo no
stop-writes-on-bgsave-error yes
rdbcompression yes
rdbchecksum yes
dbfilename dump.rdb
dir /data
vim redis.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: redis-single-node
  name: redis-single-node
spec:
  progressDeadlineSeconds: 600 # deadline (seconds) for the rollout to make progress
  replicas: 1 # number of replicas
  revisionHistoryLimit: 10 # number of old ReplicaSets to retain
  selector:
    matchLabels:
      app: redis-single-node # selector used to match the Pods
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 0
    type: RollingUpdate
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: redis-single-node
    spec:
      containers:
      - command:
        - sh
        - -c
        - redis-server "/mnt/redis.conf"
        env:
        - name: TZ
          value: Asia/Shanghai
        - name: LANG
          value: C.UTF-8
        image: redis:5.0.4-alpine # Redis image version
        imagePullPolicy: IfNotPresent
        lifecycle: {}
        livenessProbe:
          failureThreshold: 2 # maximum number of failures: 2
          initialDelaySeconds: 10 # start probing 10 seconds after the container starts
          periodSeconds: 10 # probe every 10 seconds
          successThreshold: 1 # a single success counts as healthy
          tcpSocket:
            port: 6379
          timeoutSeconds: 2
        name: redis-single-node
        ports:
        - containerPort: 6379
          name: web
          protocol: TCP
        readinessProbe:
          failureThreshold: 2
          initialDelaySeconds: 10
          periodSeconds: 10
          successThreshold: 1
          tcpSocket:
            port: 6379
          timeoutSeconds: 2
        resources: # resource limits
          limits: # maximum resources the container may use
            cpu: 100m # CPU is measured in millicores (m); a node's total CPU equals its core count x 1000, e.g. a 2-core node has 2000m
            memory: 339Mi
          requests: # resources requested at startup; the node must be able to provide at least this much
            cpu: 10m
            memory: 10Mi
        securityContext: # security context parameters
          privileged: false # privileged mode (full host privileges)
          runAsNonRoot: false # set to true to forbid running the container as root
        terminationMessagePath: /dev/termination-log # path of the container's termination message, /dev/termination-log by default; the exit message is visible in the container status
        terminationMessagePolicy: File # by default the exit message is read from this file; set to FallbackToLogsOnError to read from the logs instead
        volumeMounts:
        - mountPath: /usr/share/zoneinfo/Asia/Shanghai
          name: tz-config
        - mountPath: /etc/localtime
          name: tz-config
        - mountPath: /etc/timezone
          name: timezone
        - mountPath: /mnt
          name: redis-conf
          readOnly: true
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      terminationGracePeriodSeconds: 30 # time allowed for graceful Pod termination; defaults to 30 seconds
      tolerations: # tolerations
      - effect: NoExecute # the Pod stays on the node despite the taint, for up to tolerationSeconds
        key: node.kubernetes.io/unreachable
        operator: Exists
        tolerationSeconds: 30
      - effect: NoExecute
        key: node.kubernetes.io/not-ready
        operator: Exists
        tolerationSeconds: 30
      volumes:
      - hostPath:
          path: /usr/share/zoneinfo/Asia/Shanghai
          type: ""
        name: tz-config
      - hostPath:
          path: /etc/timezone
          type: ""
        name: timezone
      - configMap:
          defaultMode: 420
          name: redis-conf
        name: redis-conf
kubectl create cm redis-conf --from-file=redis.conf
kubectl create -f redis.yaml
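Once both objects exist, a quick way to verify the deployment (assuming the default namespace; the password is the requirepass value from redis.conf):
kubectl get pods -l app=redis-single-node
# exec into the Pod created by the Deployment and ping Redis with the requirepass value
kubectl exec -it deploy/redis-single-node -- redis-cli -a qwe123456 ping
# expected reply: PONG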
Helm is used for the installation here, so install Helm first; if it is already installed, skip step 2.1.
wget https://get.helm.sh/helm-v3.12.1-linux-amd64.tar.gz
tar -zxvf helm-v3.12.1-linux-amd64.tar.gz
mv linux-amd64/helm /usr/local/bin/helm
helm repo add bitnami https://charts.bitnami.com/bitnami
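If needed, refresh the local chart index and confirm the MongoDB chart is available (standard Helm commands, shown here as a quick sanity check):
helm repo update
helm search repo bitnami/mongodb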
vim mongodb-pv.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: mongodb-pv
spec:
  capacity:
    storage: 5Gi
  accessModes:
  - ReadWriteOnce
  hostPath:
    path: /bitnami/mongodb/data
In the above, /bitnami/mongodb/data is a real path on the host. Tip: if the pod lacks permission on it, grant ownership to UID 1001.
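For example, on the node that provides the hostPath (a sketch; adjust if your path differs):
sudo mkdir -p /bitnami/mongodb/data
sudo chown -R 1001:1001 /bitnami/mongodb/data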
vim mongodb-pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mongodb-pvc
spec:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 5Gi
  volumeName: mongodb-pv
vim mongodb-values.yaml
persistence:
  enabled: true
  existingClaim: "mongodb-pvc"
securityContext:
  privileged: true
  runAsUser: 1001
  runAsGroup: 1001
  fsGroup: 1001
auth:
  rootPassword: "your-custom-password"
After creating the above three files, execute the following in order:
① kubectl apply -f mongodb-pv.yaml
② kubectl apply -f mongodb-pvc.yaml
③ helm install my-mongodb bitnami/mongodb -f mongodb-values.yaml --set volumePermissions.enabled=true
Note: --set volumePermissions.enabled=true is required in step ③; without it the pod has no permission to create directories and files and fails with mkdir: cannot create directory '/bitnami/mongodb/data': Permission denied.
After a successful installation, to allow external access, edit the Service type to NodePort just as with Redis above and set a nodePort. The port number is up to you as long as the firewall allows it, but note that NodePort values must fall inside the cluster's NodePort range (30000-32767 by default), so a value such as 30017 is used instead of 27017.
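A minimal sketch of that change, assuming the chart created a Service named my-mongodb in the default namespace and using 30017 as the node port:
kubectl patch svc my-mongodb --type merge \
  -p '{"spec":{"type":"NodePort","ports":[{"name":"mongodb","port":27017,"targetPort":27017,"nodePort":30017}]}}'
kubectl get svc my-mongodb   # confirm the NodePort is listed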
To uninstall, run helm uninstall my-mongodb.
Connection string: mongodb://root:password@ip:port
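For example, from a machine that can reach a cluster node (a sketch; substitute your node IP, the node port chosen above, and the root password from mongodb-values.yaml):
mongosh "mongodb://root:your-custom-password@<node-ip>:30017/admin"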
To change a password, first switch to the admin database with use admin, then call db.changeUserPassword:
use admin
db.changeUserPassword("username", "password")
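These commands are entered in a mongo shell; one way to open one inside the pod, assuming the release is named my-mongodb (the standalone chart creates a Deployment with that name):
# recent bitnami/mongodb images ship mongosh; older tags ship the legacy `mongo` shell instead
kubectl exec -it deploy/my-mongodb -- mongosh -u root -p your-custom-password admin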
Prerequisite: as in step 2 for MongoDB, create a PV and a PVC for Kafka, named kafka-pv and kafka-pvc (a sketch follows below), then write the values file.
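A minimal sketch of those two objects, following the same pattern as the MongoDB PV/PVC above; the hostPath /bitnami/kafka/data and the 5Gi size are assumptions, adjust them to your environment:
apiVersion: v1
kind: PersistentVolume
metadata:
  name: kafka-pv
spec:
  capacity:
    storage: 5Gi
  accessModes:
  - ReadWriteOnce
  hostPath:
    path: /bitnami/kafka/data
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: kafka-pvc
spec:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 5Gi
  volumeName: kafka-pv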
vim kafka-values.yaml
replicaCount: 1 # number of kafka replicas
#global:
#  storageClass: nfs-client # storage class used by kafka and zookeeper
heapOpts: "-Xmx1024m -Xms1024m" # JVM options for kafka
persistence: # storage for each kafka replica
  enabled: true
  existingClaim: "kafka-pvc"
resources:
  limits:
    cpu: 1000m
    memory: 2Gi
  requests:
    cpu: 100m
    memory: 100Mi
zookeeper:
  replicaCount: 1 # number of zookeeper replicas
  persistence:
    enabled: true
    existingClaim: "kafka-pvc"
  resources:
    limits:
      cpu: 2000m
      memory: 2Gi
externalAccess:
  enabled: true # enable external access
  autoDiscovery:
    enabled: true
  service:
    type: NodePort # expose via NodePort
    ports:
      external: 9094
    nodePorts: # NodePort values; one entry per kafka replica
    - 30001
    # - 30002
    # - 30003
Deploy with:
helm install my-kafka bitnami/kafka -f kafka-values.yaml --set volumePermissions.enabled=true --set rbac.create=true
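Once the pods are ready, a quick smoke test from inside the cluster (a sketch; assumes the release is named my-kafka, so the broker is reachable at my-kafka:9092 in the default namespace):
kubectl get pods -l app.kubernetes.io/instance=my-kafka
# start a throwaway client pod
kubectl run kafka-client --rm -it --image bitnami/kafka:latest -- bash
# inside the client pod:
kafka-topics.sh --bootstrap-server my-kafka:9092 --create --topic smoke-test --partitions 1 --replication-factor 1
kafka-console-producer.sh --bootstrap-server my-kafka:9092 --topic smoke-test
kafka-console-consumer.sh --bootstrap-server my-kafka:9092 --topic smoke-test --from-beginning
# external clients can use <node-ip>:30001 as configured in kafka-values.yaml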
vim kafka-console-ui-deploy.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kafka-console-ui
  namespace: default
spec:
  replicas: 1
  selector:
    matchLabels:
      app: kafka-console-ui
  template:
    metadata:
      labels:
        app: kafka-console-ui
    spec:
      containers:
      - name: kafka-console-ui
        resources:
          limits:
            cpu: 1000m
            memory: 1Gi
          requests:
            cpu: 10m
            memory: 10Mi
        image: wdkang/kafka-console-ui:latest
        volumeMounts:
        - mountPath: /etc/localtime
          readOnly: true
          name: time-data
      volumes:
      - name: time-data
        hostPath:
          path: /usr/share/zoneinfo/Asia/Shanghai
vim kafka-console-ui-service.yaml
kind: Service
apiVersion: v1
metadata:
  labels:
    app: kafka-console-ui
  name: kafka-console-ui
  namespace: default
spec:
  ports:
  - port: 7766
    targetPort: 7766
    nodePort: 30088
  selector:
    app: kafka-console-ui
  type: NodePort
Apply both files:
① kubectl apply -f kafka-console-ui-service.yaml
② kubectl apply -f kafka-console-ui-deploy.yaml
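After both objects are created, the console should be reachable on any node at port 30088 (substitute your node IP):
kubectl get pods -l app=kafka-console-ui
kubectl get svc kafka-console-ui
curl -I http://<node-ip>:30088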
kubectl rollout restart statefulset my-kafka -n default
Original article: https://www.cnblogs.com/wuyubing/p/17576508.html