Docker registry mirror configuration
[root@temple ~]# cat /etc/docker/daemon.json
{
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "10m",
    "max-file": "5"
  },
  "registry-mirrors": [
    "https://docker.sunzishaokao.com",
    "https://docker.1panel.live",
    "https://docker.kejilion.pro",
    "https://docker-registry.nmqu.com"
  ]
}
[root@temple ~]#
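Reload and restart Docker after editing daemon.json so the mirrors take effect, then confirm they were picked up:
systemctl daemon-reload
systemctl restart docker
docker info | grep -A5 'Registry Mirrors'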
Generating a MobaXterm license file (crack)
[root@node01 ~]# docker run -it -d \
-p 80:80 \
jockerdragon/mobaxterm-crack:latest
Copy the generated Custom.mxtpro file into the MobaXterm installation directory or the portable version's folder.
Kubernetes notes
Initialize the control-plane node
kubeadm init --apiserver-advertise-address=192.168.66.11 --image-repository registry.aliyuncs.com/google_containers --kubernetes-version 1.29.2 --service-cidr=10.10.0.0/12 --pod-network-cidr=10.244.0.0/16 --ignore-preflight-errors=all --cri-socket unix:///var/run/cri-dockerd.sock
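After init succeeds, set up kubectl for the current user (kubeadm prints these steps in its output):
mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config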
Regenerate the join command after the worker token expires
kubeadm token create --print-join-command
Join a worker node
kubeadm join 192.168.10.11:6443 --token a6xh07.yg9wh2vru2grluwb --discovery-token-ca-cert-hash sha256:7cd8499abae48c8403800152cc0f655ac704ea00ae30a549acd9bbac7b26dca4 --cri-socket unix:///var/run/cri-dockerd.sock
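Verify on the control plane that the worker registered:
kubectl get nodes -o wide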
Deploy the NFS server
yum -y install nfs-utils rpcbind
systemctl enable --now nfs-server
mkdir -p /data/nfs
chmod 777 /data/nfs
vim /etc/exports
/data/nfs 192.168.40.0/24(rw,sync,no_root_squash)
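Apply the new export and confirm it is visible:
exportfs -arv
showmount -e localhost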
Deploy NFS dynamic storage with Helm
# Add the Helm repository
helm repo add nfs-subdir-external-provisioner https://kubernetes-sigs.github.io/nfs-subdir-external-provisioner/
# Pull the image from the mirror and retag it to the upstream name
docker pull swr.cn-north-4.myhuaweicloud.com/ddn-k8s/k8s.gcr.io/sig-storage/nfs-subdir-external-provisioner:v4.0.2
docker tag swr.cn-north-4.myhuaweicloud.com/ddn-k8s/k8s.gcr.io/sig-storage/nfs-subdir-external-provisioner:v4.0.2 k8s.gcr.io/sig-storage/nfs-subdir-external-provisioner:v4.0.2
# If the pull/tag approach does not work, set the image repository in values.yaml directly to:
swr.cn-north-4.myhuaweicloud.com/ddn-k8s/k8s.gcr.io/sig-storage/nfs-subdir-external-provisioner
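The install command below runs against a local chart directory ("."), so pull and unpack the chart first; a minimal sketch assuming the repo alias added above:
helm pull nfs-subdir-external-provisioner/nfs-subdir-external-provisioner --untar
cd nfs-subdir-external-provisioner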
# Deploy the provisioner with Helm:
helm install nfs-provisioner -f values.yaml . --set nfs.server=192.168.40.3 --set nfs.path=/data/nfs --set storageClass.name=nfs-client --set storageClass.defaultClass=true --set image.tag=v4.0.2
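A quick way to confirm dynamic provisioning works is a throwaway PVC against the nfs-client class defined above (test-pvc is a hypothetical name):
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: test-pvc
spec:
  storageClassName: nfs-client
  accessModes: ["ReadWriteMany"]
  resources:
    requests:
      storage: 1Mi
EOF
kubectl get pvc test-pvc   # STATUS should become Bound
kubectl delete pvc test-pvc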
Deploy Kubernetes monitoring (kube-prometheus)
#Pull the mirrored images (on every node that may run these pods)
docker pull swr.cn-north-4.myhuaweicloud.com/ddn-k8s/quay.io/prometheus/prometheus:v2.54.1
docker pull swr.cn-north-4.myhuaweicloud.com/ddn-k8s/quay.io/prometheus/alertmanager:v0.27.0
docker pull swr.cn-north-4.myhuaweicloud.com/ddn-k8s/quay.io/prometheus-operator/prometheus-operator:v0.76.2
docker pull swr.cn-north-4.myhuaweicloud.com/ddn-k8s/registry.k8s.io/prometheus-adapter/prometheus-adapter:v0.12.0
docker tag swr.cn-north-4.myhuaweicloud.com/ddn-k8s/registry.k8s.io/prometheus-adapter/prometheus-adapter:v0.12.0 registry.k8s.io/prometheus-adapter/prometheus-adapter:v0.12.0
docker pull swr.cn-north-4.myhuaweicloud.com/ddn-k8s/quay.io/prometheus/node-exporter:v1.8.2
docker pull swr.cn-north-4.myhuaweicloud.com/ddn-k8s/quay.io/prometheus/blackbox-exporter:v0.25.0
docker pull swr.cn-north-4.myhuaweicloud.com/ddn-k8s/registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.13.0
docker pull swr.cn-north-4.myhuaweicloud.com/ddn-k8s/ghcr.io/jimmidyson/configmap-reload:v0.13.1
docker pull swr.cn-north-4.myhuaweicloud.com/ddn-k8s/quay.io/brancz/kube-rbac-proxy:v0.18.1
docker pull grafana/grafana:11.2.0
#Retag the images back to their upstream names
docker tag swr.cn-north-4.myhuaweicloud.com/ddn-k8s/quay.io/prometheus/prometheus:v2.54.1 quay.io/prometheus/prometheus:v2.54.1
docker tag swr.cn-north-4.myhuaweicloud.com/ddn-k8s/quay.io/prometheus/alertmanager:v0.27.0 quay.io/prometheus/alertmanager:v0.27.0
docker tag swr.cn-north-4.myhuaweicloud.com/ddn-k8s/quay.io/prometheus-operator/prometheus-operator:v0.76.2 quay.io/prometheus-operator/prometheus-operator:v0.76.2
docker tag swr.cn-north-4.myhuaweicloud.com/ddn-k8s/quay.io/prometheus/node-exporter:v1.8.2 quay.io/prometheus/node-exporter:v1.8.2
docker tag swr.cn-north-4.myhuaweicloud.com/ddn-k8s/quay.io/prometheus/blackbox-exporter:v0.25.0 quay.io/prometheus/blackbox-exporter:v0.25.0
docker tag swr.cn-north-4.myhuaweicloud.com/ddn-k8s/registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.13.0 registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.13.0
docker tag swr.cn-north-4.myhuaweicloud.com/ddn-k8s/ghcr.io/jimmidyson/configmap-reload:v0.13.1 ghcr.io/jimmidyson/configmap-reload:v0.13.1
docker tag swr.cn-north-4.myhuaweicloud.com/ddn-k8s/quay.io/brancz/kube-rbac-proxy:v0.18.1 quay.io/brancz/kube-rbac-proxy:v0.18.1
#Deploy kube-prometheus
kubectl apply --server-side -f manifests/setup
kubectl wait \
--for condition=Established \
--all CustomResourceDefinition \
--namespace=monitoring
kubectl apply -f manifests/
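Wait for the monitoring stack to come up:
kubectl -n monitoring get pods -w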
#kube-prometheus ships with restrictive NetworkPolicies by default; delete them so the UIs can be reached from outside the cluster
kubectl delete -f manifests/prometheus-networkPolicy.yaml
kubectl delete -f manifests/grafana-networkPolicy.yaml
kubectl delete -f manifests/alertmanager-networkPolicy.yaml
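With the policies gone, the services are still ClusterIP; one minimal way to expose Grafana externally is a NodePort patch (service and namespace names per kube-prometheus defaults):
kubectl -n monitoring patch svc grafana -p '{"spec":{"type":"NodePort"}}'
kubectl -n monitoring get svc grafana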
Deploy a Gitea repository on Kubernetes with Helm
#Add the Helm repository
helm repo add gitea https://dl.gitea.io/charts
helm repo update
#Export the chart's default values.yaml
helm show values gitea/gitea > values.yaml
#Edit values.yaml: set the persistence storageClass (e.g. nfs-client)
#and change the HTTP Service type to NodePort (a --set equivalent is sketched below)
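The same two changes can be passed as --set flags instead of editing the file; the key paths below follow the Gitea chart's values.yaml but vary between chart versions, so verify them against the exported file:
helm install gitea gitea/gitea \
  --set persistence.storageClass=nfs-client \
  --set service.http.type=NodePort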
#Install and start Gitea
helm install gitea -f values.yaml gitea/gitea
[root@ser10 gitea]# kubectl get --namespace default -o jsonpath="{.spec.ports[0].nodePort}" services gitea-http ;echo
32367
[root@ser10 gitea]#
#Access via any node IP plus the NodePort
#http://192.168.40.4:32367/
Deploy WordPress on Kubernetes with Helm
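This assumes the Bitnami repo is already configured:
helm repo add bitnami https://charts.bitnami.com/bitnami
helm repo update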
helm install wordpress bitnami/wordpress \
--namespace=wordpress \
--create-namespace \
--set global.storageClass=nfs-client \
--set wordpressUsername=admin \
--set wordpressPassword=Root.000 \
--set replicaCount=2 \
--set service.type=NodePort \
--set service.nodePorts.http=30808 \
--set persistence.enabled=true \
--set persistence.size=10Gi \
--set volumePermissions.enabled=true \
--set mariadb.enabled=true \
--set mariadb.architecture=standalone \
--set mariadb.auth.rootPassword=Root.000 \
--set mariadb.auth.password=Root.000 \
--set mariadb.primary.persistence.enabled=true \
--set mariadb.primary.persistence.size=8Gi \
--set memcached.enabled=true \
--set wordpressConfigureCache=true \
--set ingress.enabled=true \
--set ingress.hostname=wordpress.wubian.com
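Check the rollout, then browse to any node IP on the NodePort chosen above:
kubectl -n wordpress get pods
kubectl -n wordpress get svc wordpress
# e.g. http://<node-ip>:30808/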
Go client for the Docker remote API (over TCP)
cat main.go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

func main() {
	// Connect to the Docker daemon exposed over TCP on the server
	cli, err := client.NewClientWithOpts(
		client.WithHost("tcp://192.168.40.11:2375"),
		client.WithAPIVersionNegotiation(),
	)
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// List all containers, including stopped ones
	containers, err := cli.ContainerList(context.Background(), container.ListOptions{
		All: true,
	})
	if err != nil {
		panic(err)
	}
	for _, c := range containers {
		// Container names carry a leading "/", strip it for display
		fmt.Printf("container name: %s\n", c.Names[0][1:])
	}
}
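To build and run, initialize a module and let go mod tidy fetch the Docker SDK (the module name dockerclient is arbitrary):
go mod init dockerclient
go mod tidy
go run main.go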
On the server side, port 2375 must be open, and most importantly the line ExecStart=/usr/bin/dockerd --containerd=/run/containerd/containerd.sock in docker.service must be commented out and replaced with ExecStart=/usr/bin/dockerd -H unix:///var/run/docker.sock -H tcp://0.0.0.0:2375 --containerd=/run/containerd/containerd.sock. Note that an unauthenticated TCP socket hands root-equivalent control of the host to anyone who can reach the port, so restrict access with a firewall or TLS.
cat /lib/systemd/system/docker.service
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
Wants=network-online.target containerd.service
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
# the default is not to use systemd for cgroups because the delegate issues still
# exists and systemd currently does not support the cgroup feature set required
# for containers run by docker
#ExecStart=/usr/bin/dockerd --containerd=/run/containerd/containerd.sock
ExecStart=/usr/bin/dockerd -H unix:///var/run/docker.sock -H tcp://0.0.0.0:2375 --containerd=/run/containerd/containerd.sock
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutStartSec=0
RestartSec=2
Restart=always
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNPROC=infinity
LimitCORE=infinity
# Comment TasksMax if your systemd version does not support it.
# Only systemd 226 and above support this option.
TasksMax=infinity
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
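Apply the unit change and verify remote access from a client machine:
systemctl daemon-reload
systemctl restart docker
docker -H tcp://192.168.40.11:2375 ps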