sed -i.bak "s#http://download.proxmox.com/images#https://mirrors.nju.edu.cn/proxmox/images#g" /usr/share/perl5/PVE/APLInfo.pm
wget -O /var/lib/pve-manager/apl-info/mirrors.nju.edu.cn https://mirrors.nju.edu.cn/proxmox/images/aplinfo-pve-7.dat
systemctl restart pvedaemon.service
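# Optional check that the template mirror works: refresh and list the appliance index
pveam update
pveam available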
# Method 1
sed -Ezi.bak "s/(Ext.Msg.show\(\{\s+title: gettext\('No valid sub)/void\(\{ \/\/\1/g" /usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.js && systemctl restart pveproxy.service
# Method 2: open /usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.js,
# search for "No valid subscription",
# and change the popup's condition to false so it never triggers
ceph osd tree # list OSDs and their status
ceph osd crush rule create-replicated HDD_rule default host hdd
ceph osd crush rule create-replicated SSD_rule default host ssd
ceph osd crush rule create-replicated NVME_rule default host nvme
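# A rule only takes effect once a pool uses it; e.g. for a hypothetical pool named ssd_pool:
ceph osd pool set ssd_pool crush_rule SSD_rule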
######### Replace Debian sources: edit /etc/apt/sources.list
vi /etc/apt/sources.list
# Comment out the official sources
# deb http://ftp.debian.org/debian bookworm main contrib
# deb http://ftp.debian.org/debian bookworm-updates main contrib
# security updates
# deb http://security.debian.org bookworm-security main contrib
# Use the USTC mirror
deb http://mirrors.ustc.edu.cn/debian bookworm main contrib non-free non-free-firmware
deb http://mirrors.ustc.edu.cn/debian bookworm-updates main contrib non-free non-free-firmware
deb http://mirrors.ustc.edu.cn/debian-security/ bookworm-security main contrib non-free non-free-firmware
deb http://mirrors.ustc.edu.cn/debian bookworm-backports main contrib non-free non-free-firmware
######### Replace PVE sources
# Disable pve-enterprise.list (apt ignores files that do not end in .list)
cd /etc/apt/sources.list.d
mv pve-enterprise.list pve-enterprise.list.bak
echo "deb https://mirrors.ustc.edu.cn/proxmox/debian/pve bookworm pve-no-subscription" > /etc/apt/sources.list.d/pve-no-subscription.list
######### Replace the Ceph source
echo "deb https://mirrors.ustc.edu.cn/proxmox/debian/ceph-quincy bookworm no-subscruption" > /etc/apt/sources.list.d/ceph.list
# Edit /usr/share/perl5/PVE/CLI/pveceph.pm
# and change download.proxmox.com to mirrors.ustc.edu.cn/proxmox
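# A one-liner sketch of that edit, mirroring the APLInfo.pm change above (verify the file afterwards):
sed -i.bak "s#download.proxmox.com#mirrors.ustc.edu.cn/proxmox#g" /usr/share/perl5/PVE/CLI/pveceph.pm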
# Stop the cluster service
systemctl stop pve-cluster
# Stop corosync
systemctl stop corosync
# Switch pmxcfs to local mode
pmxcfs -l
# Remove the corosync configuration
rm /etc/pve/corosync.conf
rm -r /etc/corosync/*
# Restart
killall pmxcfs
systemctl start pve-cluster
# Remove a node from the cluster
pvecm delnode nodename
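# Check the remaining membership afterwards:
pvecm status
pvecm nodes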
# 分区 & 格式化
fdisk -l # 查看磁盘信息
fdisk /dev/sdb # 根据实际的硬盘填
# n p 回车 回车 回车 w
cat /etc/fstab # 查看当前系统自动挂载的磁盘列表,查看磁盘格式
mkfs.xfs /dev/sdb1 # format the partition
mkdir /data
mount /dev/sdb1 /data # mount it
vim /etc/fstab # add an auto-mount entry
# /dev/sdb1 /data xfs defaults 0 0
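# Device names like /dev/sdb1 can shift across reboots; mounting by UUID is safer. A sketch:
blkid /dev/sdb1 # print the partition's UUID
# UUID=<uuid-from-blkid> /data xfs defaults 0 0
mount -a # re-reads fstab; errors here mean the entry is wrong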
# Download the binary
wget https://dl.min.io/server/minio/release/linux-amd64/minio
chmod +x minio
mv minio /usr/local/bin/
# Create the systemd unit file
vim /etc/systemd/system/minio.service
[Unit]
Description=MinIO
Documentation=https://min.io/docs/minio/linux/index.html
Wants=network-online.target
After=network-online.target
AssertFileIsExecutable=/usr/local/bin/minio
[Service]
WorkingDirectory=/usr/local
User=root
Group=root
ProtectProc=invisible
EnvironmentFile=-/etc/default/minio
ExecStartPre=/bin/bash -c "if [ -z \"${MINIO_VOLUMES}\" ]; then echo \"Variable MINIO_VOLUMES not set in /etc/default/minio\"; exit 1; fi"
ExecStart=/usr/local/bin/minio server $MINIO_OPTS $MINIO_VOLUMES
# MinIO RELEASE.2023-05-04T21-44-30Z adds support for Type=notify (https://www.freedesktop.org/software/systemd/man/systemd.service.html#Type=)
# This may improve systemd setups where other services use `After=minio.service`
# Uncomment the line to enable the functionality
# Type=notify
# Let systemd restart this service always
Restart=always
# Specifies the maximum file descriptor number that can be opened by this process
LimitNOFILE=65536
# Specifies the maximum number of threads this process can create
TasksMax=infinity
# Disable timeout logic and wait until process is stopped
TimeoutStopSec=infinity
SendSIGKILL=no
[Install]
WantedBy=multi-user.target
# Create the environment file
vim /etc/default/minio
MINIO_ROOT_USER=minio
MINIO_ROOT_PASSWORD=minio@123
MINIO_VOLUMES="/mnt/data" # adjust to your data mount, e.g. /data from the partitioning step above
MINIO_OPTS="--console-address :9001"
# Reload unit files
systemctl daemon-reload
# Start the service
systemctl start minio.service
# Enable at boot
systemctl enable minio.service
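# MinIO exposes an unauthenticated liveness probe; quick check that the server is up:
curl -I http://127.0.0.1:9000/minio/health/live # HTTP 200 means healthy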
# Install Erlang
curl -s https://packagecloud.io/install/repositories/rabbitmq/erlang/script.rpm.sh | sudo bash
yum -y install erlang
yum install socat logrotate -y
# Install RabbitMQ
curl -s https://packagecloud.io/install/repositories/rabbitmq/rabbitmq-server/script.rpm.sh | sudo bash
yum install rabbitmq-server -y
# Start
systemctl start rabbitmq-server
# Enable at boot
systemctl enable rabbitmq-server
# Enable the web management console
rabbitmq-plugins enable rabbitmq_management
# Create a user and vhost, and grant permissions
rabbitmqctl add_user admin password
rabbitmqctl add_vhost /vhost
rabbitmqctl set_user_tags admin administrator
rabbitmqctl set_permissions -p /vhost admin ".*" ".*" ".*"
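# Verify the user, its tags, and its permissions:
rabbitmqctl list_users
rabbitmqctl list_permissions -p /vhost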
# Download the delayed message plugin: https://www.rabbitmq.com/community-plugins
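# Install sketch (the release file and plugins directory depend on your server version; adjust both):
cp rabbitmq_delayed_message_exchange-*.ez /usr/lib/rabbitmq/lib/rabbitmq_server-*/plugins/
rabbitmq-plugins enable rabbitmq_delayed_message_exchange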
# Default ports: 5672 (AMQP), 15672 (management UI), 4369 (epmd), 25672 (inter-node/CLI)
# Configuration files:
/etc/rabbitmq/advanced.config
/etc/rabbitmq/rabbitmq.conf
# Erlang cookie locations (server process vs. CLI tools):
/var/lib/rabbitmq/.erlang.cookie
$HOME/.erlang.cookie
# Inspect cookie sources with: rabbitmq-diagnostics erlang_cookie_sources
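# All cluster nodes must share the same Erlang cookie; a sketch of syncing it from rabbitmq1 before joining:
scp /var/lib/rabbitmq/.erlang.cookie rabbitmq2:/var/lib/rabbitmq/.erlang.cookie
ssh rabbitmq2 "chown rabbitmq:rabbitmq /var/lib/rabbitmq/.erlang.cookie && chmod 400 /var/lib/rabbitmq/.erlang.cookie && systemctl restart rabbitmq-server"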
# Cluster planning
rabbitmq1 192.168.0.220
rabbitmq2 192.168.0.221
# Add both nodes to /etc/hosts
192.168.0.220 rabbitmq1
192.168.0.221 rabbitmq2
# Set the hostname
hostnamectl set-hostname rabbitmq1 # rabbitmq2 on the second node
# Install RabbitMQ on each node as described above
# Join rabbitmq2 to the cluster (run these on rabbitmq2)
rabbitmqctl stop_app
rabbitmqctl reset
rabbitmqctl join_cluster rabbit@rabbitmq1 # /etc/hosts must resolve rabbitmq1 first
rabbitmqctl start_app
# Set policies
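# Example policy: classic mirrored-queue HA on the vhost created above (newer releases favor quorum queues instead):
rabbitmqctl set_policy -p /vhost ha-all "^" '{"ha-mode":"all"}'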
# Download the source tarball, version 6.2.14
wget https://download.redis.io/releases/redis-6.2.14.tar.gz
# Install gcc, make, and other build dependencies
yum install -y gcc gcc-c++ make automake systemd-devel autoconf libtool pcre pcre-devel zlib zlib-devel openssl openssl-devel
# Unpack and build with systemd support
tar zxf redis-6.2.14.tar.gz && cd redis-6.2.14
make USE_SYSTEMD=yes
# make install into /opt/redis
make PREFIX=/opt/redis install
# Configure redis
mkdir -p /etc/redis
vim /etc/redis/redis.conf
port 6379
bind 0.0.0.0
requirepass yx123123=
appendonly yes
appendfsync everysec
daemonize yes # run in the background (the systemd unit below uses Type=forking to match)
dir /opt/redis # data directory
# Start with an explicit config file:
# /opt/redis/bin/redis-server /etc/redis/redis.conf
# Systemd unit: redis.service
vim /etc/systemd/system/redis.service
[Unit]
Description=Redis In-Memory Data Store
After=network.target
[Service]
# Create the service account first: useradd -r -s /sbin/nologin redis (and chown the data dir from redis.conf)
User=redis
Group=redis
# redis.conf sets daemonize yes, so systemd must treat this as a forking service
Type=forking
ExecStart=/opt/redis/bin/redis-server /etc/redis/redis.conf
# requirepass is set, so the shutdown command must authenticate
ExecStop=/opt/redis/bin/redis-cli -a yx123123= shutdown
Restart=always
[Install]
WantedBy=multi-user.target
# Reload unit files
systemctl daemon-reload
# Enable at boot
systemctl enable redis.service
# Start
systemctl start redis.service
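# Quick check that the server answers (password from redis.conf):
/opt/redis/bin/redis-cli -a yx123123= ping # expect PONG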
# Server planning
master 192.168.0.110
slave1 192.168.0.111
slave2 192.168.0.112
sentinel1 192.168.0.113
sentinel2 192.168.0.114
sentinel3 192.168.0.115
# Configuration
# master node
port 6379
bind 0.0.0.0
requirepass yx123123=
appendonly yes
appendfsync everysec
daemonize yes
dir /opt/redis/data/6379
repl-diskless-sync no
repl-backlog-size 1mb
# slave1 / slave2
port 6379
bind 0.0.0.0
requirepass yx123123=
appendonly yes
appendfsync everysec
daemonize yes
dir /opt/redis/data/6379
replicaof 192.168.0.110 6379
masterauth yx123123=
replica-serve-stale-data yes
replica-read-only yes
repl-diskless-sync no
repl-backlog-size 1mb
# sentinel
port 26379
sentinel monitor master1 192.168.0.110 6379 2
sentinel auth-pass master1 yx123123=
daemonize yes
# Start
/opt/redis/bin/redis-server /etc/redis/redis.conf
/opt/redis/bin/redis-server /etc/redis/sentinel.conf --sentinel
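# Ask any sentinel which address it currently considers the master:
/opt/redis/bin/redis-cli -p 26379 SENTINEL get-master-addr-by-name master1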
# Install JDK 11 and configure environment variables (omitted)
# Create the user and group
groupadd esuser
useradd -d /home/esuser -m esuser -g esuser
# Adjust some system settings
vi /etc/sysctl.conf
net.ipv4.ip_forward=1
vm.max_map_count=262144
vi /etc/security/limits.d/90-nproc.conf # file name may vary by distro
# change "* soft nproc 1024" to "* soft nproc 4096"
vi /etc/security/limits.conf # add the following
* soft nofile 65536
* hard nofile 65536
* soft nproc 32000
* hard nproc 32000
* hard memlock unlimited
* soft memlock unlimited
vi /etc/systemd/system.conf # set the following
DefaultLimitNOFILE=65536
DefaultLimitNPROC=32000
DefaultLimitMEMLOCK=infinity
# Reboot the machine
# Switch to the new user and enter its home directory
su esuser
cd /home/esuser
# Download and unpack
wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-7.14.2-linux-x86_64.tar.gz
wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-7.14.2-linux-x86_64.tar.gz.sha512
shasum -a 512 -c elasticsearch-7.14.2-linux-x86_64.tar.gz.sha512
tar -xzf elasticsearch-7.14.2-linux-x86_64.tar.gz
cd elasticsearch-7.14.2/
# Edit the configuration file
vim config/elasticsearch.yml
node.name: es01 # node name
node.master: true # eligible to be elected master
node.data: true # data node
path.data: /home/esuser/data # data directory
path.logs: /home/esuser/logs # log directory
bootstrap.memory_lock: true # required; keeps the heap out of swap
network.host: 192.168.110.222 # this machine's IP
http.port: 9200
transport.port: 9300
http.cors.enabled: true
http.cors.allow-origin: "*"
discovery.seed_hosts: ["192.168.110.221:9300", "192.168.110.224:9300"] # other cluster nodes
cluster.initial_master_nodes: ["es01"] # master nodes for the initial bootstrap
# Start Elasticsearch (daemonized)
bin/elasticsearch -d
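# Check cluster health once all nodes are up (status should be green or yellow):
curl "http://192.168.110.222:9200/_cluster/health?pretty"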
1. Install ZooKeeper
# Install JDK 1.8+ via rpm and set the JAVA_HOME environment variable (omitted)
# Unpack zookeeper and set the ZK_HOME environment variable (omitted)
# Edit the zookeeper configuration
cp $ZK_HOME/conf/zoo_sample.cfg $ZK_HOME/conf/zoo.cfg
vim $ZK_HOME/conf/zoo.cfg
tickTime=2000
dataDir=/var/lib/zookeeper # data directory; adjust as needed
clientPort=2181
# Cluster section
server.1=zk1:2888:3888
server.2=zk2:2888:3888
server.3=zk3:2888:3888
# In the data directory, create a myid file containing this server's id
vim /var/lib/zookeeper/myid
1
# Start the zk service
zkServer.sh start
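# Confirm each node's role (one leader, two followers) after all three are up:
zkServer.sh status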
2. Install ClickHouse
Download: https://github.com/ClickHouse/ClickHouse/releases
Four packages are needed: clickhouse-client, clickhouse-common-static-dbg, clickhouse-common-static, and clickhouse-server
# Install the downloaded rpm packages
rpm -ivh ./clickhouse-*.rpm
# Edit the configuration file
vim /etc/clickhouse-server/config.xml
# Allow external access
<listen_host>::</listen_host>
# Data directories; adjust as needed
<path>/ck/data/clickhouse/</path>
<tmp_path>/ck/data/clickhouse/tmp/</tmp_path>
# Time zone
<timezone>Asia/Shanghai</timezone>
# Cluster layout; adjust as needed
<remote_servers>
<my_cluster>
<shard>
<internal_replication>true</internal_replication>
<replica>
<host>ck1</host>
<port>9000</port>
<user>default</user>
<password>NtFRw3F4</password>
</replica>
</shard>
<shard>
<internal_replication>true</internal_replication>
<replica>
<host>ck2</host>
<port>9000</port>
<user>default</user>
<password>NtFRw3F4</password>
</replica>
</shard>
<shard>
<internal_replication>true</internal_replication>
<replica>
<host>ck3</host>
<port>9000</port>
<user>default</user>
<password>NtFRw3F4</password>
</replica>
</shard>
</my_cluster>
</remote_servers>
# The cluster uses zookeeper for discovery and coordination
<zookeeper>
<node>
<host>ck3</host>
<port>2181</port>
</node>
<node>
<host>ck2</host>
<port>2181</port>
</node>
<node>
<host>ck1</host>
<port>2181</port>
</node>
</zookeeper>
# Note: each node sets its own values here; duplicates across nodes are not allowed
<macros>
<shard>01</shard>
<replica>my-cluster-01-1</replica>
</macros>
# Start the service
systemctl start clickhouse-server
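# Verify that all shards and replicas registered:
clickhouse-client --password NtFRw3F4 --query "SELECT cluster, shard_num, replica_num, host_name FROM system.clusters WHERE cluster = 'my_cluster'"
# The macros above are substituted into replicated table paths, which is why they must be unique per node. A hypothetical table sketch showing where {shard} and {replica} end up:
clickhouse-client --password NtFRw3F4 --query "
CREATE TABLE events ON CLUSTER my_cluster (ts DateTime, msg String)
ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/events', '{replica}')
ORDER BY ts"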
Download: https://nginx.org/en/download.html, use version 1.16.1
# Install build prerequisites
yum install -y gcc gcc-c++ make automake autoconf libtool pcre pcre-devel zlib zlib-devel openssl openssl-devel
# Unpack
tar zxf nginx-1.16.1.tar.gz
# Create the temp directories the build expects
mkdir -p /var/temp/nginx
mkdir -p /var/run/nginx
# Configure, compile, and install
cd nginx-1.16.1
./configure \
--prefix=/usr/local/nginx \
--pid-path=/var/run/nginx/nginx.pid \
--lock-path=/var/lock/nginx.lock \
--error-log-path=/var/log/nginx/error.log \
--http-log-path=/var/log/nginx/access.log \
--with-http_gzip_static_module \
--with-stream \
--http-client-body-temp-path=/var/temp/nginx/client \
--http-proxy-temp-path=/var/temp/nginx/proxy \
--http-fastcgi-temp-path=/var/temp/nginx/fastcgi \
--http-uwsgi-temp-path=/var/temp/nginx/uwsgi \
--http-scgi-temp-path=/var/temp/nginx/scgi
make && make install
# Configuration files live in /usr/local/nginx/conf
# Start
/usr/local/nginx/sbin/nginx
# Reload configuration (graceful, not a full restart)
/usr/local/nginx/sbin/nginx -s reload
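# Validate the configuration before reloading:
/usr/local/nginx/sbin/nginx -t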
# Proxying TCP services such as MySQL or RabbitMQ requires the stream module (compiled in above via --with-stream)
stream {
upstream rabbitmq_nodes {
server 192.168.110.230:5672 max_fails=3 fail_timeout=3s;
server 192.168.110.231:5672 max_fails=3 fail_timeout=3s;
}
server {
listen 5675;
proxy_pass rabbitmq_nodes;
}
}
http {
upstream rabbitmq_management {
server 192.168.110.230:15672 max_fails=3 fail_timeout=10s;
server 192.168.110.231:15672 max_fails=3 fail_timeout=10s;
}
server {
listen 80;
location / {
proxy_pass http://rabbitmq_management;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header REMOTE-HOST $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_buffering off;
}
}
}