nginx安装脚本
#!/bin/bash
# Date: 2023-1-19
# Author: lizexin
# version: 2.0
export LANG="zh_CN.UTF-8"
# Banner and operator instructions.
echo -e "\033[31m ############Nginx安装脚本############### \033[0m"
echo -e "\033[31m 请将安装包放在/data/soft中 \033[0m"
echo -e "\033[31m 开始检查安装包路径是否存在 \033[0m"
# Package staging directory: create it on demand when missing.
if [ -d "/data/soft" ];then
echo -e "\033[32m 存在,不需要创建! \033[0m"
else
read -p "不存在,是否需要创建? 请输入(y/n)" yes_no
case "$yes_no" in
y)
mkdir -p /data/soft
;;
n)
echo -e "\033[31m 不创建,退出 \033[0m"
exit 1
;;
*)
echo -e "\033[31m 输入有误! \033[0m"
exit 1
;;
esac
fi
# Dependency check: gcc/g++ are required by the later make steps.
echo -e "\033[31m 开始检查是否有gcc依赖 \033[0m"
gcc_num=$(rpm -qa | grep -c '^gcc')
if [ "$gcc_num" -ge 2 ];then
echo -e "\033[32m gcc依赖存在,无需安装 \033[0m"
else
# Fixed: original message was missing the closing color reset "\033[0m".
echo -e "\033[31m gcc依赖不存在或者不全,检查安装 \033[0m"
yum -y install gcc gcc-c++
fi
# Perl check: compiling openssl fails easily without perl >= 5.
echo -e "\033[31m 开始检查是否有perl依赖 \033[0m"
# Fixed: quote the grep/awk patterns (unquoted perl-[1-9] / [-] / [.] can be
# expanded as shell globs), and take only the first match so $perl_version
# is a single number (multiple perl-* packages broke the -ge test below).
perl_num=$(rpm -qa | grep -c 'perl-[1-9]')
perl_version=$(rpm -qa | grep 'perl-[1-9]' | head -n 1 | awk -F '-' '{print $2}' | awk -F '.' '{print $1}')
if [ "$perl_num" -ge 1 ] && [ "$perl_version" -ge 5 ];then
echo -e "\033[32m perl依赖存在,无需安装 \033[0m"
elif [ "$perl_num" -ge 1 ] && [ "$perl_version" -lt 5 ];then
# Fixed: "033[31m" was missing the leading escape; also actually install perl
# here — the message promised an install but the original branch did nothing.
echo -e "\033[31m perl版本低于5,检查安装 \033[0m"
yum -y install perl
else
echo -e "\033[31m perl不存在,开始安装 \033[0m"
yum -y install perl
fi
# Unpack the four source tarballs (pcre/zlib/openssl/nginx), then configure and build.
echo -e "\033[32m 进入目录:/data/soft \033[0m"
cd /data/soft || exit 1
echo -e "\033[32m 开始解压pcre \033[0m"
read -p "请输入版本,例如(8.45): " pcre_version
echo -e "\033[35m 解压:pcre-"$pcre_version".tar.gz \033[0m"
# Fixed: "2>&1 >/dev/null" sent stderr to the terminal and only stdout to
# /dev/null; ">/dev/null 2>&1" silences both streams as intended.
tar -zxvf pcre-"$pcre_version".tar.gz >/dev/null 2>&1
sleep 1
cd /data/soft
# zlib
echo -e "\033[32m 开始解压zlib \033[0m"
read -p "请输入zlib版本,例如(1.2.10): " zlib_version
echo -e "\033[35m 解压:zlib-"$zlib_version".tar.gz \033[0m"
tar -zxvf zlib-"$zlib_version".tar.gz >/dev/null 2>&1
sleep 1
cd /data/soft
# openssl
echo -e "\033[32m 开始解压openssl \033[0m"
read -p "请输入openssl版本,例如(1.1.1l): " openssl_version
echo -e "\033[35m 解压:openssl-"$openssl_version".tar.gz \033[0m"
tar -zxvf openssl-"$openssl_version".tar.gz >/dev/null 2>&1
sleep 1
cd /data/soft
# nginx
echo -e "\033[32m 开始解压nginx \033[0m"
read -p "请输入nginx版本,例如(1.12.2): " nginx_version
echo -e "\033[35m 解压:nginx-"$nginx_version".tar.gz \033[0m"
tar -zxvf nginx-"$nginx_version".tar.gz >/dev/null 2>&1
cd nginx-"$nginx_version" || exit 1
read -p "请输入nginx安装路径,例如(/data/nginx): " nginx_path
echo -e "\033[35m 开始加载.... \033[0m"
# Fixed: abort on configure/make failure — the original ignored build errors
# and later reported "安装完成" based only on a mkdir succeeding.
./configure --user=root --group=root --prefix=${nginx_path} --with-http_stub_status_module --with-http_gzip_static_module --with-http_realip_module --with-http_sub_module --with-http_ssl_module --with-pcre=/data/soft/pcre-"$pcre_version" --with-zlib=/data/soft/zlib-"$zlib_version" --with-openssl=/data/soft/openssl-"$openssl_version" >/dev/null 2>&1 || { echo -e "\033[31m configure失败,请检查依赖与参数 \033[0m"; exit 1; }
echo -e "\033[35m 开始安装,请稍等... \033[0m"
make >/dev/null 2>&1 || { echo -e "\033[31m make失败 \033[0m"; exit 1; }
make install >/dev/null 2>&1 || { echo -e "\033[31m make install失败 \033[0m"; exit 1; }
# Post-install: log dir, optional tarball cleanup, optional service start, summary.
echo -e "\033[32m 创建日志目录:${nginx_path}/logs \033[0m"
mkdir -p "${nginx_path}/logs"
if [ "$?" -eq "0" ];then
echo -e "\033[32m 安装完成! \033[0m"
else
echo -e "\033[31m 安装失败! \033[0m"
exit 1
fi
sleep 3
cd /data/soft
read -p "是否需要删除下载的安装(输入y/Y删除,其他不删除):" inputMsg
if [ "$inputMsg" == 'y' ] || [ "$inputMsg" == 'Y' ] ;then
# Fixed: plain files need no -r; -rf on user-influenced names is needlessly risky.
rm -f nginx-"$nginx_version".tar.gz pcre-"$pcre_version".tar.gz openssl-"$openssl_version".tar.gz zlib-"$zlib_version".tar.gz
echo -e "\033[32m 删除完成 \033[0m"
else
echo -e "\033[32m 不删除 \033[0m"
fi
read -p "是否需要启动nginx(输入y/Y启动,其他不启动):" startNginxMsg
if [ "$startNginxMsg" == 'y' ] || [ "$startNginxMsg" == 'Y' ] ;then
"${nginx_path}"/sbin/nginx
if [ $? -eq 0 ];then
# Fixed: tr -d "addr:" deletes EVERY occurrence of the characters a/d/r/:
# anywhere in the line; sed strips only the literal "addr:" prefix.
# head -n1 keeps a single IP when the host has several interfaces.
localIp=$(ifconfig -a | grep inet | grep -v inet6 | grep -v 127 | awk '{print $2}' | sed 's/addr://' | head -n 1)
echo -e "\033[32m 启动成功,请访问: http://$localIp \033[0m"
else
echo -e "\033[31m 启动失败,请查看异常信息确定失败原因 \033[0m"
fi
else
echo -e "\033[35m 不启动 \033[0m"
fi
echo -e "\033[32m nginx安装完成! \033[0m"
echo -e "\033[32m 版本信息: \033[0m"
echo -e "\033[35m pcre: \033[0m" $pcre_version
echo -e "\033[35m zlib: \033[0m" $zlib_version
echo -e "\033[35m openssl: \033[0m" $openssl_version
echo -e "\033[35m nginx: \033[0m" $nginx_version
echo -e "\033[32m 安装路径: \033[0m" ${nginx_path}
echo "end"
逻辑卷安装与扩容脚本
#!/bin/bash
# Date:2023-1-16
# Author:lizexin
# Version:2.0
echo -e "\033[31m 磁盘创建与扩容,此版本将磁盘所有空间添加 \033[0m"
# Interactive menu; the chosen number drives the option blocks below.
cat <<END
1.[新建磁盘分区]
2.[磁盘扩容]
3.[退出]
请输入您需要执行的序号:
END
read num
# Numeric sanity check: expr exits non-zero on non-integer input.
if ! expr $num + 1 &> /dev/null; then
echo -e "\033[31m 序号必须输入{1|2|3}.. \033[0m"
exit 1
fi
# Option 1: create PV -> VG -> LV (all free space), format, mount, persist in fstab.
[ "$num" -eq "1" ] && {
echo -e "\033[31m 开始新建分区...请等待... \033[0m"
sleep 2;
# Physical volume
read -p "请输入需要创建的物理卷,例如(/dev/vdb): " Physical_Volume
pvcreate "$Physical_Volume" &>/dev/null
if [ $? -ne 0 ];then
echo -e "\033[31m 创建${Physical_Volume}错误,请检查是否存在... \033[0m"
exit 1
else
echo -e "\033[32m 创建成功,物理卷为${Physical_Volume} \033[0m"
fi
# Volume group
read -p "请输入需要创建的卷组名称,例如(vg_data): " Volume_Group
# Fixed: ">/dev/null 2>&1" — the original "2>&1 >/dev/null" leaked stderr.
vgcreate "$Volume_Group" "$Physical_Volume" >/dev/null 2>&1
if [ $? -ne 0 ];then
echo -e "\033[31m 创建${Volume_Group}错误,请检查是否已存在... \033[0m"
exit 1
else
echo -e "\033[32m 创建成功,卷组为${Volume_Group}... \033[0m"
fi
# Logical volume using 100% of the VG
read -p "请输入需要创建的逻辑卷名称,例如(lv_data): " Logical_Volume
lvcreate -l 100%vg -n "$Logical_Volume" "$Volume_Group" >/dev/null 2>&1
if [ $? -ne 0 ];then
echo -e "\033[31m 创建${Logical_Volume}错误,请检查是否正确... \033[0m"
exit 1
else
echo -e "\033[32m 创建成功,逻辑卷为${Logical_Volume} \033[0m"
fi
# Filesystem. Fixed: quote $MKFS — an empty reply made the original [ ] test a
# syntax error; both branches did the same work, so they are merged.
read -p "请选择格式化方式,请输入{ext4|xfs} " MKFS
if [ "$MKFS" = "ext4" ] || [ "$MKFS" = "xfs" ];then
mkfs."${MKFS}" /dev/${Volume_Group}/${Logical_Volume} >/dev/null 2>&1
echo -e "\033[32m 格式化成功,类型为${MKFS} \033[0m"
else
echo -e "输入有误!"
exit 2
fi
# Mount point
read -p "创建挂载目录,请输入目录名称(例如:/data): " Dir
[ -d "$Dir" ] && echo -e "\033[31m 目录已存在 \033[0m" || mkdir -p "$Dir"
# Temporary mount. Fixed: check the mount itself — the original tested the
# exit status of the fstab "echo" line instead.
echo -e "\033[32m 开始挂载 \033[0m"
mount /dev/${Volume_Group}/${Logical_Volume} "$Dir"
if [ $? -ne 0 ];then
echo -e "\033[31m 挂载失败,请查看是否有误! \033[0m"
exit 1
fi
# Persistent mount
echo "/dev/mapper/${Volume_Group}-${Logical_Volume} ${Dir} ${MKFS} defaults 0 0 " >>/etc/fstab
echo -e "\033[31m 创建成功,请用以下命令自行查看(df -hT && cat /etc/fstab) \033[0m"
echo -e "\033[31m 磁盘分配完成... \033[0m"
exit 0
}
# Option 2: add a new PV to an existing VG and grow the LV + filesystem.
[ "$num" -eq "2" ] && {
echo -e "\033[31m 开始磁盘扩容...请等待... \033[0m"
sleep 2;
# New physical volume
read -p "请输入需要创建的物理卷,例如(/dev/vdb): " Physical_Volume
# Fixed: ">/dev/null 2>&1" — the original "2>&1 >/dev/null" leaked stderr.
pvcreate "$Physical_Volume" >/dev/null 2>&1
if [ $? -ne 0 ];then
echo -e "\033[31m 创建${Physical_Volume}错误,请检查是否存在... \033[0m"
exit 1
else
echo -e "\033[32m 创建成功,物理卷为${Physical_Volume} \033[0m"
fi
# Optionally list existing VGs. Fixed: the original wrapped this in a while
# loop whose every branch broke on the first pass — a plain if is equivalent.
read -p "是否需要查看现有卷组,请输入(y/n): " VG_display
if [ "$VG_display" = "y" ];then
vgs
elif [ "$VG_display" = "n" ];then
echo -e "\033[31m 退出查看! \033[0m"
else
echo -e "\033[31m 您的输入有误,已退出! \033[0m"
fi
# Extend the volume group with the new PV
read -p "请输入要扩容的卷组名称,例如(vg_data): " Volume_Group
vgextend "$Volume_Group" "$Physical_Volume" >/dev/null 2>&1
if [ $? -ne 0 ];then
echo -e "\033[31m 输入有误,请检查是否存在... \033[0m"
exit 1
else
echo -e "\033[32m 加入成功! \033[0m"
fi
# Grow the LV over all free extents.
read -p "请输入要添加的逻辑卷名称,例如(lv_data):" Logical_Volume
echo -e "\033[32m 开始添加! \033[0m"
# Fixed: (1) dropped the stray -n flag — for lvextend it means --nofsck, not a
# name; the LV path is positional. (2) capture the exit status BEFORE the
# sleep: the original tested $? of "sleep 2", which is always 0, so lvextend
# failures were never detected.
lvextend -l +100%FREE /dev/mapper/${Volume_Group}-${Logical_Volume} >/dev/null 2>&1
lv_status=$?
sleep 2;
if [ $lv_status -ne 0 ];then
echo -e "\033[31m 添加失败,请检查是否存在... \033[0m"
exit 1
else
echo -e "\033[32m 加入成功! \033[0m"
fi
# Grow the filesystem to match the LV. Fixed: quote $MKFS (empty input was a
# syntax error) and remove the stray redirect that was attached to an echo.
read -p "请选择文件类型格式,请输入{ext4|xfs} " MKFS
if [ "$MKFS" = "ext4" ];then
resize2fs /dev/mapper/${Volume_Group}-${Logical_Volume}
echo -e "\033[32m 扩容成功,类型为${MKFS} \033[0m"
elif [ "$MKFS" = "xfs" ];then
xfs_growfs /dev/mapper/${Volume_Group}-${Logical_Volume} >/dev/null 2>&1
echo -e "\033[32m 扩容成功,类型为${MKFS} \033[0m"
else
echo -e "输入有误!"
exit 2
fi
echo -e "\033[31m 磁盘分配完成,验证请用df -hT \033[0m"
exit 0
}
# Option 3: exit.
[ "$num" -eq "3" ] && {
echo "bye!"
exit 3
}
# Reject anything other than 1, 2 or 3. Fixed: the regex must be anchored —
# the original unanchored [1-3] let input such as "13" or "41" fall through
# and exit silently with status 0.
[[ ! "$num" =~ ^[1-3]$ ]] && {
echo "序号必须输入{1|2|3}"
exit 4
}
tomcat重启
#!/bin/bash
# Date:2023-1-9
# Author:lizexin
# Version:1.0
# Tomcat restart helper; assumed to live in the Tomcat bin/ directory
# (it calls ./startup.sh and clears ../work/Catalina).
# Fixed: quote "$(dirname "$0")" so paths containing spaces resolve, and
# use $() instead of backticks.
App_Path=$(cd "$(dirname "$0")" && pwd)
# PID of the java process launched from this Tomcat instance (empty when not running).
PID=$(ps -ef | grep java | grep -v grep | grep "$App_Path" | awk '{print $2}')
# Clear the work/Catalina cache, start Tomcat, wait for the JVM, report new PID.
# Extracted: both branches of RESTART duplicated this sequence verbatim.
start_app() {
echo -e "\033[35m Start clearing the cache! \033[0m"
rm -rf "${App_Path}/../work/Catalina"
sleep 3
echo -e "\033[35m Start App! \033[0m"
"${App_Path}"/startup.sh >/dev/null 2>&1
# Fixed: wait before sampling the new PID — the original captured it
# immediately after startup.sh, often reading an empty value.
sleep 10
New_PID=$(ps -ef | grep java | grep -v grep | grep "$App_Path" | awk '{print $2}')
echo -e "\033[35m Application startup completed,New PID is ${New_PID} \033[0m"
}
function RESTART() {
echo -e "\033[35m Tomcat restarting! \033[0m"
if [[ -n $PID ]];then
echo -e "\033[35m Stop App! \033[0m"
# NOTE(review): kill -9 skips Tomcat's graceful shutdown hooks; consider
# running shutdown.sh (or kill -TERM) first — kept as-is to preserve behavior.
kill -9 $PID
sleep 5
else
echo -e "\033[35m There are no running processes \033[0m"
fi
start_app
exit
}
RESTART
nginx升级脚本
#!/bin/bash
# Date: 2022-11-22
# Author: lizexin
# Explain: Nginx Upgrade script (hot binary swap; service keeps running)
echo -e "\033[35m 请将安装包放在/data/soft \033[0m"
read -p "请输入现有Nginx安装路径,如(/data/nginx): " Old_nginx_path
echo -e "\033[32m 开始获取旧Nginx安装参数 \033[0m"
# nginx -V prints to stderr, hence |&. Fixed: quote -F ':' (unquoted [:] is a glob).
Param=$("${Old_nginx_path}"/sbin/nginx -V |& awk -F ':' '/^configure/{print $2}')
echo -e "\033[35m 旧版本安装参数为: \033[0m" $Param
cd /data/soft || exit 1
read -p "请输入准备更新的Nginx版本,如(1.10.2): " New_Nginx_version
echo -e "\033[35m 开始解压 \033[0m"
# Fixed: abort when the tarball is missing/corrupt instead of continuing
# and configuring inside a non-existent directory.
tar -zxvf nginx-${New_Nginx_version}.tar.gz || exit 1
echo -e "\033[32m 开始编译新nginx源码包,请等待... \033[0m"
cd nginx-${New_Nginx_version} || exit 1
# $Param intentionally unquoted: it carries multiple ./configure arguments.
./configure ${Param} >/dev/null 2>&1
make >/dev/null 2>&1
if [ $? -eq 0 ];then
echo -e "\033[32m 编译完成,继续进行... \033[0m"
else
echo -e "\033[31m 编译失败,请检查失败原因... \033[0m"
exit 1
fi
echo -e "\033[32m 开始备份旧nginx二进制配置文件 \033[0m"
# Back up the old binary; the running master keeps serving from memory.
# Fixed: abort if the backup or copy fails — continuing would signal a
# half-swapped installation.
mv ${Old_nginx_path}/sbin/nginx ${Old_nginx_path}/sbin/nginx_$(date +%F) || exit 1
# Install the freshly built binary in place of the old one.
echo -e "\033[32m 开始复制新nginx二进制配置文件 \033[0m"
cp /data/soft/nginx-${New_Nginx_version}/objs/nginx ${Old_nginx_path}/sbin/nginx || exit 1
# Sanity-check the new binary against the existing configuration.
echo -e "\033[32m 开始验证新版本nginx是否正常... \033[0m"
successful=$(${Old_nginx_path}/sbin/nginx -t |& awk '/successful/{print $NF}')
if [ "$successful" == "successful" ];then
echo -e "\033[32m 版本正常,继续进行... \033[0m"
sleep 3;
else
echo -e "\033[31m 版本异常,请检查失败原因... \033[0m"
exit 2
fi
echo -e "\033[32m 开始给nginx发送平滑迁移信号... \033[0m"
# USR2: the old master starts a new master from the new binary; the old
# master's pid file is renamed to nginx.pid.oldbin.
kill -USR2 $(cat ${Old_nginx_path}/logs/nginx.pid)
sleep 3;
echo -e "\033[32m 开始关闭旧的nginx进程... \033[0m"
# WINCH: gracefully shut down the OLD master's worker processes.
kill -WINCH $(cat ${Old_nginx_path}/logs/nginx.pid.oldbin)
sleep 3;
# QUIT: shut down the old master itself. Fixed: the original also sent HUP to
# the old master first — in nginx's upgrade procedure HUP is the ROLLBACK
# signal (it makes the old master respawn its workers), briefly undoing the
# WINCH above; it must not be sent when completing an upgrade.
kill -QUIT $(cat ${Old_nginx_path}/logs/nginx.pid.oldbin)
sleep 3;
echo -e "\033[32m 验证升级是否完成... \033[0m"
${Old_nginx_path}/sbin/nginx -V
sleep 3;
echo -e "\033[32m 升级结束... \033[0m"
exit 0
ansible(二)
一、ansible playbook应用概述
-
概述
playbook是ansible用于配置,部署和管理被控节点的剧本。他就是组织多个task的容器,他的实质就是一个文件,有着特定的组织格式,它采用的语法格式是YAML(YAML Ain't Markup Language); -
组成
playbook是由一个或多个play组成的列表,play的主要功能是为ansible中的task定义好一个角色,指定剧本对应的服务器组,组成一个完整的流程控制集合。 -
应用场景
执行一些简单的任务,使用ad-hoc命令可以方便的解决问题,但是有时一个设施过于复杂,需要大量的操作的时候,执行ad-hoc命令是不合适的,这时最好使用playbook。
就像执行shell命令写shell脚本一样,也可以理解为批处理任务
,不过playbook有自己的语法格式。
使用playbook可以方便的重用这些代码,可以移植到不同的机器上,像函数一样,最大化的利用代码。在使用ansible的过程中会发现,所处理的大部分操作都是编写playbook。可以把常见的应用都写成playbook,之后管理服务器便简单许多。
二、ansible playbook语法
-
YAML格式是类似于JSON的文件格式,便于理解和阅读,同时便于书写。首先学习一下YAML的格式。
- 文件的第一行应该以"---"(三个连字符)开始,表明YAML文件的开始
- 在同一行中,#之后的内容表示注释,类似于shell,python和ruby
- YAML中的列表元素以"-"开头然后紧跟着一个空格,后面为元素内容
- 同一个列表中的元素应该保持相同的缩进,否则会被当成错误处理
- play中hosts,variables,roles,tasks等对象的表示方法都是键值中间以":"分隔表示,":"后面还要增加一个空格
--- # 安装与运行MySQL服务
- hosts: slave1
  remote_user: root
  tasks:
    - name: install mysql-server package
      yum: name=mysql-server state=present
    - name: starting mysqld service
      service: name=mysqld state=started
我们的文件名称应该是以.yml结尾,像我们上面的例子就是mysql.yml。其中,由三部分组成
host部分:使用hosts指示使用哪个主机或主机组来运行下面的tasks,每个playbook都必须指定hosts,hosts也可以使用通配符格式。主机或主机组在inventory清单中指定,可以使用系统默认的/etc/ansible/hosts,也可以自己编辑,在运行的时候加上-i选项,指定清单的位置即可。在运行清单文件的时候--list-hosts选项会显示哪些主机将参与执行task的过程。
remote_user:指定远端主机中的哪个用户来登录远端系统,在远端系统执行task的用户,可以任意指定,也可以使用sudo,但是用户必须要有执行相应task的权限
tasks:指定远端主机将要执行的一系列动作。tasks的核心为ansible的模块,tasks包含name和要执行的模块,name是可选的,只是为了便于用户阅读,不过还是建议加上,模块是必须的,同时也要给予模块相应的参数。
使用ansible-playbook运行playbook文件,得到如下输出信息,输出内容为json格式。并且由不同颜色组成,便于识别,一般而言- 绿色代表执行成功,系统保持原样
- 黄色代表系统状态发生改变
- 红色代表执行失败,显示错误输出
执行有三个步骤:1、收集facts 2、执行tasks 3、报告结果
-
基础组件
-
ansible-playbook命令语法:
ansible-playbook <filename.yml>...[options]
[options]:
-C, --check:并不在远程主机上执行,只是测试
-i PATH, --inventory=PATH:资产的文件路径
--flush-cache:清除fact缓存
--list-hosts:列出匹配的远程主机,并不执行任何动作
-t TAGS, --tags=TAGS:运行指定的标签任务
--skip-tags:跳过指定标签的任务
- 远程主机通过playbook安装nginx
[root@ansible ~]# vi nginx_install.yaml
---
- hosts: 192.168.100.102
remote_user: root
tasks:
- name: nginxa
yum: name=pcre-devel,pcre,zlib-devel state=present
- name: nginxb
shell: cd /root; wget ftp://192.168.100.100/tools/nginx-1.12.2.tar.gz;tar zxvf /root/nginx-1.12.2.tar.gz -C /usr/src;cd /usr/src/nginx-1.12.2/;./configure --prefix=/usr/local/nginx;make;make install
- 利用playbook编写文件为远端主机安装http并配置其网页
---
- hosts: 192.168.100.103
remote_user: root
vars:
- pack: httpd
- serv: httpd
tasks:
- name: install httpd
yum: name={{ pack }} state=present
- name: copy html
copy: src=/tmp/index.html dest=/var/www/html/
notify:
- restart httpd
- name: start httpd services
shell: systemctl start httpd;systemctl enable httpd
handlers:
- name: restart httpd
service: name=httpd state=restarted
[root@ansible ansible]# echo "haohaoxuexi">/tmp/index.html
[root@ansible ansible]# ansible-playbook httpd.yml
[root@ansible ansible]# echo "tiantianxiangshang">/tmp/index.html
[root@ansible ansible]# ansible-playbook httpd.yml
- ansible循环基于列表items多值进行循环创建用户
---
[root@ansible ansible]# vim loop.yml
- hosts: 192.168.100.103
remote_user: root
tasks:
- name: create users
user: name={{ item }} state=present
with_items:
- test11
- test21
- test31
- ansible匹配节点的ip地址,批量修改主机名
[root@ansible ansible]# vim hostname.yml
---
- hosts: web
remote_user: root
tasks:
- name: show hostname
shell: hostname
- name: show ip
command: ip a
- hostname: name=web{{ ansible_default_ipv4.address.split('.')[-1] }}
MongoDB复制集
一、MongoDB复制集概述
-
组成
MongoDB复制集(副本集replica set)由一组mongod实例(进程)组成,包含一个Primary节点和多个Secondary节点,MongoDB Driver(客户端)的所有数据都写入primary,secondary通过oplog来同步primary的数据,保证主节点和从节点数据的一致性,复制集在完成主从复制的基础上,通过心跳机制,一旦primary节点出现宕机,则触发选举一个新的主节点,剩下的secondary节点指向新的primary,时间应该在10-30s内完成感知primary节点故障,实现高可用数据库集群; -
特点
- 主是唯一的,但不是固定的
- 通过oplog同步数据,保证数据的一致性;
- 从库无法写入(默认情况下,不使用驱动连接时,读也是不能查询的);
- 相对于传统的主从结构,复制集可以自动容灾;
二、MongoDB复制集原理
-
角色(按是否存储数据划分)
- primary主节点
由选举产生,负责客户端的写操作,产生oplog日志文件 - secondary从节点
负责客户端的读操作,提供数据的备份和故障的切换 - arbiter仲裁节点
只参与选举的投票,不会成为primary,也不向primary同步数据,若部署了一个2个节点的复制集,1个primary,1个secondary,任意节点宕机,复制集将不能提供服务(无法选出primary),这时可以给复制集添加一个arbiter节点,即使有节点宕机,仍能选出primary;
- primary主节点
-
角色(按类型区分)
- standard(标准)
这种是常规节点,它存储一份完整的数据副本,参与投票选举,有可能成为主节点 - passive(被动)
存储完整的数据副本,参与投票,不能成为活跃节点 - arbiter(投票)
仲裁节点只参与投票,不接收复制的数据,也不能成为活跃节点
注:每个参与节点(非仲裁者)有个优先权(0-1000),优先权(priority)为0则是被动的,不能成为活跃节点,优先权不为0的,按照由大到小选出活跃节点,优先值一样的则看谁的数据比较新; 注:Mongodb 3.0里,复制集成员最多50个,参与Primary选举投票的成员最多7个;
- standard(标准)
-
选举
每个节点通过优先级定义出节点的类型(标准、被动、投票);
标准节点通过对比自身数据进行选举出primary节点或者secondary节点;-
影响选举的因素
- 心跳检测
复制集内成员每隔两秒向其他成员发送心跳检测信息,若10秒内无响应,则标记其为不可用 - 连接
在多个节点中,最少保证两个节点为工作状态,如果集群中共三个节点,挂掉两个节点,那么剩余的节点无论状态是primary还是处于选举过程中,都会直接降权为secondary
- 心跳检测
-
触发的选举的情况
- 初始化状况
- 从节点们无法与主节点进行通信
- 主节点辞职
-
主节点辞职情况
- 在接收到replSetStepDown命令后
- 在现有的环境中,其他secondary节点的数据落后于本身10s内,且拥有更高优先级
- 当主节点无法与集群中多数节点通信时
注:当主节点辞职后,主节点将关闭自身所有的连接,避免出现客户端在从节点进行写入操作;
-
-
总结
- 架构方面
复制集由一个primary和多个secondary节点构成,在多个实例中进行选举以上角色; - 选举过程
每个MongoDB实例都会通过优先级被定义成为不同的类型(标准、被动、投票);
标准节点和被动节点进行投票选举,选举一个标准节点作为primary节点;
其他所有节点除仲裁节点以外,都被选举成secondary节点,通过oplog日志文件向primary节点同步数据; - 故障切换
在集群内,所有节点每隔10秒发送一次心跳检测信息,若在规定时间内未响应,则认为宕机,触发选举操作;
在多个节点中,至少两个节点是正常工作状态,如只剩下一个节点,则降级为secondary;
- 架构方面
三、部署MongoDB实现应用复制集
-
异常处理
当primary宕机时,如果有数据未同步到secondary,当primary重新加入时,如果新的primary上已经发生写操作,则旧primary需要回滚部分操作,以保证数据集与新的primary一致;旧primary将回滚的数据写到单独的rollback目录下,数据库管理员可根据需要使用mongorestore进行恢复。 -
环境
系统 | 主机名 | IP | 所需软件 |
---|---|---|---|
centos7.8 | mongodb | 192.168.100.105 | mongodb-linux-x86_64-rhel70-3.6.3.tgz |
- 安装mongodb
[root@mongodb ~]# tar xf mongodb-linux-x86_64-rhel70-3.6.3.tgz
[root@mongodb ~]# mv mongodb-linux-x86_64-rhel70-3.6.3 /usr/local/mongodb
[root@mongodb ~]# echo "export PATH=/usr/local/mongodb/bin:\$PATH" >>/etc/profile
[root@mongodb ~]# source /etc/profile
[root@mongodb ~]# ulimit -n 25000
[root@mongodb ~]# ulimit -u 25000
[root@mongodb ~]# echo 0 >/proc/sys/vm/zone_reclaim_mode
[root@mongodb ~]# sysctl -w vm.zone_reclaim_mode=0
vm.zone_reclaim_mode = 0
[root@mongodb ~]# echo never >/sys/kernel/mm/transparent_hugepage/enabled
[root@mongodb ~]# echo never >/sys/kernel/mm/transparent_hugepage/defrag
- 创建并启动四个实例
[root@mongodb ~]# cd /usr/local/mongodb/bin/
[root@mongodb bin]# mkdir {../mongodb1,../mongodb2,../mongodb3,../mongodb4}
[root@mongodb bin]# mkdir ../logs
[root@mongodb bin]# touch ../logs/mongodb{1..4}.log
[root@mongodb bin]# chmod 777 ../logs/mongodb*
[root@mongodb bin]# cat <<END >>/usr/local/mongodb/bin/mongodb1.conf
bind_ip=192.168.100.105
port=27017
dbpath=/usr/local/mongodb/mongodb1/
logpath=/usr/local/mongodb/logs/mongodb1.log
logappend=true
fork=true
maxConns=5000
replSet=haha
#replication name
END
[root@mongodb bin]# cat <<END >>/usr/local/mongodb/bin/mongodb2.conf
bind_ip=192.168.100.105
port=27018
dbpath=/usr/local/mongodb/mongodb2/
logpath=/usr/local/mongodb/logs/mongodb2.log
logappend=true
fork=true
maxConns=5000
replSet=haha
END
[root@mongodb bin]# vi /usr/local/mongodb/bin/mongodb2.conf
[root@mongodb bin]# cat <<END >>/usr/local/mongodb/bin/mongodb3.conf
bind_ip=192.168.100.105
port=27019
dbpath=/usr/local/mongodb/mongodb3/
logpath=/usr/local/mongodb/logs/mongodb3.log
logappend=true
fork=true
maxConns=5000
replSet=haha
END
[root@mongodb bin]# cat <<END >>/usr/local/mongodb/bin/mongodb4.conf
bind_ip=192.168.100.105
port=27020
dbpath=/usr/local/mongodb/mongodb4/
logpath=/usr/local/mongodb/logs/mongodb4.log
logappend=true
fork=true
maxConns=5000
replSet=haha
END
[root@mongodb bin]# cd
[root@mongodb ~]# mongod -f /usr/local/mongodb/bin/mongodb1.conf
[root@mongodb ~]# mongod -f /usr/local/mongodb/bin/mongodb2.conf
[root@mongodb ~]# mongod -f /usr/local/mongodb/bin/mongodb3.conf
[root@mongodb ~]# mongod -f /usr/local/mongodb/bin/mongodb4.conf
[root@mongodb ~]# netstat -utpln |grep mongod
tcp 0 0 192.168.100.105:27019 0.0.0.0:* LISTEN 1224/mongod
tcp 0 0 192.168.100.105:27020 0.0.0.0:* LISTEN 1252/mongod
tcp 0 0 192.168.100.105:27017 0.0.0.0:* LISTEN 1168/mongod
tcp 0 0 192.168.100.105:27018 0.0.0.0:* LISTEN 1196/mongod
[root@mongodb ~]# echo -e "/usr/local/mongodb/bin/mongod -f /usr/local/mongodb/bin/mongodb1.conf \n/usr/local/mongodb/bin/mongod -f /usr/local/mongodb/bin/mongodb2.conf\n/usr/local/mongodb/bin/mongod -f /usr/local/mongodb/bin/mongodb3.conf\n/usr/local/mongodb/bin/mongod -f /usr/local/mongodb/bin/mongodb4.conf">>/etc/rc.local
[root@mongodb ~]# chmod +x /etc/rc.local
[root@mongodb ~]# cat <<END >>/etc/init.d/mongodb
#!/bin/bash
INSTANCE=\$1
ACTION=\$2
case "\$ACTION" in
'start')
/usr/local/mongodb/bin/mongod -f /usr/local/mongodb/bin/"\$INSTANCE".conf;;
'stop')
/usr/local/mongodb/bin/mongod -f /usr/local/mongodb/bin/"\$INSTANCE".conf --shutdown;;
'restart')
/usr/local/mongodb/bin/mongod -f /usr/local/mongodb/bin/"\$INSTANCE".conf --shutdown
/usr/local/mongodb/bin/mongod -f /usr/local/mongodb/bin/"\$INSTANCE".conf;;
esac
END
[root@mongodb ~]# chmod +x /etc/init.d/mongodb
[root@mongodb ~]# /etc/init.d/mongodb mongodb1 stop
[root@mongodb ~]# /etc/init.d/mongodb mongodb1 start
- 配置实例
[root@mongodb ~]# mongo --port 27017 --host 192.168.100.105
##添加节点
> cfg={"_id":"haha","members":[{"_id":0,"host":"192.168.100.105:27017"},{"_id":1,"host":"192.168.100.105:27018"},{"_id":2,"host":"192.168.100.105:27019"}]}
{
"_id" : "haha",
"members" : [
{
"_id" : 0,
"host" : "192.168.100.105:27017"
},
{
"_id" : 1,
"host" : "192.168.100.105:27018"
},
{
"_id" : 2,
"host" : "192.168.100.105:27019"
}
]
}
##初始化节点
> rs.initiate(cfg)
{
"ok" : 1,
"operationTime" : Timestamp(1661686022, 1),
"$clusterTime" : {
"clusterTime" : Timestamp(1661686022, 1),
"signature" : {
"hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
"keyId" : NumberLong(0)
}
}
}
##查看复制群状态
haha:OTHER> rs.status()
{
"set" : "haha",
"date" : ISODate("2022-08-28T11:27:19.065Z"),
"myState" : 1,
"term" : NumberLong(1),
"heartbeatIntervalMillis" : NumberLong(2000),
"optimes" : {
"lastCommittedOpTime" : {
"ts" : Timestamp(1661686035, 3),
"t" : NumberLong(1)
},
"readConcernMajorityOpTime" : {
"ts" : Timestamp(1661686035, 3),
"t" : NumberLong(1)
},
"appliedOpTime" : {
"ts" : Timestamp(1661686035, 3),
"t" : NumberLong(1)
},
"durableOpTime" : {
"ts" : Timestamp(1661686035, 3),
"t" : NumberLong(1)
}
},
"members" : [
{
"_id" : 0,
"name" : "192.168.100.105:27017",
"health" : 1,
"state" : 1,
"stateStr" : "PRIMARY", ##主节点
"uptime" : 452,
"optime" : {
"ts" : Timestamp(1661686035, 3),
"t" : NumberLong(1)
},
"optimeDate" : ISODate("2022-08-28T11:27:15Z"),
"infoMessage" : "could not find member to sync from",
"electionTime" : Timestamp(1661686033, 1),
"electionDate" : ISODate("2022-08-28T11:27:13Z"),
"configVersion" : 1,
"self" : true
},
{
"_id" : 1,
"name" : "192.168.100.105:27018",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY", ##从节点
"uptime" : 16,
"optime" : {
"ts" : Timestamp(1661686035, 3),
"t" : NumberLong(1)
},
"optimeDurable" : {
"ts" : Timestamp(1661686035, 3),
"t" : NumberLong(1)
},
"optimeDate" : ISODate("2022-08-28T11:27:15Z"),
"optimeDurableDate" : ISODate("2022-08-28T11:27:15Z"),
"lastHeartbeat" : ISODate("2022-08-28T11:27:17.897Z"),
"lastHeartbeatRecv" : ISODate("2022-08-28T11:27:15.023Z"),
"pingMs" : NumberLong(0),
"syncingTo" : "192.168.100.105:27017",
"configVersion" : 1
},
{
"_id" : 2,
"name" : "192.168.100.105:27019",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY", ##从节点
"uptime" : 16,
"optime" : {
"ts" : Timestamp(1661686035, 3),
"t" : NumberLong(1)
},
"optimeDurable" : {
"ts" : Timestamp(1661686035, 3),
"t" : NumberLong(1)
},
"optimeDate" : ISODate("2022-08-28T11:27:15Z"),
"optimeDurableDate" : ISODate("2022-08-28T11:27:15Z"),
"lastHeartbeat" : ISODate("2022-08-28T11:27:17.896Z"),
"lastHeartbeatRecv" : ISODate("2022-08-28T11:27:15.018Z"),
"pingMs" : NumberLong(0),
"syncingTo" : "192.168.100.105:27017",
"configVersion" : 1
}
],
"ok" : 1,
"operationTime" : Timestamp(1661686035, 3),
"$clusterTime" : {
"clusterTime" : Timestamp(1661686035, 3),
"signature" : {
"hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
"keyId" : NumberLong(0)
}
}
}
##添加仲裁节点
haha:PRIMARY> rs.addArb("192.168.100.105:27020")
{
"ok" : 1,
"operationTime" : Timestamp(1661686116, 1),
"$clusterTime" : {
"clusterTime" : Timestamp(1661686116, 1),
"signature" : {
"hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
"keyId" : NumberLong(0)
}
}
}
haha:PRIMARY> rs.status()
...
{
"_id" : 3,
"name" : "192.168.100.105:27020",
"health" : 1,
"state" : 7,
"stateStr" : "ARBITER",
"uptime" : 19,
"lastHeartbeat" : ISODate("2022-08-28T11:28:54.331Z"),
"lastHeartbeatRecv" : ISODate("2022-08-28T11:28:51.329Z"),
"pingMs" : NumberLong(0),
"configVersion" : 2
}
...
注:rs.add()和rs.remove()命令分别用于添加和删除标准节点
haha:PRIMARY> show dbs
admin 0.000GB
config 0.000GB
local 0.000GB
haha:PRIMARY> use cloud
switched to db cloud
haha:PRIMARY> db.users.insert({"id":"1","name":"xiaoming"})
WriteResult({ "nInserted" : 1 })
haha:PRIMARY> db.users.find()
{ "_id" : ObjectId("630b53a4c821b079238237d4"), "id" : "1", "name" : "xiaoming" }
haha:PRIMARY> exit
bye
[root@mongodb ~]# mongo --port 27018 --host 192.168.100.105
haha:SECONDARY> show dbs ##secondary节点默认无法读取
2022-08-28T19:39:49.549+0800 E QUERY [thread1] Error: listDatabases failed:{
"operationTime" : Timestamp(1661686785, 1),
"ok" : 0,
"errmsg" : "not master and slaveOk=false",
"code" : 13435,
"codeName" : "NotMasterNoSlaveOk",
"$clusterTime" : {
"clusterTime" : Timestamp(1661686785, 1),
"signature" : {
"hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
"keyId" : NumberLong(0)
}
}
} :
_getErrorWithCode@src/mongo/shell/utils.js:25:13
Mongo.prototype.getDBs@src/mongo/shell/mongo.js:65:1
shellHelper.show@src/mongo/shell/utils.js:816:19
shellHelper@src/mongo/shell/utils.js:706:15
#通过以下方式或者驱动方式实现
haha:SECONDARY> db.getMongo().setSlaveOk();
haha:SECONDARY> show dbs
admin 0.000GB
cloud 0.000GB
config 0.000GB
local 0.000GB
haha:SECONDARY> use cloud
switched to db cloud
##secondary节点无法写入
haha:SECONDARY> db.users.insert({"id":"2","name":"xiaohong"})
WriteResult({ "writeError" : { "code" : 10107, "errmsg" : "not master" } })
haha:SECONDARY> db.users.find()
{ "_id" : ObjectId("630b53a4c821b079238237d4"), "id" : "1", "name" : "xiaoming" }
haha:SECONDARY> exit
bye
- 模拟primary故障,查看角色切换情况
[root@mongodb ~]# mongod -f /usr/local/mongodb/bin/mongodb1.conf --shutdown
[root@mongodb ~]# mongo --port 27018 --host 192.168.100.105
haha:PRIMARY> rs.status()
{
"set" : "haha",
"date" : ISODate("2022-08-28T11:46:53.956Z"),
"myState" : 1,
"term" : NumberLong(2),
"heartbeatIntervalMillis" : NumberLong(2000),
"optimes" : {
"lastCommittedOpTime" : {
"ts" : Timestamp(1661687175, 1),
"t" : NumberLong(1)
},
"readConcernMajorityOpTime" : {
"ts" : Timestamp(1661687175, 1),
"t" : NumberLong(1)
},
"appliedOpTime" : {
"ts" : Timestamp(1661687212, 1),
"t" : NumberLong(2)
},
"durableOpTime" : {
"ts" : Timestamp(1661687212, 1),
"t" : NumberLong(2)
}
},
"members" : [
{
"_id" : 0,
"name" : "192.168.100.105:27017",
"health" : 0,
"state" : 8,
"stateStr" : "(not reachable/healthy)", ##无法访问
"uptime" : 0,
"optime" : {
"ts" : Timestamp(0, 0),
"t" : NumberLong(-1)
},
"optimeDurable" : {
"ts" : Timestamp(0, 0),
"t" : NumberLong(-1)
},
"optimeDate" : ISODate("1970-01-01T00:00:00Z"),
"optimeDurableDate" : ISODate("1970-01-01T00:00:00Z"),
"lastHeartbeat" : ISODate("2022-08-28T11:46:53.225Z"),
"lastHeartbeatRecv" : ISODate("2022-08-28T11:46:19.785Z"),
"pingMs" : NumberLong(0),
"lastHeartbeatMessage" : "Connection refused",
"configVersion" : -1
},
{
"_id" : 1,
"name" : "192.168.100.105:27018",
"health" : 1,
"state" : 1,
"stateStr" : "PRIMARY", ##主节点
"uptime" : 1748,
"optime" : {
"ts" : Timestamp(1661687212, 1),
"t" : NumberLong(2)
},
"optimeDate" : ISODate("2022-08-28T11:46:52Z"),
"infoMessage" : "could not find member to sync from",
"electionTime" : Timestamp(1661687191, 1),
"electionDate" : ISODate("2022-08-28T11:46:31Z"),
"configVersion" : 2,
"self" : true
},
{
"_id" : 2,
"name" : "192.168.100.105:27019",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY", ##从节点
"uptime" : 1189,
"optime" : {
"ts" : Timestamp(1661687212, 1),
"t" : NumberLong(2)
},
"optimeDurable" : {
"ts" : Timestamp(1661687212, 1),
"t" : NumberLong(2)
},
"optimeDate" : ISODate("2022-08-28T11:46:52Z"),
"optimeDurableDate" : ISODate("2022-08-28T11:46:52Z"),
"lastHeartbeat" : ISODate("2022-08-28T11:46:53.218Z"),
"lastHeartbeatRecv" : ISODate("2022-08-28T11:46:53.380Z"),
"pingMs" : NumberLong(0),
"syncingTo" : "192.168.100.105:27018",
"configVersion" : 2
},
{
"_id" : 3,
"name" : "192.168.100.105:27020",
"health" : 1,
"state" : 7,
"stateStr" : "ARBITER", ##仲裁节点
"uptime" : 1097,
"lastHeartbeat" : ISODate("2022-08-28T11:46:53.213Z"),
"lastHeartbeatRecv" : ISODate("2022-08-28T11:46:51.915Z"),
"pingMs" : NumberLong(0),
"configVersion" : 2
}
],
"ok" : 1,
"operationTime" : Timestamp(1661687212, 1),
"$clusterTime" : {
"clusterTime" : Timestamp(1661687212, 1),
"signature" : {
"hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
"keyId" : NumberLong(0)
}
}
}
haha:PRIMARY> exit
bye
- 启用并手动切换primary角色
[root@mongodb ~]# mongod -f /usr/local/mongodb/bin/mongodb1.conf
[root@mongodb ~]# mongo --port 27018 --host 192.168.100.105
haha:PRIMARY> cfg={"_id":"haha","members":[{"_id":0,"host":"192.168.100.105:27017","priority":100},{"_id":1,"host":"192.168.100.105:27018","priority":10},{"_id":2,"host":"192.168.100.105:27019","priority":10}]}
haha:PRIMARY> rs.reconfig(cfg)
haha:PRIMARY> exit
bye
[root@mongodb ~]# mongod -f /usr/local/mongodb/bin/mongodb2.conf --shutdown
[root@mongodb ~]# mongod -f /usr/local/mongodb/bin/mongodb2.conf
[root@mongodb ~]# mongo --port 27017 --host 192.168.100.105
haha:PRIMARY> exit
bye
- 将标准节点停掉,被动节点也不会成为主节点
[root@mongodb ~]# mongod -f /usr/local/mongodb/bin/mongodb1.conf --shutdown
[root@mongodb ~]# mongod -f /usr/local/mongodb/bin/mongodb2.conf --shutdown
[root@mongodb ~]# mongo --port 27019 --host 192.168.100.105
haha:SECONDARY> rs.status()
{
"set" : "haha",
"date" : ISODate("2022-08-28T12:04:41.351Z"),
"myState" : 2,
"term" : NumberLong(4),
"heartbeatIntervalMillis" : NumberLong(2000),
"optimes" : {
"lastCommittedOpTime" : {
"ts" : Timestamp(1661688219, 1),
"t" : NumberLong(4)
},
"readConcernMajorityOpTime" : {
"ts" : Timestamp(1661688219, 1),
"t" : NumberLong(4)
},
"appliedOpTime" : {
"ts" : Timestamp(1661688219, 1),
"t" : NumberLong(4)
},
"durableOpTime" : {
"ts" : Timestamp(1661688219, 1),
"t" : NumberLong(4)
}
},
"members" : [
{
"_id" : 0,
"name" : "192.168.100.105:27017",
"health" : 0,
"state" : 8,
"stateStr" : "(not reachable/healthy)",
"uptime" : 0,
"optime" : {
"ts" : Timestamp(0, 0),
"t" : NumberLong(-1)
},
"optimeDurable" : {
"ts" : Timestamp(0, 0),
"t" : NumberLong(-1)
},
"optimeDate" : ISODate("1970-01-01T00:00:00Z"),
"optimeDurableDate" : ISODate("1970-01-01T00:00:00Z"),
"lastHeartbeat" : ISODate("2022-08-28T12:04:39.126Z"),
"lastHeartbeatRecv" : ISODate("2022-08-28T12:03:47.988Z"),
"pingMs" : NumberLong(0),
"lastHeartbeatMessage" : "Connection refused",
"configVersion" : -1
},
{
"_id" : 1,
"name" : "192.168.100.105:27018",
"health" : 0,
"state" : 8,
"stateStr" : "(not reachable/healthy)",
"uptime" : 0,
"optime" : {
"ts" : Timestamp(0, 0),
"t" : NumberLong(-1)
},
"optimeDurable" : {
"ts" : Timestamp(0, 0),
"t" : NumberLong(-1)
},
"optimeDate" : ISODate("1970-01-01T00:00:00Z"),
"optimeDurableDate" : ISODate("1970-01-01T00:00:00Z"),
"lastHeartbeat" : ISODate("2022-08-28T12:04:39.126Z"),
"lastHeartbeatRecv" : ISODate("2022-08-28T12:03:53.292Z"),
"pingMs" : NumberLong(0),
"lastHeartbeatMessage" : "Connection refused",
"configVersion" : -1
},
{
"_id" : 2,
"name" : "192.168.100.105:27019",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 125,
"optime" : {
"ts" : Timestamp(1661688219, 1),
"t" : NumberLong(4)
},
"optimeDate" : ISODate("2022-08-28T12:03:39Z"),
"infoMessage" : "could not find member to sync from",
"configVersion" : 4,
"self" : true
}
],
"ok" : 1,
"operationTime" : Timestamp(1661688219, 1),
"$clusterTime" : {
"clusterTime" : Timestamp(1661688219, 1),
"signature" : {
"hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
"keyId" : NumberLong(0)
}
}
}
haha:SECONDARY> exit
bye
[root@mongodb ~]# mongod -f /usr/local/mongodb/bin/mongodb1.conf
[root@mongodb ~]# mongod -f /usr/local/mongodb/bin/mongodb2.conf
[root@mongodb ~]# netstat -utpln |grep mongod
tcp 0 0 192.168.100.105:27019 0.0.0.0:* LISTEN 1918/mongod
tcp 0 0 192.168.100.105:27020 0.0.0.0:* LISTEN 1252/mongod
tcp 0 0 192.168.100.105:27017 0.0.0.0:* LISTEN 2024/mongod
tcp 0 0 192.168.100.105:27018 0.0.0.0:* LISTEN 2103/mongod
- 查询复制集状态及查看oplog日志文件的大小
[root@mongodb ~]# mongo --port 27019 --host 192.168.100.105
haha:PRIMARY> use local
switched to db local
haha:PRIMARY> rs.printSlaveReplicationInfo() ##查看节点信息
source: 192.168.100.105:27017
syncedTo: Sun Aug 28 2022 20:08:17 GMT+0800 (CST)
0 secs (0 hrs) behind the primary
source: 192.168.100.105:27018
syncedTo: Sun Aug 28 2022 20:08:17 GMT+0800 (CST)
0 secs (0 hrs) behind the primary
haha:PRIMARY> rs.printReplicationInfo()
configured oplog size: 990MB ##oplog大小为990M
log length start to end: 2535secs (0.7hrs)
oplog first event time: Sun Aug 28 2022 19:27:02 GMT+0800 (CST)
oplog last event time: Sun Aug 28 2022 20:09:17 GMT+0800 (CST)
now: Sun Aug 28 2022 20:09:18 GMT+0800 (CST)
haha:PRIMARY> db.oplog.rs.stats()
{
"ns" : "local.oplog.rs",
"size" : 28427,
"count" : 249,
"avgObjSize" : 114,
"storageSize" : 45056,
"capped" : true,
"max" : -1,
"maxSize" : 1038090240, ##单位是字节
...
- 部署用户认证登录(密钥文件keyFile)的复制集
[root@mongodb ~]# mongo --port 27019 --host 192.168.100.105
haha:PRIMARY> use admin
switched to db admin
haha:PRIMARY> db.createUser({"user":"root","pwd":"123","roles":["root"]})
Successfully added user: { "user" : "root", "roles" : [ "root" ] }
haha:PRIMARY> exit
bye
[root@mongodb ~]# echo -e "clusterAuthMode=keyFile\nkeyFile=/usr/local/mongodb/bin/cloudkey1" >>/usr/local/mongodb/bin/mongodb1.conf
[root@mongodb ~]# echo -e "clusterAuthMode=keyFile\nkeyFile=/usr/local/mongodb/bin/cloudkey2" >>/usr/local/mongodb/bin/mongodb2.conf
[root@mongodb ~]# echo -e "clusterAuthMode=keyFile\nkeyFile=/usr/local/mongodb/bin/cloudkey3" >>/usr/local/mongodb/bin/mongodb3.conf
[root@mongodb ~]# echo "haha key" >/usr/local/mongodb/bin/cloudkey1
[root@mongodb ~]# echo "haha key" >/usr/local/mongodb/bin/cloudkey2
[root@mongodb ~]# echo "haha key" >/usr/local/mongodb/bin/cloudkey3
[root@mongodb ~]# chmod 600 /usr/local/mongodb/bin/cloudkey*
[root@mongodb ~]# /etc/init.d/mongodb mongodb1 restart
[root@mongodb ~]# /etc/init.d/mongodb mongodb2 restart
[root@mongodb ~]# /etc/init.d/mongodb mongodb3 restart
[root@mongodb ~]# mongo --port 27019 --host 192.168.100.105
haha:PRIMARY> show dbs
2022-08-28T20:15:53.475+0800 E QUERY [thread1] Error: listDatabases failed:{
...
haha:PRIMARY> use admin
switched to db admin
haha:PRIMARY> db.auth("root","123")
1
haha:PRIMARY> show dbs
admin 0.000GB
cloud 0.000GB
config 0.000GB
local 0.000GB
haha:PRIMARY> exit
bye