1. Euler 21.10安装oracle19c-RAC
1.1. 环境规划
1.1.1. 主机规划
hostname | IP | 实例名 |
hfdb90 | 192.168.40.90 | hfdb1 |
hfdb91 | 192.168.40.91 | hfdb2 |
系统版本
BigCloud Enterprise Linux For Euler 21.10 (GNU/Linux 4.19.90-2107.6.0.0100.oe1.bclinux.x86_64 x86_64)
1.1.2. 磁盘规划
dggrid1:1g*3 -- 安装grid软件 ocr磁盘
dgsystem:8g=8g*1 -- 系统文件
dgrecovery:5g=5g*1 -- 闪回、归档目录
dgdata01:5g=5g*1 -- 数据文件目录
主机一:
主机二:
1.2. 主机配置(两台主机都要配置)
1.2.1. 修改主机名
主机名必须要和 hosts 文件中配置的名称要一致,否则后面安装会报错。
-- 主机一
hostnamectl set-hostname hfdb90 && bash
-- 主机二
hostnamectl set-hostname hfdb91 && bash
1.2.2. host文件配置
- vi /etc/hosts
cat >> /etc/hosts << EOF
192.168.40.90 hfdb90
192.168.40.91 hfdb91
10.10.10.90 hfdb90priv
10.10.10.91 hfdb91priv
192.168.40.92 hfdb90vip
192.168.40.93 hfdb91vip
192.168.40.94 hfdbscan
EOF
1.2.3. 修改环境变量
cat >> ~/.bash_profile << EOF
export LANG=en_US.UTF8
EOF
-- 使生效
source ~/.bash_profile
1.2.4. 创建用户和组
/usr/sbin/groupadd -g 60001 oinstall
/usr/sbin/groupadd -g 60002 dba
/usr/sbin/groupadd -g 60003 oper
/usr/sbin/groupadd -g 60004 backupdba
/usr/sbin/groupadd -g 60005 dgdba
/usr/sbin/groupadd -g 60006 kmdba
/usr/sbin/groupadd -g 60007 asmdba
/usr/sbin/groupadd -g 60008 asmoper
/usr/sbin/groupadd -g 60009 asmadmin
/usr/sbin/useradd -u 61001 -g oinstall -G asmadmin,asmdba,dba,asmoper,oper grid
/usr/sbin/useradd -u 61002 -g oinstall -G dba,backupdba,dgdba,kmdba,asmadmin,asmdba,oper oracle
echo "oracle" | passwd --stdin grid
echo "oracle" | passwd --stdin oracle
1.2.5. 创建目录
mkdir -p /oracle/app/grid
mkdir -p /oracle/app/19c/grid
chown -R grid:oinstall /oracle
mkdir -p /oracle/app/oraInventory
chown -R grid:oinstall /oracle/app/oraInventory
mkdir -p /oracle/app/oracle/product/19c/db_1
chown -R oracle:oinstall /oracle/app/oracle
chmod -R 775 /oracle
chown -R grid:oinstall /backup
chmod -R 775 /backup
1.2.6. 配置yum源
mount /dev/cdrom /mnt
cd /etc/yum.repos.d
mkdir bk
mv *.repo bk/
echo "[EL]" >> /etc/yum.repos.d/hefei.repo
echo "name =Linux 7.x DVD" >> /etc/yum.repos.d/hefei.repo
echo "baseurl=file:///mnt" >> /etc/yum.repos.d/hefei.repo
echo "gpgcheck=0" >> /etc/yum.repos.d/hefei.repo
echo "enabled=1" >> /etc/yum.repos.d/hefei.repo
cat /etc/yum.repos.d/hefei.repo
[root@hfcwdb66 yum.repos.d]# yum list |more
1.2.7. 安装相关软件
# From Public Yum or ULN
yum -y install autoconf
yum -y install automake
yum -y install binutils
yum -y install binutils-devel
yum -y install bison
yum -y install cpp
yum -y install dos2unix
yum -y install ftp
yum -y install gcc
yum -y install gcc-c++
yum -y install lrzsz
yum -y install python-devel
yum -y install compat-db*
yum -y install compat-gcc-34
yum -y install compat-gcc-34-c++
yum -y install compat-libcap1
yum -y install compat-libstdc++-33*
yum -y install compat-libstdc++-33.i686
yum -y install glibc-*
yum -y install glibc-*.i686
yum -y install libXpm-*.i686
yum -y install libXp.so.6
yum -y install libXt.so.6
yum -y install libXtst.so.6
yum -y install libXext
yum -y install libXext.i686
yum -y install libXtst
yum -y install libXtst.i686
yum -y install libX11
yum -y install libX11.i686
yum -y install libXau
yum -y install libXau.i686
yum -y install libxcb
yum -y install libxcb.i686
yum -y install libXi
yum -y install libXi.i686
yum -y install libXtst
yum -y install libstdc++-docs
yum -y install libgcc_s.so.1
yum -y install libstdc++.i686
yum -y install libstdc++-devel
yum -y install libstdc++-devel.i686
yum -y install libaio
yum -y install libaio.i686
yum -y install libaio-devel
yum -y install libaio-devel.i686
yum -y install ksh
yum -y install libXp
yum -y install libaio-devel
yum -y install numactl
yum -y install numactl-devel
yum -y install make -y
yum -y install sysstat -y
yum -y install unixODBC
yum -y install unixODBC-devel
yum -y install elfutils-libelf-devel-0.97
yum -y install elfutils-libelf-devel
yum -y install redhat-lsb-core
rpm -ivh compat-libstdc-33-3.2.3-72.el7.x86_64.rpm
rpm -ivh compat-libcap1-1.10-7.el7.x86_64.rpm
1.2.8. 设置启动级别(可选)
systemctl set-default multi-user.target
1.3. 修改系统相关参数(两台主机都要配置)
1.3.1. 修改资源限制参数
cat >> /etc/security/limits.conf << EOF
#ORACLE SETTING
grid soft nproc 16384
grid hard nproc 16384
grid soft nofile 65536
grid hard nofile 65536
grid soft stack 32768
grid hard stack 32768
oracle soft nproc 16384
oracle hard nproc 16384
oracle soft nofile 65536
oracle hard nofile 65536
oracle soft stack 32768
oracle hard stack 32768
#oracle hard memlock 2000000
#oracle soft memlock 2000000
EOF
-- 生效
ulimit -a
1.3.2. 控制给用户分配的资源
echo "session required pam_limits.so" >> /etc/pam.d/login
cat /etc/pam.d/login
1.3.3. 修改进程限制数
#Amend the "/etc/security/limits.d/90-nproc.conf" file as described below. See MOS Note [ID 1487773.1]
echo "* - nproc 16384" > /etc/security/limits.d/90-nproc.conf
1.3.4. 修改内核参数
cat >> /etc/sysctl.conf << EOF
#ORACLE SETTING
fs.aio-max-nr = 1048576
fs.file-max = 6815744
kernel.shmmax = 2684354560
kernel.shmall = 655360
kernel.shmmni = 4096
kernel.sem = 250 32000 100 128
vm.nr_hugepages = 1200
net.ipv4.ip_local_port_range = 9000 65500
net.ipv4.conf.all.rp_filter = 2
net.ipv4.conf.default.rp_filter = 2
net.ipv4.ip_forward=1
net.core.rmem_default = 262144
net.core.rmem_max = 4194304
net.core.wmem_default = 262144
net.core.wmem_max = 1048586
kernel.panic_on_oops = 1
vm.swappiness=5
vm.min_free_kbytes=204800
EOF
-- 生效
sysctl -p
- 配置参考:
kernel.shmmni = 4096
kernel.shmmax = 物理内存 * 0.8(即 物理内存GB数 * 0.8 * 1024 * 1024 * 1024,单位字节)
kernel.shmall = shmmax/shmmni
1.3.5. 关闭透明页,开大内存页面要关闭
cat /sys/kernel/mm/transparent_hugepage/defrag
[always] madvise never
cat /sys/kernel/mm/transparent_hugepage/enabled
[always] madvise never
这个状态说明是启用的。
cat >> /etc/rc.d/rc.local << EOF
if test -f /sys/kernel/mm/transparent_hugepage/enabled; then
echo never > /sys/kernel/mm/transparent_hugepage/enabled
fi
if test -f /sys/kernel/mm/transparent_hugepage/defrag; then
echo never > /sys/kernel/mm/transparent_hugepage/defrag
fi
EOF
chmod +x /etc/rc.d/rc.local
1.3.6. 禁用NUMA
关闭NUMA=OFF
/etc/default/grub:
sed -ie 's/rhgb quiet/rhgb quiet numa=off/g' /etc/default/grub
cat /etc/default/grub
然后执行:
grub2-mkconfig -o /etc/grub2.cfg
用这个命令执行看下是不是一个cpu可以识别到所有的内存
numactl --show
1.3.7. 修改时区
ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
hwclock
1.3.8. 配置安全
echo "SELINUX=disabled" > /etc/selinux/config
echo "#SELINUXTYPE=targeted " >> /etc/selinux/config
cat /etc/selinux/config
setenforce 0
1.3.9. 关闭防火墙
systemctl status firewalld.service
systemctl stop firewalld.service
systemctl disable firewalld.service
1.3.10. 设置内存的文件系统shm
共享内存段:
vi /etc/fstab
none /dev/shm tmpfs defaults,size=6144m 0 0
mount -o remount /dev/shm
echo "NOZEROCONF=yes" >> /etc/sysconfig/network
vi /etc/nsswitch.conf
修改行 hosts: files dns myhostname 为 hosts: files dns myhostname nis
sed -ie 's/files dns myhostname/files dns myhostname nis/g' /etc/nsswitch.conf
cat /etc/nsswitch.conf
1.3.11. 关闭校时
systemctl stop ntpd
systemctl disable ntpd
systemctl status ntpd
systemctl stop chronyd
systemctl disable chronyd
systemctl status chronyd
rm -rf /etc/chrony.conf
- 修改时间:
date -s '2025-01-07 20:00:00'
1.4. 环境变量设置
1.4.1. 主机 一:
su - grid
export LANG=en_US.UTF8
vi ~/.bash_profile
PS1="[`whoami`@`hostname`:"'$PWD]$'
export PS1
umask 022
#alias sqlplus="rlwrap sqlplus"
export TMP=/tmp
export LANG=en_US
export TMPDIR=$TMP
ORACLE_SID=+ASM1; export ORACLE_SID
ORACLE_TERM=xterm; export ORACLE_TERM
ORACLE_BASE=/oracle/app/grid; export ORACLE_BASE
ORACLE_HOME=/oracle/app/19c/grid; export ORACLE_HOME
NLS_DATE_FORMAT="yyyy-mm-dd HH24:MI:SS"; export NLS_DATE_FORMAT
PATH=.:$PATH:$HOME/bin:$ORACLE_HOME/bin; export PATH
THREADS_FLAG=native; export THREADS_FLAG
if [ $USER = "oracle" ] || [ $USER = "grid" ]; then
  if [ $SHELL = "/bin/ksh" ]; then
    ulimit -p 16384
    ulimit -n 65536
  else
    ulimit -u 16384 -n 65536
  fi
  umask 022
fi
su - oracle
export LANG=en_US.UTF8
vi ~/.bash_profile
PS1="[`whoami`@`hostname`:"'$PWD]$'
#alias sqlplus="rlwrap sqlplus"
#alias rman="rlwrap rman"
export PS1
export TMP=/tmp
export LANG=en_US
export TMPDIR=$TMP
export ORACLE_UNQNAME=hfdb
ORACLE_BASE=/oracle/app/oracle; export ORACLE_BASE
ORACLE_HOME=$ORACLE_BASE/product/19c/db_1; export ORACLE_HOME
ORACLE_SID=hfdb1; export ORACLE_SID
ORACLE_TERM=xterm; export ORACLE_TERM
NLS_DATE_FORMAT="yyyy-mm-dd HH24:MI:SS"; export NLS_DATE_FORMAT
NLS_LANG=AMERICAN_AMERICA.UTF8;export NLS_LANG
PATH=.:$PATH:$HOME/bin:$ORACLE_HOME/bin; export PATH
THREADS_FLAG=native; export THREADS_FLAG
if [ $USER = "oracle" ] || [ $USER = "grid" ]; then
  if [ $SHELL = "/bin/ksh" ]; then
    ulimit -p 16384
    ulimit -n 65536
  else
    ulimit -u 16384 -n 65536
  fi
  umask 022
fi
1.4.2. 主机二
su - grid
export LANG=en_US.UTF8
vi ~/.bash_profile
PS1="[`whoami`@`hostname`:"'$PWD]$'
export PS1
umask 022
export TMP=/tmp
export LANG=en_US.UTF8
export TMPDIR=$TMP
ORACLE_SID=+ASM2; export ORACLE_SID
ORACLE_TERM=xterm; export ORACLE_TERM
ORACLE_BASE=/oracle/app/grid; export ORACLE_BASE
ORACLE_HOME=/oracle/app/19c/grid; export ORACLE_HOME
NLS_DATE_FORMAT="yyyy-mm-dd HH24:MI:SS"; export NLS_DATE_FORMAT
PATH=.:$PATH:$HOME/bin:$ORACLE_HOME/bin; export PATH
THREADS_FLAG=native; export THREADS_FLAG
if [ $USER = "oracle" ] || [ $USER = "grid" ]; then
  if [ $SHELL = "/bin/ksh" ]; then
    ulimit -p 16384
    ulimit -n 65536
  else
    ulimit -u 16384 -n 65536
  fi
  umask 022
fi
su - oracle
export LANG=en_US.UTF8
vi ~/.bash_profile
PS1="[`whoami`@`hostname`:"'$PWD]$'
#alias sqlplus="rlwrap sqlplus"
#alias rman="rlwrap rman"
export PS1
export TMP=/tmp
export LANG=en_US
export TMPDIR=$TMP
export ORACLE_UNQNAME=hfdb
ORACLE_BASE=/oracle/app/oracle; export ORACLE_BASE
ORACLE_HOME=$ORACLE_BASE/product/19c/db_1; export ORACLE_HOME
ORACLE_SID=hfdb2; export ORACLE_SID
ORACLE_TERM=xterm; export ORACLE_TERM
NLS_DATE_FORMAT="yyyy-mm-dd HH24:MI:SS"; export NLS_DATE_FORMAT
NLS_LANG=AMERICAN_AMERICA.UTF8;export NLS_LANG
PATH=.:$PATH:$HOME/bin:$ORACLE_HOME/bin; export PATH
THREADS_FLAG=native; export THREADS_FLAG
if [ $USER = "oracle" ] || [ $USER = "grid" ]; then
  if [ $SHELL = "/bin/ksh" ]; then
    ulimit -p 16384
    ulimit -n 65536
  else
    ulimit -u 16384 -n 65536
  fi
  umask 022
fi
1.4.3. 配置ssh信任关系(在一台上面操作就可)
./sshUserSetup.sh -user grid -hosts "hfdb90 hfdb91" -advanced -noPromptPassphrase
./sshUserSetup.sh -user oracle -hosts "hfdb90 hfdb91" -advanced -noPromptPassphrase
- 测试:不用认证,返回时间就是配置成功正常。
[grid@hfdb90:/home/grid]$ssh hfdb91 date
[grid@hfdb91:/home/grid]$ssh hfdb90 date
1.5. ASM磁盘配置
1.5.1. 磁盘配置(一台主机操作即可)
[root@hfdb90 soft]# fdisk -l |grep 'Disk /dev'
Disk /dev/sdb: 1 GiB, 1073741824 bytes, 2097152 sectors
Disk /dev/sdd: 1 GiB, 1073741824 bytes, 2097152 sectors
Disk /dev/sdf: 5 GiB, 5368709120 bytes, 10485760 sectors
Disk /dev/sda: 100 GiB, 107374182400 bytes, 209715200 sectors
Disk /dev/sdc: 1 GiB, 1073741824 bytes, 2097152 sectors
Disk /dev/sde: 8 GiB, 8589934592 bytes, 16777216 sectors
Disk /dev/sdg: 5 GiB, 5368709120 bytes, 10485760 sectors
[root@hfdb91 yum.repos.d]# fdisk -l |grep 'Disk /dev'
Disk /dev/sda: 100 GiB, 107374182400 bytes, 209715200 sectors
Disk /dev/sdb: 1 GiB, 1073741824 bytes, 2097152 sectors
Disk /dev/sdc: 1 GiB, 1073741824 bytes, 2097152 sectors
Disk /dev/sdd: 1 GiB, 1073741824 bytes, 2097152 sectors
Disk /dev/sde: 8 GiB, 8589934592 bytes, 16777216 sectors
Disk /dev/sdf: 5 GiB, 5368709120 bytes, 10485760 sectors
Disk /dev/sdg: 5 GiB, 5368709120 bytes, 10485760 sectors
for i in {b..g};
do
echo "KERNEL==\"sd?\",SUBSYSTEM==\"block\", PROGRAM==\"/usr/lib/udev/scsi_id --whitelisted --replace-whitespace --device=/dev/\$name\",RESULT==\"`/usr/lib/udev/scsi_id --whitelisted --replace-whitespace --device=/dev/sd$i`\", SYMLINK+=\"asm-disk$i\",OWNER=\"grid\", GROUP=\"asmadmin\",MODE=\"0660\""
done
KERNEL=="sd?",SUBSYSTEM=="block", PROGRAM=="/usr/lib/udev/scsi_id --whitelisted --replace-whitespace --device=/dev/$name",RESULT=="1ATA_VBOX_HARDDISK_VBae83b083-0f585486", SYMLINK+="asm-grid1",OWNER="grid", GROUP="asmadmin",MODE="0660"
KERNEL=="sd?",SUBSYSTEM=="block", PROGRAM=="/usr/lib/udev/scsi_id --whitelisted --replace-whitespace --device=/dev/$name",RESULT=="1ATA_VBOX_HARDDISK_VBf5d2c236-c5f7b83c", SYMLINK+="asm-grid2",OWNER="grid", GROUP="asmadmin",MODE="0660"
KERNEL=="sd?",SUBSYSTEM=="block", PROGRAM=="/usr/lib/udev/scsi_id --whitelisted --replace-whitespace --device=/dev/$name",RESULT=="1ATA_VBOX_HARDDISK_VBd81a6bd6-39cb420b", SYMLINK+="asm-grid3",OWNER="grid", GROUP="asmadmin",MODE="0660"
KERNEL=="sd?",SUBSYSTEM=="block", PROGRAM=="/usr/lib/udev/scsi_id --whitelisted --replace-whitespace --device=/dev/$name",RESULT=="1ATA_VBOX_HARDDISK_VBb82d54e1-12490fc1", SYMLINK+="asm-dbsystem",OWNER="grid", GROUP="asmadmin",MODE="0660"
KERNEL=="sd?",SUBSYSTEM=="block", PROGRAM=="/usr/lib/udev/scsi_id --whitelisted --replace-whitespace --device=/dev/$name",RESULT=="1ATA_VBOX_HARDDISK_VB02af7605-5ae982f5", SYMLINK+="asm-dbrecovery",OWNER="grid", GROUP="asmadmin",MODE="0660"
KERNEL=="sd?",SUBSYSTEM=="block", PROGRAM=="/usr/lib/udev/scsi_id --whitelisted --replace-whitespace --device=/dev/$name",RESULT=="1ATA_VBOX_HARDDISK_VB685e5cd2-ea464e8b", SYMLINK+="asm-dbdata",OWNER="grid", GROUP="asmadmin",MODE="0660"
1.5.2. 配置规则(两台主机都要配置)
vi /etc/udev/rules.d/99-oracle-asmdevices.rules --两个主机都要做
/sbin/udevadm trigger --type=devices --action=change
ls -lsa /dev/asm*
ls -lsa /dev/sd* -权限确认
1.6. Grid集群安装
1.6.1. 上传安装软件
新建/soft 目录
LINUX.X64_193000_grid_home.zip -- grid 安装包
p35940989_190000_Linux-x86-64.zip --19.22 grid 补丁
p6880880_190000_Linux-x86-64.zip --Opatch 工具
su - grid
cd /oracle/app/19c/grid -- 解压
unzip -q /soft/LINUX.X64_193000_grid_home.zip
-- root: export CVUQDISK_GRP=oinstall
rpm -ivh /oracle/app/19c/grid/cv/rpm/cvuqdisk-1.0.10-1.rpm -- 第一台上安装
-- 第二台上面安装
scp /oracle/app/19c/grid/cv/rpm/cvuqdisk-1.0.10-1.rpm hfdb91:/soft
rpm -ivh /oracle/app/19c/grid/cv/rpm/cvuqdisk-1.0.10-1.rpm
1.6.2. 解压补丁包
mkdir p35
cd p35
unzip -q /soft/p35940989_190000_Linux-x86-64.zip
-- 备份之前的Opatch
cd /oracle/app/19c/grid
mv OPatch/ OPatch.bak
su - grid -c "unzip -q -o /soft/p6880880_190000_Linux-x86-64.zip -d /oracle/app/19c/grid"
1.6.3. 安装vnc(vnc 中无法打开 Terminal 终端)
yum install vnc
启动vnc :vncserver
在 vnc 中执行:
cd /oracle/app/19c/grid
./gridSetup.sh -applyRU /soft/p35/35940989/
安装grid软件,使用vnc打开grid用户终端,始终打不开,无法进行安装操作。
1.6.4. 使用 X display 安装
报错 1:ERROR: Unable to verify the graphical display setup. This application requires X display. Make sure that xdpyinfo exist under PATH variable.
解决 1:yum -y install xdpyinfo
报错 2:
/oracle/app/19c/grid/perl/bin/perl: error while loading shared libraries: libnsl.so.1: cannot open shared object file: No such file or directory
[root@hfdb90 ~]# ldd /oracle/app/19c/grid/perl/bin/perllinux-vdso.so.1 (0x00007ffdeeb29000)libpthread.so.0 => /usr/lib64/libpthread.so.0 (0x00007fdbe524c000)libnsl.so.1 => not foundlibdl.so.2 => /usr/lib64/libdl.so.2 (0x00007fdbe5247000)libm.so.6 => /usr/lib64/libm.so.6 (0x00007fdbe50c4000)libcrypt.so.1 => /usr/lib64/libcrypt.so.1 (0x00007fdbe5089000)libutil.so.1 => /usr/lib64/libutil.so.1 (0x00007fdbe5084000)libc.so.6 => /usr/lib64/libc.so.6 (0x00007fdbe4eca000)libgcc_s.so.1 => /usr/lib64/libgcc_s.so.1 (0x00007fdbe4eb1000)/lib64/ld-linux-x86-64.so.2 (0x00007fdbe5289000)
[root@hfdb90 ~]#
解决 2:
拷贝一个 libnsl-2.17.so 文件,云盘中有(两个节点都要执行)
ln -s libnsl-2.17.so /usr/lib64/libnsl.so.1
ls -rtl /usr/lib64/libnsl*
报错:ERROR: The home is not clean. This home cannot be used since there was a failed OPatch execution in this home. Use a different home to proceed.
解决:cd /oracle/app/19c/grid/install
[root@hfdb90 install]# mv patch patch.bak
--报错:
[grid@hfdb90:/oracle/app/19c/grid]$./gridSetup.sh -applyPSU /soft/p35/35940989/
Preparing the home to patch...
Applying the patch /soft/p35/35940989/...
OPatch command failed while applying the patch. For details look at the logs from /oracle/app/19c/grid/cfgtoollogs/opatchauto/.
解决:
chown -R grid:oinstall /soft
chmod -R 775 /soft
使用 grid 用户安装:
cd /oracle/app/19c/grid
./gridSetup.sh -applyRU /soft/p35/35940989/
-- 报错:INS-06006错误处理#重命名
mv /usr/bin/scp /usr/bin/scp.orig
# 创建新的scp
vi /usr/bin/scp
/usr/bin/scp.orig -T $*
# 修改权限
chmod 555 /usr/bin/scp
# 安装完成后改回来
mv /usr/bin/scp.orig /usr/bin/scp
-- 上面设置完成后,还是继续报错
使用[grid@hfdb90:/oracle/app/19c/grid]$./gridSetup.sh -debug #打印日志
ln -s libnsl-2.17.so /usr/lib64/libnsl.so.1
ls -rtl /usr/lib64/libnsl*
配置完成后,检查通过。
报错:
An internal error occurred within cluster verification framework
An error occurred in creating a TaskFactory object or in generating a task list
PRVG-0282 : failed to retrieve the operating system distribution ID
解决:
因Oracle本身不支持该操作系统,故修改 /oracle/app/19c/grid/cv/admin/cvu_config,在文件末尾添加 CV_ASSUME_DISTID=RHEL7,以RHEL7为安装标准。
下面的错误可以忽略:
执行 root 脚本:
- 两台主机上面都要执行,执行完成一台再执行另外一台。
/oracle/app/oraInventory/orainstRoot.sh
/oracle/app/19c/grid/root.sh
--执行的时间比较长,耐心等待第一台执行完后再执行第二台,执行的时间可以参考
[root@hfdb90 init.d]# /oracle/app/oraInventory/orainstRoot.sh
Changing permissions of /oracle/app/oraInventory.
Adding read,write permissions for group.
Removing read,write,execute permissions for world.Changing groupname of /oracle/app/oraInventory to oinstall.
The execution of the script is complete.
[root@hfdb90 init.d]# /oracle/app/19c/grid/root.sh
Performing root user operation.The following environment variables are set as:ORACLE_OWNER= gridORACLE_HOME= /oracle/app/19c/gridEnter the full pathname of the local bin directory: [/usr/local/bin]:Copying dbhome to /usr/local/bin ...Copying oraenv to /usr/local/bin ...Copying coraenv to /usr/local/bin ...Creating /etc/oratab file...
Entries will be added to the /etc/oratab file as needed by
Database Configuration Assistant when a database is created
Finished running generic part of root script.
Now product-specific root actions will be performed.
Relinking oracle with rac_on option
Using configuration parameter file: /oracle/app/19c/grid/crs/install/crsconfig_params
The log of current session can be found at:/oracle/app/grid/crsdata/hfdb90/crsconfig/rootcrs_hfdb90_2025-01-07_04-10-54PM.log
2025/01/07 16:11:13 CLSRSC-594: Executing installation step 1 of 19: 'ValidateEnv'.
2025/01/07 16:11:13 CLSRSC-594: Executing installation step 2 of 19: 'CheckFirstNode'.
2025/01/07 16:11:16 CLSRSC-594: Executing installation step 3 of 19: 'GenSiteGUIDs'.
2025/01/07 16:11:18 CLSRSC-594: Executing installation step 4 of 19: 'SetupOSD'.
Redirecting to /bin/systemctl restart rsyslog.service
2025/01/07 16:11:19 CLSRSC-594: Executing installation step 5 of 19: 'CheckCRSConfig'.
2025/01/07 16:11:20 CLSRSC-594: Executing installation step 6 of 19: 'SetupLocalGPNP'.
2025/01/07 16:11:37 CLSRSC-594: Executing installation step 7 of 19: 'CreateRootCert'.
2025/01/07 16:11:43 CLSRSC-594: Executing installation step 8 of 19: 'ConfigOLR'.
2025/01/07 16:11:59 CLSRSC-594: Executing installation step 9 of 19: 'ConfigCHMOS'.
2025/01/07 16:11:59 CLSRSC-594: Executing installation step 10 of 19: 'CreateOHASD'.
2025/01/07 16:12:07 CLSRSC-594: Executing installation step 11 of 19: 'ConfigOHASD'.
2025/01/07 16:12:08 CLSRSC-330: Adding Clusterware entries to file 'oracle-ohasd.service'
2025/01/07 16:12:42 CLSRSC-594: Executing installation step 12 of 19: 'SetupTFA'.
2025/01/07 16:12:42 CLSRSC-594: Executing installation step 13 of 19: 'InstallAFD'.
2025/01/07 16:12:43 CLSRSC-594: Executing installation step 14 of 19: 'InstallACFS'.
2025/01/07 16:12:50 CLSRSC-594: Executing installation step 15 of 19: 'InstallKA'.
2025/01/07 16:12:57 CLSRSC-594: Executing installation step 16 of 19: 'InitConfig'.ASM has been created and started successfully.[DBT-30001] Disk groups created successfully. Check /oracle/app/grid/cfgtoollogs/asmca/asmca-250107PM041429.log for d etails.2025/01/07 16:15:51 CLSRSC-482: Running command: '/oracle/app/19c/grid/bin/ocrconfig -upgrade grid oinstall'
CRS-4256: Updating the profile
Successful addition of voting disk dffda02ed6f94f33bf603f3f2938d09a.
Successful addition of voting disk 42f6909991584f84bfb6c09f456292de.
Successful addition of voting disk 95c06b4106ac4facbfdf0f09e3abd846.
Successfully replaced voting disk group with +DGGRID.
CRS-4256: Updating the profile
CRS-4266: Voting file(s) successfully replaced
## STATE File Universal Id File Name Disk group
-- ----- ----------------- --------- ---------1. ONLINE dffda02ed6f94f33bf603f3f2938d09a (/dev/sdb) [DGGRID]2. ONLINE 42f6909991584f84bfb6c09f456292de (/dev/sdc) [DGGRID]3. ONLINE 95c06b4106ac4facbfdf0f09e3abd846 (/dev/sdd) [DGGRID]
Located 3 voting disk(s).
2025/01/07 16:17:17 CLSRSC-594: Executing installation step 17 of 19: 'StartCluster'.
2025/01/07 16:17:40 CLSRSC-4002: Successfully installed Oracle Trace File Analyzer (TFA) Collector.
2025/01/07 16:18:21 CLSRSC-343: Successfully started Oracle Clusterware stack
2025/01/07 16:18:21 CLSRSC-594: Executing installation step 18 of 19: 'ConfigNode'.
2025/01/07 16:19:48 CLSRSC-594: Executing installation step 19 of 19: 'PostConfig'.
2025/01/07 16:20:18 CLSRSC-325: Configure Oracle Grid Infrastructure for a Cluster ... succeeded
安装完成:
1.7. Asm磁盘配置
[grid@hfdb90 grid]$ asmca
配置完成后,退出即可。
1.8. 数据库软件安装
1.8.1. 上传软件:
LINUX.X64_193000_db_home.zip --db 软件
p35943157_190000_Linux-x86-64.zip --19.22 补丁
1.8.2. 解压软件
--解压db软件
su - oracle
cd /oracle/app/oracle/product/19c/db_1
unzip -q /soft/LINUX.X64_193000_db_home.zip
-- 解压补丁
cd /soft
mkdir p35ora
cd p35ora
unzip -q /soft/p35943157_190000_Linux-x86-64.zip
1.8.3. 更新 Opatch
--root 下
-- 备份之前的Opatch
cd /oracle/app/oracle/product/19c/db_1
mv OPatch/ OPatch.bak
chown -R oracle:oinstall /soft
su - oracle -c "unzip -q -o /soft/p6880880_190000_Linux-x86-64.zip -d /oracle/app/oracle/product/19c/db_1"
1.8.4. 安装 db 软件
su - oracle
cd /oracle/app/oracle/product/19c/db_1
./runInstaller -applyRU /soft/p35ora/35943157
报错:
An internal error occurred within cluster verification framework
An error occurred in creating a TaskFactory object or in generating a task list
PRVG-0282 : failed to retrieve the operating system distribution ID
解决: vi /oracle/app/oracle/product/19c/db_1/cv/admin/cvu_config,在文件末尾添加 CV_ASSUME_DISTID=RHEL7(同 grid 安装时的处理)。
使用 root 用户在两个节点上面执行。
/oracle/app/oracle/product/19c/db_1/root.sh
1.9. 数据库安装:
[oracle@hfdb90:/oracle/app/oracle/product/19c/db_1]$dbca
安装完成。
1.10. 登录检查:
[root@hfdb90 soft]# crsctl status res -t
bash: crsctl: command not found...
Failed to search for file: repodata EL was not complete: Cannot open /mnt/repodata/repomd.xml: 没有那个文件或目录
[root@hfdb90 soft]# vi ~/.bash_profile
添加(使 crsctl 等集群命令对 root 可用,路径按本文 GRID_HOME 推断,请核实):
export PATH=$PATH:/oracle/app/19c/grid/bin
重新登录 root 用户,配置生效。
1.10.1. 检查 crs 集群
[root@hfdb90 ~]# crsctl status res -t
--------------------------------------------------------------------------------
Name Target State Server State details
--------------------------------------------------------------------------------
Local Resources
--------------------------------------------------------------------------------
ora.LISTENER.lsnrONLINE ONLINE hfdb90 STABLEONLINE ONLINE hfdb91 STABLE
ora.chadONLINE ONLINE hfdb90 STABLEONLINE ONLINE hfdb91 STABLE
ora.net1.networkONLINE ONLINE hfdb90 STABLEONLINE ONLINE hfdb91 STABLE
ora.onsONLINE ONLINE hfdb90 STABLEONLINE ONLINE hfdb91 STABLE
--------------------------------------------------------------------------------
Cluster Resources
--------------------------------------------------------------------------------
ora.ASMNET1LSNR_ASM.lsnr(ora.asmgroup)1 ONLINE ONLINE hfdb90 STABLE2 ONLINE ONLINE hfdb91 STABLE
ora.DGDATA.dg(ora.asmgroup)1 ONLINE ONLINE hfdb90 STABLE2 ONLINE ONLINE hfdb91 STABLE
ora.DGGRID.dg(ora.asmgroup)1 ONLINE ONLINE hfdb90 STABLE2 ONLINE ONLINE hfdb91 STABLE
ora.DGRECOVERY.dg(ora.asmgroup)1 ONLINE ONLINE hfdb90 STABLE2 ONLINE ONLINE hfdb91 STABLE
ora.DGSYSTEM.dg(ora.asmgroup)1 ONLINE ONLINE hfdb90 STABLE2 ONLINE ONLINE hfdb91 STABLE
ora.LISTENER_SCAN1.lsnr1 ONLINE ONLINE hfdb90 STABLE
ora.asm(ora.asmgroup)1 ONLINE ONLINE hfdb90 Started,STABLE2 ONLINE ONLINE hfdb91 Started,STABLE
ora.asmnet1.asmnetwork(ora.asmgroup)1 ONLINE ONLINE hfdb90 STABLE2 ONLINE ONLINE hfdb91 STABLE
ora.cvu1 ONLINE ONLINE hfdb90 STABLE
ora.hfdb.db1 ONLINE ONLINE hfdb90 Open,HOME=/oracle/app/oracle/product/19c/db_1,STABLE2 ONLINE ONLINE hfdb91 Open,HOME=/oracle/app/oracle/product/19c/db_1,STABLE
ora.hfdb90.vip1 ONLINE ONLINE hfdb90 STABLE
ora.hfdb91.vip1 ONLINE ONLINE hfdb91 STABLE
ora.qosmserver1 ONLINE ONLINE hfdb90 STABLE
ora.scan1.vip1 ONLINE ONLINE hfdb90 STABLE
--------------------------------------------------------------------------------
1.10.2. 监听检查
[root@hfdb90 ~]# su - grid
Last login: Tue Jan 7 20:20:33 CST 2025
[grid@hfdb90:/home/grid]$lsnrctl statusLSNRCTL for Linux: Version 19.0.0.0.0 - Production on 07-JAN-2025 20:21:54Copyright (c) 1991, 2023, Oracle. All rights reserved.Connecting to (DESCRIPTION=(ADDRESS=(PROTOCOL=IPC)(KEY=LISTENER)))
STATUS of the LISTENER
------------------------
Alias LISTENER
Version TNSLSNR for Linux: Version 19.0.0.0.0 - Production
Start Date 07-JAN-2025 16:20:08
Uptime 0 days 4 hr. 1 min. 46 sec
Trace Level off
Security ON: Local OS Authentication
SNMP OFF
Listener Parameter File /oracle/app/19c/grid/network/admin/listener.ora
Listener Log File /oracle/app/grid/diag/tnslsnr/hfdb90/listener/alert/log.xml
Listening Endpoints Summary...(DESCRIPTION=(ADDRESS=(PROTOCOL=ipc)(KEY=LISTENER)))(DESCRIPTION=(ADDRESS=(PROTOCOL=tcp)(HOST=192.168.40.90)(PORT=1521)))(DESCRIPTION=(ADDRESS=(PROTOCOL=tcp)(HOST=192.168.40.92)(PORT=1521)))(DESCRIPTION=(ADDRESS=(PROTOCOL=tcps)(HOST=hfdb90)(PORT=5500))(Security=(my_wallet_directory=/oracle/app/oracle/product/19c/db_1/admin/hfdb/xdb_wallet))(Presentation=HTTP)(Session=RAW))
Services Summary...
Service "+ASM" has 1 instance(s).Instance "+ASM1", status READY, has 1 handler(s) for this service...
Service "+ASM_DGDATA" has 1 instance(s).Instance "+ASM1", status READY, has 1 handler(s) for this service...
Service "+ASM_DGGRID" has 1 instance(s).Instance "+ASM1", status READY, has 1 handler(s) for this service...
Service "+ASM_DGRECOVERY" has 1 instance(s).Instance "+ASM1", status READY, has 1 handler(s) for this service...
Service "+ASM_DGSYSTEM" has 1 instance(s).Instance "+ASM1", status READY, has 1 handler(s) for this service...
Service "2b1d9602bd721a65e0635a28a8c063ad" has 1 instance(s).Instance "hfdb1", status READY, has 1 handler(s) for this service...
Service "86b637b62fdf7a65e053f706e80a27ca" has 1 instance(s).Instance "hfdb1", status READY, has 1 handler(s) for this service...
Service "hfdb" has 1 instance(s).Instance "hfdb1", status READY, has 1 handler(s) for this service...
Service "hfdbXDB" has 1 instance(s).Instance "hfdb1", status READY, has 1 handler(s) for this service...
Service "pdb" has 1 instance(s).Instance "hfdb1", status READY, has 1 handler(s) for this service...
The command completed successfully
[grid@hfdb90:/home/grid]$
1.10.3. 检查数据库实例
以上步骤验证完成,数据库安装正常。
通过百度网盘分享的文件:安装包
链接:百度网盘 请输入提取码