From 73fc5e48f3efffd311a8ca5cc74c4b094855e02e Mon Sep 17 00:00:00 2001 From: Saurabh Ahuja Date: Thu, 28 Mar 2024 11:12:10 +0530 Subject: [PATCH] common scripts added (#2776) * common scripts added * updated license and python3 packages as per feedback from Avi --- .../common/scripts/cmdExec | 22 + .../common/scripts/main.py | 173 + .../common/scripts/oraasmca.py | 148 + .../common/scripts/oracommon.py | 2987 +++++++++++++++++ .../common/scripts/oracvu.py | 261 ++ .../common/scripts/oraenv.py | 132 + .../common/scripts/orafactory.py | 165 + .../common/scripts/oragiadd.py | 314 ++ .../common/scripts/oragiprov.py | 561 ++++ .../common/scripts/oragridadd.py | 53 + .../common/scripts/oralogger.py | 182 + .../common/scripts/oramachine.py | 63 + .../common/scripts/oramiscops.py | 426 +++ .../common/scripts/oraracadd.py | 225 ++ .../common/scripts/oraracdel.py | 272 ++ .../common/scripts/oraracprov.py | 544 +++ .../common/scripts/oraracstdby.py | 643 ++++ .../common/scripts/orasetupenv.py | 694 ++++ .../common/scripts/orasshsetup.py | 213 ++ .../common/scripts/setupSSH.expect | 46 + .../dockerfiles/19.3.0/scripts | 1 + .../dockerfiles/21.3.0/scripts | 1 + 22 files changed, 8126 insertions(+) create mode 100644 OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/cmdExec create mode 100644 OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/main.py create mode 100644 OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oraasmca.py create mode 100755 OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oracommon.py create mode 100644 OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oracvu.py create mode 100644 OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oraenv.py create mode 100644 OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/orafactory.py create mode 100644 OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oragiadd.py create mode 100644 
OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oragiprov.py create mode 100644 OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oragridadd.py create mode 100644 OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oralogger.py create mode 100644 OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oramachine.py create mode 100644 OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oramiscops.py create mode 100644 OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oraracadd.py create mode 100644 OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oraracdel.py create mode 100755 OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oraracprov.py create mode 100644 OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oraracstdby.py create mode 100644 OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/orasetupenv.py create mode 100644 OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/orasshsetup.py create mode 100644 OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/setupSSH.expect create mode 120000 OracleDatabase/RAC/OracleRealApplicationClusters/dockerfiles/19.3.0/scripts create mode 120000 OracleDatabase/RAC/OracleRealApplicationClusters/dockerfiles/21.3.0/scripts diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/cmdExec b/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/cmdExec new file mode 100644 index 0000000000..45bed45bef --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/cmdExec @@ -0,0 +1,22 @@ +#!/bin/bash + +############################# +# Copyright (c) 2024, Oracle and/or its affiliates. 
+# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl +# Author: paramdeep.saini@oracle.com +############################ + +TIMESTAMP=$(date "+%Y-%m-%d") +LOGFILE="/tmp/oracle_rac_cmd_${TIMESTAMP}.log" +# shellcheck disable=SC2145 +echo "$(date -u) : $@" >> "$LOGFILE" + +cmd=("$@") +# shellcheck disable=SC2128 +$cmd +# shellcheck disable=SC2181 +if [ $? -eq 0 ]; then + exit 0 +else + exit 127 +fi diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/main.py b/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/main.py new file mode 100644 index 0000000000..dbfd825c94 --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/main.py @@ -0,0 +1,173 @@ +#!/usr/bin/python3 + +############################# +# Copyright (c) 2024, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl +# Author: paramdeep.saini@oracle.com +############################ + +""" +This is the main file which calls other file to setup the sharding. 
+""" + +from oralogger import * +from orafactory import * +from oraenv import * +from oracommon import * + + +def main(): + + # Checking Comand line Args + opts="" + try: + opts, args = getopt.getopt(sys.argv[1:], '', ['help','resetpassword=','delracnode=','addtns=', 'checkracinst=', 'checkgilocal=','checkdbrole=','checkracdb=','checkconnstr=','checkpdbconnstr=','setupdblsnr=','setuplocallsnr=']) + except getopt.GetoptError: + pass + + # Initializing oraenv instance + oenv=OraEnv() + file_name = os.path.basename(__file__) + funcname = sys._getframe(1).f_code.co_name + + log_file_name = oenv.logfile_name("NONE") + + # Initialiing logger instance + oralogger = OraLogger(log_file_name) + console_handler = CHandler() + file_handler = FHandler() + stdout_handler = StdHandler() + # Setting next log handlers + stdout_handler.nextHandler = file_handler + file_handler.nextHandler = console_handler + console_handler.nextHandler = PassHandler() + + ocommon = OraCommon(oralogger,stdout_handler,oenv) + + for opt, arg in opts: + if opt in ('--help'): + oralogger.msg_ = '''{:^17}-{:^17} : You can pass parameter --help''' + stdout_handler.handle(oralogger) + elif opt in ('--resetpassword'): + file_name = oenv.logfile_name("RESET_PASSWORD") + oralogger.filename_ = file_name + ocommon.log_info_message("=======================================================================",file_name) + oenv.add_custom_variable("RESET_PASSWORD",arg) + elif opt in ('--delracnode'): + file_name = oenv.logfile_name("DEL_PARAMS") + oralogger.filename_ = file_name + ocommon.log_info_message("=======================================================================",file_name) + oenv.add_custom_variable("DEL_PARAMS",arg) + oenv.add_custom_variable("CUSTOM_RUN_FLAG","true") + oenv.add_custom_variable("DEL_RACHOME","true") + oenv.add_custom_variable("DEL_GIHOME","true") + if ocommon.check_key("OP_TYPE",oenv.get_env_dict()): + oenv.update_key("OP_TYPE","racdelnode") + else: + 
oenv.add_custom_variable("OP_TYPE","miscops") + elif opt in ('--addtns'): + file_name = oenv.logfile_name("ADD_TNS") + oralogger.filename_ = file_name + ocommon.log_info_message("=======================================================================",file_name) + oenv.add_custom_variable("TNS_PARAMS",arg) + oenv.add_custom_variable("CUSTOM_RUN_FLAG","true") + if ocommon.check_key("OP_TYPE",oenv.get_env_dict()): + oenv.update_key("OP_TYPE","racdelnode") + else: + oenv.add_custom_variable("OP_TYPE","miscops") + elif opt in ('--checkracinst'): + file_name = oenv.logfile_name("CHECK_RAC_INST") + oralogger.filename_ = file_name + ocommon.log_info_message("=======================================================================",file_name) + oenv.add_custom_variable("CHECK_RAC_INST",arg) + oenv.add_custom_variable("CUSTOM_RUN_FLAG","true") + if ocommon.check_key("OP_TYPE",oenv.get_env_dict()): + oenv.update_key("OP_TYPE","miscops") + else: + oenv.add_custom_variable("OP_TYPE","miscops") + elif opt in ('--checkgilocal'): + file_name = oenv.logfile_name("CHECK_GI_LOCAL") + oralogger.filename_ = file_name + ocommon.log_info_message("=======================================================================",file_name) + oenv.add_custom_variable("CHECK_GI_LOCAL",arg) + oenv.add_custom_variable("CUSTOM_RUN_FLAG","true") + if ocommon.check_key("OP_TYPE",oenv.get_env_dict()): + oenv.update_key("OP_TYPE","miscops") + else: + oenv.add_custom_variable("OP_TYPE","miscops") + elif opt in ('--checkracdb'): + file_name = oenv.logfile_name("CHECK_RAC_DB") + oralogger.filename_ = file_name + ocommon.log_info_message("=======================================================================",file_name) + oenv.add_custom_variable("CHECK_RAC_DB",arg) + oenv.add_custom_variable("CUSTOM_RUN_FLAG","true") + if ocommon.check_key("OP_TYPE",oenv.get_env_dict()): + oenv.update_key("OP_TYPE","miscops") + else: + oenv.add_custom_variable("OP_TYPE","miscops") + elif opt in ('--checkdbrole'): + file_name 
= oenv.logfile_name("CHECK_DB_ROLE") + oralogger.filename_ = file_name + ocommon.log_info_message("=======================================================================",file_name) + oenv.add_custom_variable("CHECK_DB_ROLE",arg) + oenv.add_custom_variable("CUSTOM_RUN_FLAG","true") + if ocommon.check_key("OP_TYPE",oenv.get_env_dict()): + oenv.update_key("OP_TYPE","miscops") + else: + oenv.add_custom_variable("OP_TYPE","miscops") + elif opt in ('--checkconnstr'): + file_name = oenv.logfile_name("CHECK_CONNECT_STR") + oralogger.filename_ = file_name + ocommon.log_info_message("=======================================================================",file_name) + oenv.add_custom_variable("CHECK_CONNECT_STR",arg) + oenv.add_custom_variable("CUSTOM_RUN_FLAG","true") + if ocommon.check_key("OP_TYPE",oenv.get_env_dict()): + oenv.update_key("OP_TYPE","miscops") + else: + oenv.add_custom_variable("OP_TYPE","miscops") + elif opt in ('--checkpdbconnstr'): + file_name = oenv.logfile_name("CHECK_PDB_CONNECT_STR") + oralogger.filename_ = file_name + ocommon.log_info_message("=======================================================================",file_name) + oenv.add_custom_variable("CHECK_PDB_CONNECT_STR",arg) + oenv.add_custom_variable("CUSTOM_RUN_FLAG","true") + if ocommon.check_key("OP_TYPE",oenv.get_env_dict()): + oenv.update_key("OP_TYPE","miscops") + else: + oenv.add_custom_variable("OP_TYPE","miscops") + elif opt in ('--setupdblsnr'): + file_name = oenv.logfile_name("SETUP_DB_LSNR") + oralogger.filename_ = file_name + ocommon.log_info_message("=======================================================================",file_name) + oenv.add_custom_variable("NEW_DB_LSNR_ENDPOINTS",arg) + oenv.add_custom_variable("CUSTOM_RUN_FLAG","true") + if ocommon.check_key("OP_TYPE",oenv.get_env_dict()): + oenv.update_key("OP_TYPE","miscops") + elif opt in ('--setuplocallsnr'): + file_name = oenv.logfile_name("SETUP_LOCAL_LSNR") + oralogger.filename_ = file_name + 
ocommon.log_info_message("=======================================================================",file_name) + oenv.add_custom_variable("NEW_LOCAL_LISTENER",arg) + oenv.add_custom_variable("CUSTOM_RUN_FLAG","true") + if ocommon.check_key("OP_TYPE",oenv.get_env_dict()): + oenv.update_key("OP_TYPE","miscops") + else: + oenv.add_custom_variable("OP_TYPE","miscops") + else: + pass + + # Initializing orafactory instances + oralogger.msg_ = '''{:^17}-{:^17} : Calling OraFactory to start the setup'''.format(file_name,funcname) + stdout_handler.handle(oralogger) + orafactory = OraFactory(oralogger,stdout_handler,oenv,ocommon) + + # Get the ora objects + ofactory=orafactory.get_ora_objs() + + # Traverse through returned factory objects and execute the setup function + for obj in ofactory: + obj.setup() + +# Using the special variable +if __name__=="__main__": + main() diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oraasmca.py b/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oraasmca.py new file mode 100644 index 0000000000..4672e01e9a --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oraasmca.py @@ -0,0 +1,148 @@ +#!/usr/bin/python3 + +############################# +# Copyright (c) 2024, Oracle and/or its affiliates. 
+# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl +# Author: paramdeep.saini@oracle.com +############################ + +""" + This file contains to the code call different classes objects based on setup type +""" + +from oralogger import * +from oraenv import * +from oracommon import * +from oramachine import * +from orasetupenv import * +from orasshsetup import * +from oraracadd import * + +import os +import sys + +class OraAsmca: + """ + This class performs the ASMCA operations + """ + def __init__(self,oralogger,orahandler,oraenv,oracommon,oracvu,orasetupssh): + try: + self.ologger = oralogger + self.ohandler = orahandler + self.oenv = oraenv.get_instance() + self.ocommon = oracommon + self.ocvu = oracvu + self.orasetupssh = orasetupssh + self.ora_env_dict = oraenv.get_env_vars() + self.file_name = os.path.basename(__file__) + except BaseException as ex: + ex_type, ex_value, ex_traceback = sys.exc_info() + trace_back = traceback.extract_tb(ex_traceback) + stack_trace = list() + for trace in trace_back: + stack_trace.append("File : %s , Line : %d, Func.Name : %s, Message : %s" % (trace[0], trace[1], trace[2], trace[3])) + self.ocommon.log_info_message(ex_type.__name__,self.file_name) + self.ocommon.log_info_message(ex_value,self.file_name) + self.ocommon.log_info_message(stack_trace,self.file_name) + + def setup(self): + """ + This function setup the grid on this machine + """ + pass + + def validate_dg(self,device_list,device_prop,type): + """ + Check dg if it exist + """ + giuser,gihome,gbase,oinv=self.ocommon.get_gi_params() + device_prop,cname,cred,casm,crdbms,asdvm,cuasize=self.get_device_prop(device_prop,type) + self.ocommon.log_info_message("device prop set to :" + device_prop + " DG Name: " + cname + " Redudancy : " + cred, self.file_name) + cmd='''su - {0} -c "{1}/bin/asmcmd lsdg {2}"'''.format(giuser,gihome,cname) + output,error,retcode=self.ocommon.execute_cmd(cmd,None,None) + 
self.ocommon.check_os_err(output,error,retcode,None) + if self.ocommon.check_substr_match(output,cname): + return True + else: + return False + + def create_dg(self,device_list,device_prop,type): + """ + This function creates the disk group + """ + giuser,gihome,gbase,oinv=self.ocommon.get_gi_params() + disk_lst=self.get_device_list(device_list) + self.ocommon.log_info_message("The type is set to :" + type,self.file_name) + device_prop,cname,cred,casm,crdbms,asdvm,cuasize=self.get_device_prop(device_prop,type) + self.ocommon.log_info_message("device prop set to :" + device_prop + " DG Name: " + cname + " Redudancy : " + cred, self.file_name) + cmd='''su - {0} -c "{1}/bin/asmca -silent -createDiskGroup {3} {2}"'''.format(giuser,gihome,disk_lst,device_prop) + output,error,retcode=self.ocommon.execute_cmd(cmd,None,None) + self.ocommon.check_os_err(output,error,retcode,True) + + def get_device_list(self,device_list): + """ + This function returns the device_list + """ + disklst="" + for disk in device_list.split(','): + disklst +=""" -disk '{0}'""".format(disk) + + if disklst: + return disklst + else: + self.ocommon.log_error_message("disk string is set to None for diskgroup creation. 
Exiting..",self.file_name) + self.prog_exit("127") + + def get_device_prop(self,device_prop,type): + """ + This function returns the device_props + """ + cname="" + cred="" + casm="" + crdbms="" + cadvm="" + causize="" + cmd="" + + self.ocommon.log_info_message("The type is set to :" + type,self.file_name) + if device_prop: + cvar_dict=dict(item.split("=") for item in device_prop.split(";")) + for ckey in cvar_dict.keys(): + if ckey == 'name': + cname = cvar_dict[ckey] + if ckey == 'redundancy': + cred = cvar_dict[ckey] + if ckey == 'compatibleasm': + casm = cvar_dict[ckey] + if ckey == 'compatiblerdbms': + crdbms = cvar_dict[ckey] + if ckey == 'compatibleadvm': + cadvm = cvar_dict[ckey] + if ckey == 'au_size': + causize = cvar_dict[ckey] + + if not cname: + cmd +=''' -diskGroupName {0}'''.format(type) + cname=type + else: + cmd +=''' -diskGroupName {0}'''.format(cname) + if not cred: + cmd +=''' -redundancy {0}'''.format("EXTERNAL") + cred="EXTERNAL" + else: + cmd +=''' -redundancy {0}'''.format(cred) + if casm: + cmd +=""" -compatible.asm '{0}'""".format(casm) + if crdbms: + cmd +=""" -compatible.rdbms '{0}'""".format(crdbms) + if cadvm: + cmd +=""" -compatible.advm '{0}'""".format(cadvm) + if causize: + cmd +=""" -au_size '{0}'""".format(causize) + + if cmd: + return cmd,cname,cred,casm,crdbms,cadvm,causize + else: + self.ocommon.log_error_message("CMD is set to None for diskgroup creation. Exiting..",self.file_name) + self.ocommon.prog_exit("127") diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oracommon.py b/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oracommon.py new file mode 100755 index 0000000000..d996bc34cb --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oracommon.py @@ -0,0 +1,2987 @@ +#!/usr/bin/python3 + +############################# +# Copyright (c) 2024, Oracle and/or its affiliates. 
+# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl +# Author: paramdeep.saini@oracle.com +############################ + +from oralogger import * +from oraenv import * +import subprocess +import sys +import time +import datetime +import os +import getopt +import shlex +import json +import logging +import socket +import re +import os.path +import socket +import stat +import itertools +import string +import random +import glob +import pathlib + +class OraCommon: + def __init__(self,oralogger,orahandler,oraenv): + self.ologger = oralogger + self.ohandler = orahandler + self.oenv = oraenv.get_instance() + self.ora_env_dict = oraenv.get_env_vars() + self.file_name = os.path.basename(__file__) + + def run_sqlplus(self,cmd,sql_cmd,dbenv): + """ + This function execute the ran sqlplus or rman script and return the output + """ + try: + message="Received Command : {0}\n{1}".format(self.mask_str(cmd),self.mask_str(sql_cmd)) + self.log_info_message(message,self.file_name) + sql_cmd=self.unmask_str(sql_cmd) + cmd=self.unmask_str(cmd) +# message="Received Command : {0}\n{1}".format(cmd,sql_cmd) +# self.log_info_message(message,self.file_name) + p = subprocess.Popen(cmd,stdin=subprocess.PIPE,stdout=subprocess.PIPE,stderr=subprocess.PIPE,env=dbenv,shell=True) + p.stdin.write(sql_cmd.encode()) + # (stdout,stderr), retcode = p.communicate(sqlplus_script.encode('utf-8')), p.returncode + (stdout,stderr),retcode = p.communicate(),p.returncode + # stdout_lines = stdout.decode('utf-8').split("\n") + except: + error_msg=sys.exc_info() + self.log_error_message(error_msg,self.file_name) + self.prog_exit(self) + + return stdout.decode(),stderr.decode(),retcode + + def execute_cmd(self,cmd,env,dir): + """ + Execute the OS command on host + """ + try: + message="Received Command : {0}".format(self.mask_str(cmd)) + self.log_info_message(message,self.file_name) + cmd=self.unmask_str(cmd) + out = subprocess.Popen(cmd, shell=True, 
stdout=subprocess.PIPE, stderr=subprocess.PIPE) + (output,error),retcode = out.communicate(),out.returncode + except: + error_msg=sys.exc_info() + self.log_error_message(error_msg,self.file_name) + self.prog_exit(self) + + return output.decode(),error.decode(),retcode + + def mask_str(self,mstr): + """ + Function to mask the string. + """ + newstr=None + if self.oenv.encrypt_str__: + newstr=mstr.replace('HIDDEN_STRING','********') + # self.log_info_message(newstr,self.file_name) + if newstr: + # message = "Masked the string as encryption flag is set in the singleton class" + # self.log_info_message(message,self.file_name) + return newstr + else: + return mstr + + + def unmask_str(self,mstr): + """ + Function to unmask the string. + """ + newstr=None + if self.oenv.encrypt_str__: + newstr=mstr.replace('HIDDEN_STRING',self.oenv.original_str__.rstrip()) + # self.log_info_message(newstr,self.file_name) + if newstr: + # message = "Unmasked the encrypted string and returning original string from singleton class" + # self.log_info_message(message,self.file_name) + return newstr + else: + return mstr + + def set_mask_str(self,mstr): + """ + Function to unmask the string. + """ + if mstr: + # message = "Setting encrypted String flag to True and original string in singleton class" + # self.log_info_message(message,self.file_name) + self.oenv.encrypt_str__ = True + self.oenv.original_str__ = mstr + else: + message = "Masked String is empty so no change required in encrypted string flag and original string in singleton class" + self.log_info_message(message,self.file_name) + + def unset_mask_str(self): + """ + Function to unmask the string. 
+ """ + # message = "Un-setting encrypted String flag and original string to None in Singleton class" + # self.log_info_message(message,self.file_name) + self.oenv.encrypt_str__ = None + self.oenv.original_str__ = None + + def prog_exit(self,message): + """ + This function exit the program because of some error + """ + sys.exit(127) + + def log_info_message(self,lmessage,fname): + """ + Print the INFO message in the logger + """ + funcname = sys._getframe(1).f_code.co_name + message = '''{:^15}-{:^20}:{}'''.format(fname.split('.', 1)[0],funcname.replace("_", ""),lmessage) + self.ologger.msg_ = message + self.ologger.logtype_ = "INFO" + self.ohandler.handle(self.ologger) + + def log_error_message(self,lmessage,fname): + """ + Print the Error message in the logger + """ + funcname=sys._getframe(1).f_code.co_name + message='''{:^15}-{:^20}:{}'''.format(fname.split('.', 1)[0],funcname.replace("_", ""),lmessage) + self.ologger.msg_=message + self.ologger.logtype_="ERROR" + self.ohandler.handle(self.ologger) + + def log_warn_message(self,lmessage,fname): + """ + Print the Error message in the logger + """ + funcname=sys._getframe(1).f_code.co_name + message='''{:^15}-{:^20}:{}'''.format(fname.split('.', 1)[0],funcname.replace("_", ""),lmessage) + self.ologger.msg_=message + self.ologger.logtype_="WARN" + self.ohandler.handle(self.ologger) + + def check_sql_err(self,output,err,retcode,status): + """ + Check if there are any error in sql command output + """ + match=None + msg2='''Sql command failed.Flag is set not to ignore this error.Please Check the logs,Exiting the Program!''' + msg3='''Sql command failed.Flag is set to ignore this error!''' + self.log_info_message("output : " + str(output or "no Output"),self.file_name) + # self.log_info_message("Error : " + str(err or "no Error"),self.file_name) + # self.log_info_message("Sqlplus return code : " + str(retcode),self.file_name) + # self.log_info_message("Command Check Status Set to :" + str(status),self.file_name) + + 
if status: + if (retcode!=0): + self.log_info_message("Error : " + str(err or "no Error"),self.file_name) + self.log_error_message("Sql Login Failed.Please Check the logs,Exiting the Program!",self.file_name) + self.prog_exit(self) + + match=re.search("(?i)(?m)error",output) + if status: + if (match): + self.log_error_message(msg2,self.file_name) + self.prog_exit("error") + else: + self.log_info_message("Sql command completed successfully",self.file_name) + else: + if (match): + self.log_warn_message("Sql command failed. Flag is set to ignore the error.",self.file_name) + else: + self.log_info_message("Sql command completed sucessfully.",self.file_name) + + def check_dgmgrl_err(self,output,err,retcode,status): + """ + Check if there are any error in sql command output + """ + match=None + msg2='''DGMGRL command failed.Flag is set not to ignore this error.Please Check the logs,Exiting the Program!''' + msg3='''DGMGRL command failed.Flag is set to ignore this error!''' + self.log_info_message("output : " + str(output or "no Output"),self.file_name) + + if status: + if (retcode!=0): + self.log_info_message("Error : " + str(err or "no Error"),self.file_name) + self.log_error_message("DGMGRL Login Failed.Please Check the logs,Exiting the Program!",self.file_name) + self.prog_exit(self) + + match=re.search("(?i)(?m)failed",output) + if status: + if (match): + self.log_error_message(msg2,self.file_name) + self.prog_exit("error") + else: + self.log_info_message("DGMGRL command completed successfully",self.file_name) + else: + if (match): + self.log_warn_message("DGMGRL command failed. 
Flag is set to ignore the error.",self.file_name) + else: + self.log_info_message("DGGRL command completed sucessfully.",self.file_name) + + def check_os_err(self,output,err,retcode,status): + """ + Check if there are any error in OS command execution + """ + msg1='''OS command returned code : {0} and returned output : {1}'''.format(str(retcode),str(output or "no Output")) + msg2='''OS command returned code : {0}, returned error : {1} and returned output : {2}'''.format(str(retcode),str(err or "no returned error"),str(output or "no retruned output")) + msg3='''OS command failed. Flag is set to ignore this error!''' + + if status: + if (retcode != 0): + self.log_error_message(msg2,self.file_name) + self.prog_exit(self) + else: + self.log_info_message(msg1,self.file_name) + else: + if (retcode != 0): + self.log_warn_message(msg2,self.file_name) + self.log_warn_message(msg3,self.file_name) + else: + self.log_info_message(msg1,self.file_name) + + def check_key(self,key,env_dict): + """ + Check the key if it exist in dictionary. + Attributes: + key (string): String to check if key exist in dictionary + env_dict (dict): Contains the env variable related to seup + """ + if key in env_dict: + return True + else: + return False + + def empty_key(self,key): + """ + key is empty and print failure message. + Attributes: + key (string): String is empty + """ + msg='''Variable {0} is not defilned. Exiting!'''.format(key) + self.log_error_message(msg,self.file_name) + self.prog_exit(self) + + def add_key(self,key,value,env_dict): + """ + Add the key in the dictionary. 
+ Attributes: + key (string): key String to add in the dictionary + value (String): value String to add in dictionary + + Return: + dict + """ + if self.check_key(key,env_dict): + msg='''Variable {0} already exist in the env variables'''.format(key) + self.log_info_message(msg,self.file_name) + else: + if value: + env_dict[key] = value + self.oenv.update_env_vars(env_dict) + else: + msg='''Variable {0} value is not defilned to add in the env variables. Exiting!'''.format(value) + self.log_error_message(msg,self.file_name) + self.prog_exit(self) + + return env_dict + + def update_key(self,key,value,env_dict): + """ + update the key in the dictionary. + Attributes: + key (string): key String to update in the dictionary + value (String): value String to update in dictionary + + Return: + dict + """ + if self.check_key(key,env_dict): + if value: + env_dict[key] = value + self.oenv.update_env_vars(env_dict) + else: + msg='''Variable {0} value is not defined to update in the env variables!'''.format(key) + self.log_warn_message(msg,self.file_name) + else: + msg='''Variable {0} does not exist in the env variables'''.format(key) + self.log_info_message(msg,self.file_name) + + return env_dict + + def read_file(self,fname): + """ + Read the contents of a file and returns the contents to end user + Attributes: + fname (string): file to be read + + Return: + file data (string) + """ + f1 = open(fname, 'r') + fdata = f1.read() + f1.close + return fdata + + def write_file(self,fname,fdata): + """ + write the contents to a file + Attributes: + fname (string): file to be written + fdata (string): COnetents to be written + + Return: + file data (string) + """ + f1 = open(fname, 'w') + f1.write(fdata) + f1.close + + def append_file(self,fname,fdata): + """ + appened the contents to a file + Attributes: + fname (string): file to be written + fdata (string): COnetents to be written + + Return: + file data (string) + """ + f1 = open(fname, 'a') + f1.write(fdata) + f1.close + + def 
create_dir(self,dir,local,remote,user,group): + """ + Create dir locally or remotely + Attributes: + dir (string): dir to be created + local (boolean): dir to craetes locally + remote (boolean): dir to be created remotely + node (string): remote node name on which dir to be created + user (string): remote user to be connected + """ + self.log_info_message("Inside create_dir()",self.file_name) + if local: + if not os.path.isdir(dir): + cmd='''mkdir -p {0}'''.format(dir) + output,error,retcode=self.execute_cmd(cmd,None,None) + self.check_os_err(output,error,retcode,True) + cmd='''chown -R {0}:{1} {2}'''.format(user,group,dir) + output,error,retcode=self.execute_cmd(cmd,None,None) + cmd='''chmod 755 {0}'''.format(dir) + output,error,retcode=self.execute_cmd(cmd,None,None) + self.check_os_err(output,error,retcode,True) + else: + msg='''Dir {0} already exist'''.format(dir) + self.log_info_message(msg,self.file_name) + + + def create_file(self,file,local,remote,user): + """ + Create dir locally or remotely + Attributes: + file (string): file to be created + local (boolean): dir to craetes locally + remote (boolean): dir to be created remotely + node (string): remote node name on which dir to be created + user (string): remote user to be connected + """ + self.log_info_message("Inside create_file()",self.file_name) + if local: + if not os.path.isfile(file): + cmd='''touch {0}'''.format(file) + output,error,retcode=self.execute_cmd(cmd,None,None) + self.check_os_err(output,error,retcode,True) + + def create_pfile(self,pfile,spfile): + """ + Create pfile from spfile locally + """ + self.log_info_message("Inside create_pfile()",self.file_name) + osuser,dbhome,dbbase,oinv=self.get_db_params() + osid=self.ora_env_dict["GOLD_SID_NAME"] + + sqlpluslogincmd=self.get_sqlplus_str(dbhome,osid,osuser,"sys",None,None,None,osid,None,None,None) + sqlcmd=""" + create pfile='{0}' from spfile='{1}'; + """.format(pfile,spfile) + self.log_info_message("Running the sqlplus command to create 
pfile from spfile: " + sqlcmd,self.file_name) + output,error,retcode=self.run_sqlplus(sqlpluslogincmd,sqlcmd,None) + self.log_info_message("Calling check_sql_err() to validate the sql command return status",self.file_name) + self.check_sql_err(output,error,retcode,True) + + def create_spfile(self,spfile,pfile): + """ + Create spfile from pfile locally + """ + self.log_info_message("Inside create_spfile()",self.file_name) + osuser,dbhome,dbbase,oinv=self.get_db_params() + osid=self.ora_env_dict["DB_NAME"] + "1" + + sqlpluslogincmd=self.get_sqlplus_str(dbhome,osid,osuser,"sys",None,None,None,osid,None,None,None) + sqlcmd=""" + create spfile='{0}' from pfile='{1}'; + """.format(spfile,pfile) + self.log_info_message("Running the sqlplus command to create spfile from pfile: " + sqlcmd,self.file_name) + output,error,retcode=self.run_sqlplus(sqlpluslogincmd,sqlcmd,None) + self.log_info_message("Calling check_sql_err() to validate the sql command return status",self.file_name) + self.check_sql_err(output,error,retcode,True) + + def resetlogs(self,osid): + """ + Reset the database logs + """ + self.log_info_message("Inside resetlogs()",self.file_name) + osuser,dbhome,dbbase,oinv=self.get_db_params() + + sqlpluslogincmd=self.get_sqlplus_str(dbhome,osid,osuser,"sys",None,None,None,osid,None,None,None) + sqlcmd=''' + alter database open resetlogs; + ''' + self.log_info_message("Running the sqlplus command to resetlogs" + sqlcmd,self.file_name) + output,error,retcode=self.run_sqlplus(sqlpluslogincmd,sqlcmd,None) + self.log_info_message("Calling check_sql_err() to validate the sql command return status",self.file_name) + self.check_sql_err(output,error,retcode,True) + + def check_file(self,file,local,remote,user): + """ + check locally or remotely + Attributes: + file (string): file to be created + local (boolean): dir to craetes locally + remote (boolean): dir to be created remotely + node (string): remote node name on which dir to be created + user (string): remote user to be 
connected + """ + self.log_info_message("Inside check_file()",self.file_name) + if local: + if os.path.isfile(file): + return True + else: + return False + + + def latest_file(self,dir,): + """ + List the latest file in a directory + """ + files = os.listdir(dir) + paths = [os.path.join(dir, basename) for basename in files] + return max(paths, key=os.path.getctime) + + def latest_dir(self,dir,subdir): + """ + Get the latest dir matching a regexp + """ + self.log_info_message(" Received Params : basedir=" + dir + " subdir=" + subdir,self.file_name) + if subdir is None: + subdir = '*/' + dir1=sorted(pathlib.Path(dir).glob(subdir), key=os.path.getmtime)[-1] + return dir1 + + def shutdown_db(self,osid): + """ + Shutdown the database + """ + osuser,dbhome,dbbase,oinv=self.get_db_params() + self.log_info_message("Inside shutdown_db()",self.file_name) + sqlpluslogincmd=self.get_sqlplus_str(dbhome,osid,osuser,"sys",None,None,None,osid,None,None,None) + + sqlcmd=''' + shutdown immediate; + ''' + self.log_info_message("Running the sqlplus command to shutdown the database: " + sqlcmd,self.file_name) + output,error,retcode=self.run_sqlplus(sqlpluslogincmd,sqlcmd,None) + self.log_info_message("Calling check_sql_err() to validate the sql command return status",self.file_name) + self.check_sql_err(output,error,retcode,False) + + def start_db(self,osid,mode,pfile=None): + """ + start the database + """ + osuser,dbhome,dbbase,oinv=self.get_db_params() + self.log_info_message("Inside start_db()",self.file_name) + cmd="" + if mode is None: + mode=" " + + if pfile is not None: + cmd='''startup {1} pfile={0}'''.format(pfile,mode) + else: + cmd='''startup {0}'''.format(mode) + + sqlpluslogincmd=self.get_sqlplus_str(dbhome,osid,osuser,"sys",None,None,None,osid,None,None,None) + sqlcmd=''' + {0}; + '''.format(cmd) + self.log_info_message("Running the sqlplus command to start the database: " + sqlcmd,self.file_name) + output,error,retcode=self.run_sqlplus(sqlpluslogincmd,sqlcmd,None) + 
self.log_info_message("Calling check_sql_err() to validate the sql command return status",self.file_name) + self.check_sql_err(output,error,retcode,True) + + def check_substr_match(self,source_str,sub_str): + """ + CHeck if substring exist + """ + # self.log_info_message("Inside check_substr_match()",self.file_name) + if (source_str.find(sub_str) != -1): + return True + else: + return False + + def check_status_value(self,match): + """ + return completed or notcompleted + """ + # self.log_info_message("Inside check_status_value()",self.file_name) + if match: + return 'completed' + else: + return 'notcompleted' + + def remove_file(self,fname): + """ + Remove if file exist + """ + self.log_info_message("Inside remove_file()",self.file_name) + if os.path.exists(fname): + os.remove(fname) + + def get_global_dbdomain(self,ohost,gdbname): + """ + get the global dbname + """ + domain = self.get_host_domain() + if domain: + global_dbname = gdbname + domain + else: + global_dbname = gdbname + + return gdbname + + +########## Checking variable is set ############ + def check_env_variable(self,key,eflag): + """ + Check if env variable is set. If not exit if eflag is not set + """ + #self.ora_env_dict=self.oenv.get_env_vars() + if self.check_key(key,self.ora_env_dict): + self.log_info_message("Env variable " + key + " is set. 
Check passed!",self.file_name) + else: + if eflag: + self.log_error_message("Env variable " + key + " is not set " + ".Exiting..", self.file_name) + self.prog_exit("127") + else: + self.log_warn_message("Env variable " + key + " is not set " + ".Ignoring the variable and procedding further..", self.file_name) + + return True + + def get_optype(self): + """AI is creating summary for get_optype + This function retruns the op_type based on nodes + """ + racenvfile=self.get_envfile() + if racenvfile: + pass + + def get_envfile(self): + """AI is creating summary for get_envfile + It returns the RAC Env file + Returns: + str: return the raaenv file + """ + racenvfile="" + if self.check_key("RAC_ENV_FILE",self.ora_env_dict): + racenvfile=self.ora_env_dict["RAC_ENV_FILE"] + else: + racenvfile="/etc/rac_env_vars/envfile" + + return racenvfile + + def populate_rac_env_vars(self): + """ + Populate RAC env vars as key value pair + """ + racenvfile=self.get_envfile() + + if os.path.isfile(racenvfile): + with open(racenvfile) as fp: + for line in fp: + newstr=None + d=None + newstr=line.replace("export ","").strip() + self.log_info_message(newstr + " newstr is populated: ",self.file_name) + if len(newstr.split("=")) == 2: + key=newstr.split("=")[0] + value=newstr.split("=")[1] + # self.log_info_message(key + " key is populated: " + self.ora_env_dict[key] ,self.file_name) + if not self.check_key(key,self.ora_env_dict): + self.ora_env_dict=self.add_key(key,value,self.ora_env_dict) + self.log_info_message(key + " key is populated: " + self.ora_env_dict[key] ,self.file_name) + else: + self.log_info_message(key + " key exist with value " + self.ora_env_dict[key] ,self.file_name) + pass + # self.ora_env_dict=self.ora_env_dict + # print(self.ora_env_dict + +########### Get the install Node ####### + def get_installnode(self): + """AI is creating summary for get_installnode + This function return the install node name + Returns: + string: returns the install node name + string : return 
public host name + """ + install_node=None + pubhost=None + + if self.check_key("INSTALL_NODE",self.ora_env_dict): + install_node=self.ora_env_dict["INSTALL_NODE"] + else: + pass + + pubhost=self.get_public_hostname() + + return install_node,pubhost + +########## Ping the IP ############### + def ping_ip(self,ip,status): + """ + Check if IP is pingable or not + """ + cmd='''ping -c 3 {0}'''.format(ip) + output,error,retcode=self.execute_cmd(cmd,None,None) + if status: + self.check_os_err(output,error,retcode,True) + else: + self.check_os_err(output,error,retcode,None) + +########## Ping the IP ############### + def ping_host(self,host): + """ + Check if IP is pingable or not + """ + cmd='''ping -c 3 {0}'''.format(host) + output,error,retcode=self.execute_cmd(cmd,None,None) + return retcode + +########### IP Validations ############ + def validate_ip(self,ip): + """ + validate the IP + """ + try: + socket.inet_pton(socket.AF_INET, ip) + except socket.error: # not a valid address + return False + + return True + +######### Block Device Check ############# + def disk_exists(self,path): + """ + Check if block device exist + """ + try: + if self.check_key("ASM_ON_NAS",self.ora_env_dict): + if self.ora_env_dict["ASM_ON_NAS"] == 'True': + return stat.S_ISREG(os.stat(path).st_mode) + else: + return False + else: + return stat.S_ISBLK(os.stat(path).st_mode) + except: + return False + +######### Get Password ############## + def get_os_password(self): + """ + get the OS password + """ + ospasswd=self.get_password(None) + return ospasswd + + def get_asm_passwd(self): + """ + get the ASM password + """ + asmpasswd=self.get_password(None) + return asmpasswd + + def get_db_passwd(self): + """ + get the DB password + """ + dbpasswd=self.get_password(None) + return dbpasswd + + def get_tde_passwd(self): + """ + get the tde password + """ + dbpasswd=self.get_password("TDE_PASSWORD") + return dbpasswd + + def get_sys_passwd(self): + """ + get the sys user password + """ + 
dbpasswd=self.get_password(None) + return dbpasswd + + def get_password(self,key): + """ + get the password + """ + svolume=None + pwdfile=None + pwdkey=None + passwdfile=None + keyvolume=None + + if key is not None: + if key == 'TDE_PASSWORD': + svolume,pwdfile,pwdkey,passwdfile,keyvolume=self.get_tde_passwd_details() + else: + svolume,pwdfile,pwdkey,passwdfile,keyvolume=self.get_db_passwd_details() + + if self.check_key("PWD_VOLUME",self.ora_env_dict): + pwd_volume=self.ora_env_dict["PWD_VOLUME"] + else: + pwd_volume="/var/tmp" + + password=self.set_password(svolume,pwdfile,pwdkey,passwdfile,keyvolume,pwd_volume) + return password + + def get_tde_passwd_details(self): + """ + This function return the TDE parameters + """ + if self.check_key("TDE_SECRET_VOLUME",self.ora_env_dict): + self.log_info_message("TDE_SECRET_VOLUME set to : ",self.ora_env_dict["TDE_SECRET_VOLUME"]) + msg='''TDE_SECRET_VOLUME passed as an env variable and set to {0}'''.format(self.ora_env_dict["TDE_SECRET_VOLUME"]) + else: + self.ora_env_dict=self.add_key("TDE_SECRET_VOLUME","/run/.tdesecret",self.ora_env_dict) + msg='''TDE_SECRET_VOLUME not passed as an env variable. Setting default to {0}'''.format(self.ora_env_dict["TDE_SECRET_VOLUME"]) + self.log_warn_message(msg,self.file_name) + + if self.check_key("TDE_KEY_SECRET_VOLUME",self.ora_env_dict): + self.log_info_message("Tde Secret_Volume set to : ",self.ora_env_dict["TDE_KEY_SECRET_VOLUME"]) + msg='''TDE_KEY_SECRET_VOLUME passed as an env variable and set to {0}'''.format(self.ora_env_dict["TDE_KEY_SECRET_VOLUME"]) + else: + if self.check_key("TDE_SECRET_VOLUME",self.ora_env_dict): + self.ora_env_dict=self.add_key("TDE_KEY_SECRET_VOLUME",self.ora_env_dict["TDE_SECRET_VOLUME"],self.ora_env_dict) + msg='''TDE_KEY_SECRET_VOLUME not passed as an env variable. 
Setting default to {0}'''.format(self.ora_env_dict["TDE_KEY_SECRET_VOLUME"]) + self.log_warn_message(msg,self.file_name) + + if self.check_key("TDE_PWD_FILE",self.ora_env_dict): + msg='''TDE_PWD_FILE passed as an env variable and set to {0}'''.format(self.ora_env_dict["TDE_PWD_FILE"]) + else: + self.ora_env_dict=self.add_key("TDE_PWD_FILE","tde_pwdfile.enc",self.ora_env_dict) + msg='''TDE_PWD_FILE not passed as an env variable. Setting default to {0}'''.format(self.ora_env_dict["TDE_PWD_FILE"]) + self.log_warn_message(msg,self.file_name) + + if self.check_key("TDE_PWD_KEY",self.ora_env_dict): + msg='''TDE_PWD_KEY passed as an env variable and set to {0}'''.format(self.ora_env_dict["TDE_PWD_KEY"]) + else: + self.ora_env_dict=self.add_key("TDE_PWD_KEY","tdepwd.key",self.ora_env_dict) + msg='''TDE_PWD_KEY not passed as an env variable. Setting default to {0}'''.format(self.ora_env_dict["TDE_PWD_KEY"]) + self.log_warn_message(msg,self.file_name) + + return self.ora_env_dict["TDE_SECRET_VOLUME"],self.ora_env_dict["TDE_PWD_FILE"],self.ora_env_dict["TDE_PWD_KEY"],"tdepwdfile",self.ora_env_dict["TDE_KEY_SECRET_VOLUME"] + + def get_db_passwd_details(self): + """ + This function return the db passwd paameters + """ + if self.check_key("SECRET_VOLUME",self.ora_env_dict): + self.log_info_message("Secret_Volume set to : ",self.ora_env_dict["SECRET_VOLUME"]) + msg='''SECRET_VOLUME passed as an env variable and set to {0}'''.format(self.ora_env_dict["SECRET_VOLUME"]) + else: + self.ora_env_dict=self.add_key("SECRET_VOLUME","/run/secrets",self.ora_env_dict) + msg='''SECRET_VOLUME not passed as an env variable. 
Setting default to {0}'''.format(self.ora_env_dict["SECRET_VOLUME"]) + self.log_warn_message(msg,self.file_name) + + if self.check_key("KEY_SECRET_VOLUME",self.ora_env_dict): + self.log_info_message("Secret_Volume set to : ",self.ora_env_dict["KEY_SECRET_VOLUME"]) + msg='''KEY_SECRET_VOLUME passed as an env variable and set to {0}'''.format(self.ora_env_dict["KEY_SECRET_VOLUME"]) + else: + if self.check_key("SECRET_VOLUME",self.ora_env_dict): + self.ora_env_dict=self.add_key("KEY_SECRET_VOLUME",self.ora_env_dict["SECRET_VOLUME"],self.ora_env_dict) + msg='''KEY_SECRET_VOLUME not passed as an env variable. Setting default to {0}'''.format(self.ora_env_dict["KEY_SECRET_VOLUME"]) + self.log_warn_message(msg,self.file_name) + + if self.check_key("DB_PWD_FILE",self.ora_env_dict): + msg='''DB_PWD_FILE passed as an env variable and set to {0}'''.format(self.ora_env_dict["DB_PWD_FILE"]) + else: + self.ora_env_dict=self.add_key("DB_PWD_FILE","common_os_pwdfile.enc",self.ora_env_dict) + msg='''DB_PWD_FILE not passed as an env variable. Setting default to {0}'''.format(self.ora_env_dict["DB_PWD_FILE"]) + self.log_warn_message(msg,self.file_name) + + if self.check_key("PWD_KEY",self.ora_env_dict): + msg='''PWD_KEY passed as an env variable and set to {0}'''.format(self.ora_env_dict["PWD_KEY"]) + else: + self.ora_env_dict=self.add_key("PWD_KEY","pwd.key",self.ora_env_dict) + msg='''PWD_KEY not passed as an env variable. Setting default to {0}'''.format(self.ora_env_dict["PWD_KEY"]) + self.log_warn_message(msg,self.file_name) + + if self.check_key("PASSWORD_FILE",self.ora_env_dict): + msg='''PASSWORD_FILE passed as an env variable and set to {0}'''.format(self.ora_env_dict["PASSWORD_FILE"]) + else: + self.ora_env_dict=self.add_key("PASSWORD_FILE","dbpasswd.file",self.ora_env_dict) + msg='''PASSWORD_FILE not passed as an env variable. 
Setting default to {0}'''.format(self.ora_env_dict["PASSWORD_FILE"]) + self.log_warn_message(msg,self.file_name) + + return self.ora_env_dict["SECRET_VOLUME"],self.ora_env_dict["DB_PWD_FILE"],self.ora_env_dict["PWD_KEY"],self.ora_env_dict["PASSWORD_FILE"],self.ora_env_dict["KEY_SECRET_VOLUME"] + + def set_password(self,secret_volume,passwd_file,key_file,dbpasswd_file,key_secret_volume,pwd_volume): + passwd_file_flag=False + password=None + password_file=None + passwordfile1='''{0}/{1}'''.format(secret_volume,passwd_file) + passwordkeyfile='''{0}/{1}'''.format(secret_volume,key_file) + passwordfile2='''{0}/{1}'''.format(secret_volume,dbpasswd_file) + self.log_info_message("Secret volume file set to : " + secret_volume,self.file_name) + self.log_info_message("Password file set to : " + passwd_file,self.file_name) + self.log_info_message("key file set to : " + key_file,self.file_name) + self.log_info_message("dbpasswd file set to : " + dbpasswd_file,self.file_name) + self.log_info_message("key secret volume set to : " + key_secret_volume,self.file_name) + self.log_info_message("pwd volume set : " + pwd_volume,self.file_name) + self.log_info_message("passwordfile1 set to : " + passwordfile1,self.file_name) + self.log_info_message("passwordkeyfile set to : " + passwordkeyfile,self.file_name) + self.log_info_message("passwordfile2 set to : " + passwordfile2,self.file_name) + if (os.path.isfile(passwordfile1)) and (os.path.isfile(passwordkeyfile)): + msg='''Passwd file {0} and key file {1} exist. 
Password file Check passed!'''.format(passwordfile1,passwordkeyfile) + self.log_info_message(msg,self.file_name) + msg='''Reading encrypted passwd from file {0}.'''.format(passwordfile1) + self.log_info_message(msg,self.file_name) + cmd=None + if self.check_key("ENCRYPTION_TYPE",self.ora_env_dict): + if self.ora_env_dict["ENCRYPTION_TYPE"].lower() == "aes256": + cmd='''openssl enc -d -aes-256-cbc -in \"{0}/{1}\" -out {2}/{1} -pass file:\"{3}/{4}\"'''.format(secret_volume,passwd_file,pwd_volume,key_secret_volume,key_file) + elif self.ora_env_dict["ENCRYPTION_TYPE"].lower() == "rsautl": + cmd ='''openssl rsautl -decrypt -in \"{0}/{1}\" -out {2}/{1} -inkey \"{3}/{4}\"'''.format(secret_volume,passwd_file,pwd_volume,key_secret_volume,key_file) + else: + pass + else: + cmd ='''openssl pkeyutl -decrypt -in \"{0}/{1}\" -out {2}/{1} -inkey \"{3}/{4}\"'''.format(secret_volume,passwd_file,pwd_volume,key_secret_volume,key_file) + + output,error,retcode=self.execute_cmd(cmd,None,None) + self.check_os_err(output,error,retcode,True) + passwd_file_flag = True + password_file='''{0}/{1}'''.format(pwd_volume,passwd_file) + elif os.path.isfile(passwordfile2): + msg='''Passwd file {0} exist. 
Password file Check passed!'''.format(dbpasswd_file) + self.log_info_message(msg,self.file_name) + msg='''Reading encrypted passwd from file {0}.'''.format(dbpasswd_file) + self.log_info_message(msg,self.file_name) + cmd='''openssl base64 -d -in \"{0}\" -out \"{2}/{1}\"'''.format(passwordfile2,dbpasswd_file,pwd_volume) + output,error,retcode=self.execute_cmd(cmd,None,None) + self.check_os_err(output,error,retcode,True) + passwd_file_flag = True + password_file='''{1}/{0}'''.format(dbpasswd_file,pwd_volume) + + if not passwd_file_flag: + # get random password pf length 8 with letters, digits, and symbols + characters1 = string.ascii_letters + string.digits + "_-%#" + str1 = ''.join(random.choice(string.ascii_uppercase) for i in range(4)) + str2 = ''.join(random.choice(characters1) for i in range(8)) + password=str1+str2 + else: + fname='''{0}'''.format(password_file) + fdata=self.read_file(fname) + password=fdata + + if self.check_key("ORACLE_PWD",self.ora_env_dict): + msg="ORACLE_PWD is passed as an env variable. Check Passed!" 
+ self.log_info_message(msg,self.file_name) + else: + #self.ora_env_dict=self.add_key("ORACLE_PWD",password,self.ora_env_dict) + #msg="ORACLE_PWD set to HIDDEN_STRING generated using encrypted password file" + self.log_info_message(msg,self.file_name) + + return password + +######### Get OS Password ############## + def reset_os_password(self,user): + """ + reset the OS password + """ + self.log_info_message('''Resetting OS user {0} password'''.format(user),self.file_name) + #proc = subprocess.Popen(['/usr/bin/passwd', user, '--stdin']) + #proc.communicate(passwd) + ospasswd=self.get_os_password() + self.set_mask_str(ospasswd) + cmd='''usermod --password $(openssl passwd -1 {1}) {0}'''.format(user,'HIDDEN_STRING') + #cmd='''bash -c \"echo -e '{1}\\n{1}' | passwd {0}\"'''.format(user,passwd) + output,error,retcode=self.execute_cmd(cmd,None,None) + self.check_os_err(output,error,retcode,True) + self.unset_mask_str() + +######### Copy the file to remote machine ############ + def scpfile(self,node,srcfile,destfile,user): + """ + copy file to remot machine + """ + cmd='''su - {0} -c "scp {2} {0}@{1}:{3}"'''.format(user,node,srcfile,destfile) + output,error,retcode=self.execute_cmd(cmd,None,None) + self.check_os_err(output,error,retcode,True) + +######### Copy file across cluster ######### + def copy_file_cluster(self,srcfile,destfile,user): + """ + copy file on all the machines of the cluster + """ + cluster_nodes=self.get_cluster_nodes() + for node in cluster_nodes.split(" "): + self.scpfile(node,srcfile,destfile,user) + +######### Get the existing Cluster Nodes ############## + def get_existing_clu_nodes(self,eflag): + """ + Checking existing Cluster nodes and returning cluster nodes + """ + cluster_nodes=None + self.log_info_message("Checking existing CRS nodes and returning cluster nodes",self.file_name) + if self.check_key("EXISTING_CLS_NODE",self.ora_env_dict): + return self.ora_env_dict["EXISTING_CLS_NODE"] + else: + if eflag: + 
self.log_error_message('''Existing CLS nodes are not set. Exiting..''',self.file_name) + self.prog_exit("127") + else: + self.log_warn_message('''Existing CLS nodes are not set.''',self.file_name) + return cluster_nodes + + +######### Return the existing Cluster Nodes using oldnodes ############## + def get_existing_cls_nodes(self,hostname,sshnode): + """ + Checking existing Cluster nodes using clsnodes + """ + giuser,gihome,gibase,oinv=self.get_gi_params() + cluster_nodes=None + cmd='''su - {0} -c "ssh {2} '{1}/bin/olsnodes'"'''.format(giuser,gihome,sshnode) + output,error,retcode=self.execute_cmd(cmd,None,None) + self.check_os_err(output,error,retcode,True) + crs_nodes="" + if not hostname: + hostname="" + + crs_node_list=output.split("\n") + for node in crs_node_list: + if hostname != node: + crs_nodes= crs_nodes + "," + node + + return crs_nodes.strip(",") + + +######### Get the Cluster Nodes ############## + def get_cluster_nodes(self): + """ + Checking Cluster nodes and returning cluster nodes + """ + cluster_nodes=None + self.log_info_message("Checking CRS nodes and returning cluster nodes",self.file_name) + if self.check_key("CRS_NODES",self.ora_env_dict): + cluster_nodes,vip_nodes,priv_nodes=self.process_cluster_vars("CRS_NODES") + else: + cluster_nodes = self.get_public_hostname() + + return cluster_nodes + +####### Get the nwIfaces and network ####### + def get_nwifaces(self): + """ + This function returns the oracle.install.crs.config.networkInterfaceList for prepare responsefile + """ + nwlist="" + nwname="" + nwflag=None + privnwlist="" + ipcidr="" + netmask="" + netmasklist="" + + if self.detect_k8s_env(): + if self.check_key("NW_CIDR",self.ora_env_dict): + ipcidr=self.get_cidr_info(self.ora_env_dict["NW_CIDR"]) + netmask=self.ora_env_dict["NW_CIDR"].split("/")[1] + if ipcidr: + self.log_info_message("Getting network card name for CIDR: " + ipcidr,self.file_name) + nwname=self.get_nw_name(ipcidr) + else: + 
pubmask,pubsubnet,nwname=self.get_nwlist("public") + ip_address=pubsubnet.split(".") + ipcidr=ip_address[0] + "." + ip_address[1] + ".0.0" + netmask_address=pubmask.split(".") + netmask=netmask_address[0] + "." + netmask_address[1] + ".0.0" + + privnwlist,privnetmasklist=self.get_priv_nwlist() + if nwname: + self.log_info_message("The network card: " + nwname + " for the ip: " + ipcidr,self.file_name) + nwlist='''{0}:{1}:1,{2}'''.format(nwname,ipcidr,privnwlist) + netmasklist='''{0}:{1},{2}'''.format(nwname,netmask,privnetmasklist) + else: + self.log_error_message("Failed to get network card matching for the subnet:" + ipcidr ,self.file_name) + self.prog_exit("127") + elif self.check_key("SINGLE_NETWORK",self.ora_env_dict): + pubmask,pubsubnet,pubnwname=self.get_nwlist("public") + nwlist='''{0}:{1}:1,{0}:{1}:5'''.format(pubnwname,pubsubnet) + else: + pubmask,pubsubnet,pubnwname=self.get_nwlist("public") + privnwlist,privnetmasklist=self.get_priv_nwlist() + nwlist='''{0}:{1}:1,{2}'''.format(pubnwname,pubsubnet,privnwlist) + + + return nwlist,netmasklist + +###### Get the Private nwlist ####################### + def get_priv_nwlist(self): + """ + This function get the private nwlist + """ + privnwlist="" + netmasklist="" + if self.check_key("PRIVATE_HOSTS",self.ora_env_dict): + privmask,privsubnet,privnwname=self.get_nwlist("privatehost") + privnwlist='''{0}:{1}:5'''.format(privnwname,privsubnet) + netmasklist='''{0}:{1}'''.format(privnwname,privmask) + else: + if self.check_key("PRIVATE_IP1_LIST",self.ora_env_dict): + privmask,privsubnet,privnwname=self.get_nwlist("privateip1") + privnwlist='''{0}:{1}:5'''.format(privnwname,privsubnet) + netmasklist='''{0}:{1}'''.format(privnwname,privmask) + if self.check_key("PRIVATE_IP2_LIST",self.ora_env_dict): + privmask,privsubnet,privnwname=self.get_nwlist("privateip2") + privnwlist='''{0},{1}:{2}:5'''.format(privnwlist,privnwname,privsubnet) + netmasklist='''{0},{1}:{2}'''.format(netmasklist,privnwname,privmask) + + return 
privnwlist,netmasklist + +####### Detect K8s Env ################################ + def detect_k8s_env(self): + """ + This function detect the K8s env and return the True or False + """ + k8s_flag=None + f = open("/proc/self/cgroup", "r") + if "/kubepods" in f.read(): + k8s_flag=True + else: + if self.check_file("/run/secrets/kubernetes.io/serviceaccount/token","local",None,None): + k8s_flag=True + + return k8s_flag +######## Process the nwlist and return netmask,net subnet and ne card name ####### + def get_nwlist(self,checktype): + """ + This function returns the nwlist for prepare responsefile + """ + nwlist=None + nwflag=None + nwname=None + nmask=None + nwsubnet=None + domain=None + ipaddr="" + + if self.check_key("CRS_NODES",self.ora_env_dict): + pub_nodes,vip_nodes,priv_nodes=self.process_cluster_vars("CRS_NODES") + if checktype=="privatehost": + crs_nodes=priv_nodes.replace(" ",",") + nodelist=priv_nodes.split(" ") + domain=self.ora_env_dict["PRIVATE_HOSTS_DOMAIN"] if self.check_key("PRIVATE_HOSTS_DOMAIN",self.ora_env_dict) else self.get_host_domain() + elif checktype=="privateip1": + nodelist=self.ora_env_dict["PRIVATE_IP1_LIST"].split(",") + elif checktype=="privateip2": + nodelist=self.ora_env_dict["PRIVATE_IP2_LIST"].split(",") + else: + crs_nodes=pub_nodes.replace(" ",",") + nodelist=pub_nodes.split(" ") + domain=self.ora_env_dict["PUBLIC_HOSTS_DOMAIN"] if self.check_key("PUBLIC_HOSTS_DOMAIN",self.ora_env_dict) else self.get_host_domain() + print(nodelist) + for pubnode in nodelist: + self.log_info_message("Getting IP for the hostname: " + pubnode,self.file_name) + if checktype=="privateip1": + ipaddr=pubnode + elif checktype=="privateip2": + ipaddr=pubnode + else: + ipaddr=self.get_ip(pubnode,domain) + + if ipaddr: + self.log_info_message("Getting network name for the IP: " + ipaddr,self.file_name) + nwname=self.get_nw_name(ipaddr) + if nwname: + self.log_info_message("The network card: " + nwname + " for the ip: " + ipaddr,self.file_name) + 
nmask=self.get_netmask_info(nwname) + nwsubnet=self.get_subnet_info(ipaddr,nmask) + nwflag=True + break + else: + self.log_error_message("Failed to get the IP addr for public hostname: " + pubnode + ".Exiting..",self.file_name) + self.prog_exit("127") + + if nmask and nwsubnet and nwname and nwflag: + return nmask,nwsubnet,nwname + else: + self.log_error_message("Failed to get the required details. Exiting...",self.file_name) + self.prog_exit("127") + +######## Get the CRS Nodes ################## + def get_crsnodes(self): + """ + This function returns the oracle.install.crs.config.clusterNodes for prepare responsefile + """ + cluster_nodes="" + pub_nodes,vip_nodes,priv_nodes=self.process_cluster_vars("CRS_NODES") + for (pubnode,vipnode) in zip(pub_nodes.split(" "),vip_nodes.split(" ")): + cluster_nodes += pubnode + ":" + vipnode + ":HUB" + "," + + return cluster_nodes.strip(',') + +######## Process host variables ############## + def process_cluster_vars(self,key): + """ + This function process CRS_NODES and return public hosts, or VIP hosts or Priv Hosts or cluser string + """ + pubhost=" " + viphost=" " + privhost=" " + self.log_info_message("Inside process_cluster_vars()",self.file_name) + cvar_str=self.ora_env_dict[key] + for item in cvar_str.split(";"): + self.log_info_message("Cluster Node Desc: " + item ,self.file_name) + cvar_dict=dict(item1.split(":") for item1 in item.split(",")) + for ckey in cvar_dict.keys(): + # self.log_info_message("key:" + ckey ,self.file_name) + # self.log_info_message("Value:" + cvar_dict[ckey] ,self.file_name) + if ckey == 'pubhost': + pubhost += cvar_dict[ckey] + " " + if ckey == 'viphost': + viphost += cvar_dict[ckey] + " " + if ckey == 'privhost': + privhost += cvar_dict[ckey] + " " + self.log_info_message("Pubhosts:" + pubhost.strip() + " Pubhost count:" + str(len(pubhost.strip().split(" "))),self.file_name) + self.log_info_message("Viphosts:" + viphost.strip() + "Viphost count:" + str(len(viphost.strip().split(" 
"))),self.file_name) + if len(pubhost.strip().split(" ")) == len(viphost.strip().split(" ")): + return pubhost.strip(),viphost.strip(),privhost.strip() + else: + self.log_error_message("Public hostname count is not matching with virtual hostname count.Exiting...",self.file_name) + self.prog_exit("127") + + +######### Get the Public Hostname############## + def get_public_hostname(self): + """ + Return Public Hostname + """ + return socket.gethostname() + + ######### Get the DOMAIN############## + def get_host_domain(self): + """ + Return Public Hostname + """ + domain=None + domain=socket.getfqdn().split('.',1)[1] + if domain is None: + domain="example.info" + + return domain + + ######### get the public IP ############## + def get_ip(self,hostname,domain): + """ + Return the Ip based on hostname + """ + if not domain: + domain=self.get_host_domain() + + return socket.gethostbyname(hostname + '.' + domain) + +######### Get network card ############## + def get_nw_name(self,ip): + """ + Get the network card name based on IP + """ + self.log_info_message('''Getting network card name based on IP: {0} '''.format(ip),self.file_name) + cmd='''ifconfig | awk '/{0}/ {{ print $1 }}' RS="\n\n" | awk -F ":" '{{ print $1 }}' | head -1'''.format(ip) + output,error,retcode=self.execute_cmd(cmd,None,None) + self.check_os_err(output,error,retcode,True) + return output.strip() + +######### Get the netmask info ################ + def get_netmask_info(self,nwcard): + """ + Get the network mask + """ + self.log_info_message('''Getting netmask'''.format(nwcard),self.file_name) + cmd="""ifconfig {0} | awk '/netmask/ {{print $4}}'""".format(nwcard) + output,error,retcode=self.execute_cmd(cmd,None,None) + return output.strip() + +######### Get network subnet info ############## + def get_subnet_info(self,ip,netmask): + """ + Get the network card name based on IP + """ + self.log_info_message('''Getting network subnet info name based on IP {0} and netmask 
{1}'''.format(ip,netmask),self.file_name) + cmd="""ipcalc -np {0} {1} | grep NETWORK | awk -F '=' '{{ print $2 }}'""".format(ip,netmask) + output,error,retcode=self.execute_cmd(cmd,None,None) + self.check_os_err(output,error,retcode,True) + return output.strip() + +######### Get CIDR portion info ############## + def get_cidr_info(self,cidr): + """ + Get the non zero portion of the CIDR + """ + self.log_info_message('''Checking if network card exist with matching network details {0}'''.format(cidr),self.file_name) + iplist=cidr.split(".") + ipcidr="" + for ipo in iplist: + if ipo.startswith('0'): + break + else: + ipcidr += ipo + "." + + str1=ipcidr.strip() + ipcidr=str1.strip(".") + return ipcidr + +######## Build the ASM device list ######### + def build_asm_device(self,key,reduntype): + """ + Build the ASM device list + """ + self.log_info_message('''Building ASM device list''',self.file_name) + ASM_DISKGROUP_FG_DISKS="" + ASM_DISKGROUP_DISKS="" + asmdevlist=self.ora_env_dict[key].split(",") + for disk1 in asmdevlist: + disk=disk1.strip('"') + if self.check_key("ASM_DISK_CLEANUP_FLAG",self.ora_env_dict): + if self.ora_env_dict["ASM_DISK_CLEANUP_FLAG"] == "TRUE": + self.asm_disk_cleanup(disk) + if reduntype == 'NORMAL': + ASM_DISKGROUP_FG_DISKS+=disk + ",," + ASM_DISKGROUP_DISKS+=disk + "," + elif reduntype == 'HIGH': + ASM_DISKGROUP_FG_DISKS+=disk + ",," + ASM_DISKGROUP_DISKS+=disk + "," + else: + ASM_DISKGROUP_FG_DISKS+=disk + "," + ASM_DISKGROUP_DISKS+=disk + "," + + if reduntype != 'NORMAL' and reduntype != 'HIGH': + fdata=ASM_DISKGROUP_DISKS[:-1] + ASM_DISKGROUP_DISKS=fdata + + return ASM_DISKGROUP_FG_DISKS,ASM_DISKGROUP_DISKS + +######## Build the ASM device list ######### + def build_asm_discovery_str(self,key): + """ + Build the ASM device list + """ + asm_disk=None + asmdisk=self.ora_env_dict[key].split(",")[0] + asm_disk_dir=asmdisk.rsplit("/",1)[0] + asm_disk1=asmdisk.rsplit("/",1)[1] + if len(asm_disk1) <= 3: + 
asm_disk=asmdisk.rsplit("/",1)[1][:(len(asm_disk1)-1)] + else: + asm_disk=asmdisk.rsplit("/",1)[1][:(len(asm_disk1)-2)] + + disc_str=asm_disk_dir + '/' + asm_disk + '*' + return disc_str + +######## set the ASM device permission ############### + def set_asmdisk_perm(self,key,eflag): + """ + This function set the correct permissions for ASM Disks + """ + if self.check_key(key,self.ora_env_dict): + self.log_info_message (key + " variable is set",self.file_name) + for device1 in self.ora_env_dict[key].split(','): + device=device1.strip('"') + if self.disk_exists(device): + msg='''Changing device permission {0}'''.format(device) + self.log_info_message(msg,self.file_name) + cmd='''chmod 660 {0};chown grid:asmdba {0}'''.format(device) + output,error,retcode=self.execute_cmd(cmd,None,None) + self.check_os_err(output,error,retcode,True) + else: + self.log_error_message('''ASM device {0} is passed but disk doesn't exist. Exiting..'''.format(device),self.file_name) + self.prog_exit("None") + else: + if eflag: + self.log_error_message(key + " is not passed. 
Exiting....",self.file_name) + self.prog_exit("None") + +######## sCLeanup the disks ############### + def asm_disk_cleanup(self,disk): + """ + This function cleanup the ASM Disks + """ + cmd='''dd if=/dev/zero of={0} bs=8k count=10000 '''.format(disk) + output,error,retcode=self.execute_cmd(cmd,None,None) + self.check_os_err(output,error,retcode,True) + + +######## Get the GI Image ############### + def get_gi_params(self): + """ + This function return the GI home + """ + gihome=self.ora_env_dict["GRID_HOME"] + gibase=self.ora_env_dict["GRID_BASE"] + giuser=self.ora_env_dict["GRID_USER"] + oinv=self.ora_env_dict["INVENTORY"] + + return giuser,gihome,gibase,oinv + +######## Get the TMPDIR ################ + def get_tmpdir(self): + """ + This function returns the TMPDIR + Returns: + tmpdir: return tmpdir + """ + return self.ora_env_dict["TMPDIR"] if self.check_key("TMPDIR",self.ora_env_dict) else "/var/tmp" + +######## Get the DB Image ############### + def get_db_params(self): + """ + This function return the DB home + """ + dbhome=self.ora_env_dict["DB_HOME"] + dbbase=self.ora_env_dict["DB_BASE"] + dbuser=self.ora_env_dict["DB_USER"] + oinv=self.ora_env_dict["INVENTORY"] + + return dbuser,dbhome,dbbase,oinv + +######## Get the cmd ############### + def get_sw_cmd(self,key,rspfile,node,netmasklist): + """ + This function return the installation cmd + """ + cmd="" + copyflag="" + if self.check_key("COPY_GRID_SOFTWARE",self.ora_env_dict): + copyflag=" -noCopy " + + prereq=" " + if self.check_key("IGNORE_CRS_PREREQS",self.ora_env_dict): + prereq=" -ignorePreReq " + + giuser,gihome,gbase,oinv=self.get_gi_params() + snic="-J-Doracle.install.crs.allowSingleNIC=true" if self.check_key("SINGLENIC",self.ora_env_dict) else "" + runCmd="" + if key == "INSTALL": + if self.check_key("APPLY_RU_LOCATION",self.ora_env_dict): + self.opatch_apply() + ruLoc=self.ora_env_dict["APPLY_RU_LOCATION"] + runCmd='''gridSetup.sh -applyRU "{0}"'''.format(self.ora_env_dict["APPLY_RU_LOCATION"]) 
+ else: + runCmd='''gridSetup.sh ''' + + + if self.check_key("DEBUG_MODE",self.ora_env_dict): + dbgCmd='''{0} -debug '''.format(runCmd) + runCmd=dbgCmd + + self.log_info_message("runCmd set to : {0}".format(runCmd),self.file_name) + if self.detect_k8s_env(): + #param1="-skipPrereqs -J-Doracle.install.grid.validate.all=false oracle.install.crs.config.netmaskList=eth0:255.255.0.0,eth0:255.255.0.0" + if netmasklist is not None: + param1='''oracle.install.crs.config.netmaskList={0}'''.format(netmasklist) + else: + param1='''oracle.install.crs.config.netmaskList=eth0:255.255.0.0,eth1:255.255.255.0,eth2:255.255.255.0'''.format(netmasklist) + + cmd='''su - {0} -c "{1}/{6} -waitforcompletion {4} -silent {3} -responseFile {2} {5}"'''.format(giuser,gihome,rspfile,snic,copyflag,param1,runCmd) + else: + if self.check_key("APPLY_RU_LOCATION",self.ora_env_dict): + cmd='''su - {0} -c "{1}/{5} -waitforcompletion {4} -silent {6} {3} -responseFile {2}"'''.format(giuser,gihome,rspfile,snic,copyflag,runCmd,prereq) + else: + cmd='''su - {0} -c "{1}/{5} -waitforcompletion {4} -silent {6} {3} -responseFile {2}"'''.format(giuser,gihome,rspfile,snic,copyflag,runCmd,prereq) + elif key == 'ADDNODE': + status=self.check_home_inv(None,gihome,giuser) + if status: + copyflag=" -noCopy " + cmd='''su - {0} -c "ssh {1} '{2}/gridSetup.sh -silent -waitForCompletion {3} {5} -responseFile {4}'"'''.format(giuser,node,gihome,copyflag,rspfile,prereq) + else: + copyflag=" " + cmd='''su - {0} -c "ssh {1} '{2}/gridSetup.sh -silent -waitForCompletion {3} {5} -responseFile {4} '"'''.format(giuser,node,gihome,copyflag,rspfile,prereq) + else: + pass + return cmd + +########## Installing Grid Software on Individual nodes + def crs_sw_install_on_node(self,giuser,copyflag,crs_nodes,oinv,gihome,gibase,osdba,osoper,osasm,version,node): + """ + This function install crs sw on every node and register with oraInventory + """ + cmd=None + prereq=" " + if self.check_key("IGNORE_CRS_PREREQS",self.ora_env_dict): + prereq=" 
-ignorePreReq " + if int(version) < 23: + rspdata='''su - {0} -c "ssh {10} {1}/gridSetup.sh {11} -waitforcompletion {2} -silent + installOption=CRS_SWONLY + clusterNodes={3} + INVENTORY_LOCATION={4} + ORACLE_HOME={5} + ORACLE_BASE={6} + OSDBA={7} + OSOPER={8} + OSASM={9}"'''.format(giuser,gihome,copyflag,crs_nodes,oinv,gihome,gibase,osdba,osoper,osasm,node,prereq) + + cmd=rspdata.replace('\n'," ") + else: + cmd='''su - {0} -c "ssh {10} '{1}/gridSetup.sh -silent -setupHome -OSDBA {7} -OSOPER {8} -OSASM {9} -ORACLE_BASE {6} -INVENTORY_LOCATION {4} -clusterNodes {10} {2}\'"'''.format(giuser,gihome,copyflag,crs_nodes,oinv,gihome,gibase,osdba,osoper,osasm,node) + + output,error,retcode=self.execute_cmd(cmd,None,None) + self.check_os_err(output,error,retcode,None) + self.check_crs_sw_install(output) + + def opatch_apply(self): + """This function apply opatch before apply RU + """ + giuser,gihome,gbase,oinv=self.get_gi_params() + today=datetime.date.today() + if self.check_key("OPATCH_ZIP_FILE",self.ora_env_dict): + cmd1='''su - {2} -c "mv {0}/OPatch {0}/OPatch_{1}_old"'''.format(gihome,today,giuser) + cmd2='''su - {2} -c "unzip {0} -d {1}/"'''.format(self.ora_env_dict["OPATCH_ZIP_FILE"],gihome,giuser) + for cmd in cmd1,cmd2: + output,error,retcode=self.execute_cmd(cmd,None,True) + self.check_os_err(output,error,retcode,True) + + def check_crs_sw_install(self,swdata): + """ + This function check the if the sw install went fine + """ + if not self.check_substr_match(swdata,"root.sh"): + self.log_error_message("Grid software install failed. 
Exiting...",self.file_name) + self.prog_exit("127") + + def run_orainstsh_local(self,giuser,node,oinv): + """ + This function run the orainst after grid setup + """ + cmd='''su - {0} -c "sudo {2}/orainstRoot.sh"'''.format(giuser,node,oinv) + output,error,retcode=self.execute_cmd(cmd,None,None) + self.check_os_err(output,error,retcode,True) + + def run_rootsh_local(self,gihome,giuser,node): + """ + This function run the root.sh after grid setup + """ + self.log_info_message("Running root.sh on node " + node,self.file_name) + cmd='''su - {0} -c "sudo {2}/root.sh"'''.format(giuser,node,gihome) + output,error,retcode=self.execute_cmd(cmd,None,None) + self.check_os_err(output,error,retcode,True) + +######## Get the oraversion ############### + def get_rsp_version(self,key,node): + """ + This function return the oraVersion + """ + cmd="" + giuser,gihome,gbase,oinv=self.get_gi_params() + if key == "INSTALL": + cmd='''su - {0} -c "{1}/bin/oraversion -majorVersion"'''.format(giuser,gihome) + elif key == 'ADDNODE': + cmd='''su - {0} -c "ssh {2} {1}/bin/oraversion -majorVersion"'''.format(giuser,gihome,node) + else: + pass + + vdata="" + output,error,retcode=self.execute_cmd(cmd,None,None) + self.check_os_err(output,error,retcode,None) + if output.strip() == "12.2": + vdata="12.2.0" + elif output.strip() == "21": + vdata = "21.0.0" + elif output.strip() == "23": + vdata = "23.0.0" + elif output.strip() == "19": + vdata = "19.0.0" + elif output.strip() == "18": + vdata = "18.0.0" + else: + self.log_error_message("The SW major version is not matching {12.2|18.3|19.3|21.3}. Exiting....",self.file_name) + self.prog_exit("None") + + return vdata + +######### Check if GI is already installed on this machine ########### + def check_gi_installed(self,retcode1,gihome,giuser): + """ + Check if the Gi is installed on this machine + """ + if retcode1 == 0: + if os.path.isdir("/etc/oracle"): + bstr="Grid is already installed on this machine and /etc/oracle also exist. 
Skipping Grid setup.." + self.log_info_message(self.print_banner(bstr),self.file_name) + return True + else: + dir = os.listdir(gihome) + if len(dir) != 0: + status=self.check_home_inv(None,gihome,giuser) + if status: + status=self.restore_gi_files(gihome,giuser) + if not status: + return False + else: + status=self.start_crs(gihome,giuser) + if status: + return True + else: + return False + else: + bstr="Grid is not configured on this machine and /etc/oracle does not exist." + self.log_info_message(self.print_banner(bstr),self.file_name) + return False + else: + self.log_info_message("Grid is not installed on this machine. Proceeding further...",self.file_name) + return False + +######## Restoring GI FIles ####################### + def restore_gi_files(self,gihome,giuser): + """ + Restoring GI Files + """ + cmd='''{1}/crs/install/rootcrs.sh -updateosfiles'''.format(giuser,gihome) + output,error,retcode=self.execute_cmd(cmd,None,None) + self.check_os_err(output,error,retcode,None) + if retcode == 0: + return True + else: + return False + +###### Starting Crs ############### + def start_crs(self,gihome,giuser): + """ + starting CRS + """ + cmd='''{1}/bin/crsctl start crs'''.format(giuser,gihome) + output,error,retcode=self.execute_cmd(cmd,None,None) + self.check_os_err(output,error,retcode,None) + if retcode == 0: + return True + else: + return False + +######### Check if GI is already installed on this machine ########### + def check_rac_installed(self,retcode1): + """ + Check if the RAC is installed on this machine + """ + if retcode1 == 0: + bstr="RAC HOME is already installed on this machine!" + self.log_info_message(self.print_banner(bstr),self.file_name) + return True + else: + self.log_info_message("Oracle RAC home is not installed on this machine. 
Proceeding further...",self.file_name) + return False + + +######## Print the banner ############### + def print_banner(self,btext): + """ + print the banner + """ + strlen=len(btext) + sep='=' + sepchar=sep * strlen + banner_text=''' + {0} + {1} + {0} + '''.format(sepchar,btext) + return banner_text + +######### Sqlplus connect string ########### + def get_sqlplus_str(self,home,osid,osuser,dbuser,password,hostname,port,svc,osep,role,wallet): + """ + return the sqlplus connect string + """ + path='''/usr/bin:/bin:/sbin:/usr/local/sbin:{0}/bin'''.format(home) + ldpath='''{0}/lib:/lib:/usr/lib'''.format(home) + export_cmd='''export ORACLE_HOME={0};export PATH={1};export LD_LIBRARY_PATH={2};export ORACLE_SID={3}'''.format(home,path,ldpath,osid) + if dbuser == 'sys' and password and hostname and port and svc: + return '''su - {7} -c "{5};{6}/bin/sqlplus -S {0}/{1}@//{2}:{3}/{4} as sysdba"'''.format(dbuser,password,hostname,port,svc,export_cmd,home,osuser) + elif dbuser != 'sys' and password and hostname and svc: + return '''su - {7} -c "{5};{6}/bin/sqlplus -S {0}/{1}@//{2}:{3}/{4}"'''.format(dbuser,password,hostname,"1521",svc,export_cmd,home,osuser) + elif dbuser and osep: + return dbuser + elif dbuser == 'sys' and not password: + return '''su - {2} -c "{1};{0}/bin/sqlplus -S '/ as sysdba'"'''.format(home,export_cmd,osuser) + elif dbuser == 'sys' and password: + return '''su - {4} -c "{1};{0}/bin/sqlplus -S {2}/{3} as sysdba"'''.format(home,export_cmd,dbuser,password,osuser) + elif dbuser != 'sys' and password: + return '''su - {4} -c "{1};{0}/bin/sqlplus -S {2}/{3}"'''.format(home,export_cmd,dbuser,password,osuser) + else: + self.log_info_message("Atleast specify db user and password for db connectivity. 
Exiting...",self.file_name) + self.prog_exit("127") + +######### RMAN connect string ########### + def get_rman_str(self,home,osid,osuser,dbuser,password,hostname,port,svc,osep,role,wallet): + """ + return the rman connect string + """ + path='''/usr/bin:/bin:/sbin:/usr/local/sbin:{0}/bin'''.format(home) + ldpath='''{0}/lib:/lib:/usr/lib'''.format(home) + export_cmd='''export ORACLE_HOME={0};export PATH={1};export LD_LIBRARY_PATH={2};export ORACLE_SID={3}'''.format(home,path,ldpath,osid) + if dbuser == 'sys' and password and hostname and port and svc: + return '''su - {7} -c "{5};{6}/bin/rman {0}/{1}@//{2}:{3}/{4}"'''.format(dbuser,password,hostname,port,svc,export_cmd,home,osuser) + elif dbuser != 'sys' and password and hostname and svc: + return '''su - {7} -c "{5};{6}/bin/rman {0}/{1}@//{2}:{3}/{4}"'''.format(dbuser,password,hostname,"1521",svc,export_cmd,home +,osuser) + elif dbuser == 'sys' and not password: + return '''su - {2} -c "{1};{0}/bin/rman target /"'''.format(home,export_cmd,osuser) + elif dbuser == 'sys' and password: + return '''su - {4} -c "{1};{0}/bin/rman target {2}/{3}"'''.format(home,export_cmd,dbuser,password,osuser) + elif dbuser != 'sys' and password: + return '''su - {4} -c "{1};{0}/bin/rman target {2}/{3}"'''.format(home,export_cmd,dbuser,password,osuser) + else: + self.log_info_message("Atleast specify db user and password for db connectivity. 
Exiting...",self.file_name) + self.prog_exit("127") + +######### dgmgrl connect string ########### + def get_dgmgr_str(self,home,osid,osuser,dbuser,password,hostname,port,svc,osep,role,wallet): + """ + return the dgmgrl connect string + """ + path='''/usr/bin:/bin:/sbin:/usr/local/sbin:{0}/bin'''.format(home) + ldpath='''{0}/lib:/lib:/usr/lib'''.format(home) + if role is None: + role='sysdg' + + export_cmd='''export ORACLE_HOME={0};export PATH={1};export LD_LIBRARY_PATH={2};export ORACLE_SID={3}'''.format(home,path,ldpath,osid) + if dbuser == 'sys' and password and hostname and port and svc: + return '''su - {7} -c "{5};{6}/bin/dgmgrl {0}/{1}@//{2}:{3}/{4} as {8}"'''.format(dbuser,password,hostname,port,svc,export_cmd,home,osuser,role) + elif dbuser != 'sys' and password and hostname and svc: + return '''su - {7} -c "{5};{6}/bin/dgmgrl {0}/{1}@//{2}:{3}/{4} as {8}"'''.format(dbuser,password,hostname,"1521",svc,export_cmd,home,osuser,role) + elif dbuser and osep: + return dbuser + elif dbuser == 'sys' and not password: + return '''su - {2} -c "{1};{0}/bin/dgmgrl /"'''.format(home,export_cmd,osuser) + elif dbuser == 'sys' and password: + return '''su - {4} -c "{1};{0}/bin/dgmgrl {2}/{3} as {5}"'''.format(home,export_cmd,dbuser,password,osuser,role) + elif dbuser != 'sys' and password: + return '''su - {4} -c "{1};{0}/bin/dgmgrl {2}/{3}"'''.format(home,export_cmd,dbuser,password,osuser) + else: + self.log_info_message("Atleast specify db user and password for db connectivity. 
Exiting...",self.file_name) + self.prog_exit("127") + +######## function to get tnssvc str ###### + def get_tnssvc_str(self,dbsvc,dbport,dbscan): + """ + return tnssvc + """ + tnssvc='''(DESCRIPTION = (ADDRESS = (PROTOCOL = TCP)(HOST = {0})(PORT = {1})) (CONNECT_DATA = (SERVER = DEDICATED) (SERVICE_NAME = {2})))'''.format(dbscan,dbport,dbsvc) + return tnssvc + +######### Sqlplus ########### + def get_inst_sid(self,dbuser,dbhome,osid,hostname): + """ + return the sid + """ + path='''/usr/bin:/bin:/sbin:/usr/local/sbin:{0}/bin'''.format(dbhome) + ldpath='''{0}/lib:/lib:/usr/lib'''.format(dbhome) + cmd='''su - {5} -c "export ORACLE_HOME={0};export PATH={1};export LD_LIBRARY_PATH={2}; {0}/bin/srvctl status database -d {3} | grep {4}"'''.format(dbhome,path,ldpath,osid,hostname,dbuser) + output,error,retcode=self.execute_cmd(cmd,None,None) + self.check_os_err(output,error,retcode,None) + if len(output.split(" ")) > 1: + inst_sid=output.split(" ")[1] + return inst_sid + else: + return None + +######### Stop RAC DB ######## + def stop_rac_db(self,dbuser,dbhome,osid,hostname): + """ + stop the Database + """ + path='''/usr/bin:/bin:/sbin:/usr/local/sbin:{0}/bin'''.format(dbhome) + ldpath='''{0}/lib:/lib:/usr/lib'''.format(dbhome) + cmd='''su - {5} -c "export ORACLE_HOME={0};export PATH={1};export LD_LIBRARY_PATH={2}; {0}/bin/srvctl stop database -d {3}"'''.format(dbhome,path,ldpath,osid,hostname,dbuser) + output,error,retcode=self.execute_cmd(cmd,None,None) + self.check_os_err(output,error,retcode,True) + +######### Stop RAC DB ######## + def get_host_dbsid(self,hname,connect_str): + """ + get the host sid based on hostname + """ + sqlcmd=''' + set heading off; + set pagesize 0; + select instance_name from gv$instance where HOST_NAME='{0}'; + exit; + '''.format(hname) + self.set_mask_str(self.get_sys_passwd()) + output,error,retcode=self.run_sqlplus(connect_str,sqlcmd,None) + self.log_info_message("Calling check_sql_err() to validate the sql command return 
status",self.file_name) + self.check_sql_err(output,error,retcode,None) + self.unset_mask_str() + return output.strip() + + +######### Get SVC Domain ######## + def get_svc_domain(self,hname): + """ + get the host domain baded on service name + """ + svc_dom=None + cmd='''nslookup {0}'''.format(hname) + output,error,retcode=self.execute_cmd(cmd,None,None) + self.check_os_err(output,error,retcode,None) + for line in output.split('\n'): + if "Name:" in line: + svc_dom=line.split(':')[1].strip() + return svc_dom + +######### Stop RAC DB ######## + def start_rac_db(self,dbuser,dbhome,osid,node=None,startoption=None): + """ + Start the Database + """ + path='''/usr/bin:/bin:/sbin:/usr/local/sbin:{0}/bin'''.format(dbhome) + ldpath='''{0}/lib:/lib:/usr/lib'''.format(dbhome) + + if node is None: + nodename="" + else: + nodename=node + + if startoption is None: + startflag="" + else: + startflag=''' -o {0}'''.format(startoption) + + cmd='''su - {5} -c "export ORACLE_HOME={0};export PATH={1};export LD_LIBRARY_PATH={2}; {0}/bin/srvctl start database -d {3} {6}"'''.format(dbhome,path,ldpath,osid,nodename,dbuser,startflag) + output,error,retcode=self.execute_cmd(cmd,None,None) + self.check_os_err(output,error,retcode,True) + +######### DB-Status ########### + def get_db_status(self,dbuser,dbhome,osid): + """ + return the status of the database + """ + path='''/usr/bin:/bin:/sbin:/usr/local/sbin:{0}/bin'''.format(dbhome) + ldpath='''{0}/lib:/lib:/usr/lib'''.format(dbhome) + + cmd='''su - {4} -c "export ORACLE_HOME={0};export PATH={1};export LD_LIBRARY_PATH={2}; {0}/bin/srvctl status database -d {3}"'''.format(dbhome,path,ldpath,osid,dbuser) + output,error,retcode=self.execute_cmd(cmd,None,None) + self.check_os_err(output,error,retcode,True) + + def get_dbinst_status(self,dbuser,dbhome,inst_sid,sqlpluslogincmd): + """ + return the status of the local dbinstance + """ + sqlcmd=''' + set heading off; + set pagesize 0; + select status from v$instance; + exit; + ''' + 
self.set_mask_str(self.get_sys_passwd()) + output,error,retcode=self.run_sqlplus(sqlpluslogincmd,sqlcmd,None) + self.log_info_message("Calling check_sql_err() to validate the sql command return status",self.file_name) + self.check_sql_err(output,error,retcode,None) + self.unset_mask_str() + return output + +##### DB-Config ###### + def get_db_config(self,dbuser,dbhome,osid): + """ + return the db-config + """ + path='''/usr/bin:/bin:/sbin:/usr/local/sbin:{0}/bin'''.format(dbhome) + ldpath='''{0}/lib:/lib:/usr/lib'''.format(dbhome) + + cmd='''su - {4} -c "export ORACLE_HOME={0};export PATH={1};export LD_LIBRARY_PATH={2}; {0}/bin/srvctl config database -d {3}"'''.format(dbhome,path,ldpath,osid,dbuser) + output,error,retcode=self.execute_cmd(cmd,None,None) + self.check_os_err(output,error,retcode,True) + +##### Get service name ##### + def get_service_name(self): + """ + This function get the service_name. + """ + self.log_info_message("Inside get_service_name()",self.file_name) + service_name=None + osid=None + opdb=None + sparams=None + + reg_exp= self.service_regex() + for key in self.ora_env_dict.keys(): + if(reg_exp.match(key)): + rac_service_exist=None + service_name,osid,opdb,uniformflag,sparams=self.process_service_vars(key,None) + + return service_name,osid,opdb,sparams + +##### Setup DB Service ###### + def setup_db_service(self,type): + """ + This function setup the Oracle RAC database service. 
+ """ + self.log_info_message("Inside setup_db_service()",self.file_name) + status=False + service_name=None + reg_exp= self.service_regex() + for key in self.ora_env_dict.keys(): + if(reg_exp.match(key)): + rac_service_exist=None + service_name,osid,opdb,uniformflag,sparams=self.process_service_vars(key,type) + rac_service_exist=self.check_db_service_exist(service_name,osid) + if not rac_service_exist: + if type.lower() == "create": + self.create_db_service(service_name,osid,opdb,sparams) + else: + if type.lower() == "modify" and uniformflag is not True: + self.modify_db_service(service_name,osid,opdb,sparams) + else: + pass + rac_service_exist=self.check_db_service_exist(service_name,osid) + if rac_service_exist: + msg='''RAC db service exist''' + else: + msg='''RAC db service does not exist or creation failed''' + +##### Process DB Service ###### + def process_service_vars(self,key,type): + """ + This function process the service parameters for RAC service creation + """ + service=None + preferred=None + available=None + cardinality=None + tafpolicy=None + role=None + policy=None + resetstate=None + failovertype=None + failoverdelay=None + failoverretry=None + failover_restore=None + failback=None + pdb=None + clbgoal=None + rlbgoal=None + dtp=None + notification=None + commit_outcome=None + commit_outcome_fastpath=None + replay_init_time=None + session_state=None + drain_timeout=None + db=None + sparam="" + uniformflag=None + + if type is None: + type="create" + + self.log_info_message("Inside process_service_vars()",self.file_name) + cvar_str=self.ora_env_dict[key] + cvar_dict=dict(item.split(":") for item in cvar_str.split(";")) + for ckey in cvar_dict.keys(): + if type.lower() == 'modify': + if ckey == 'service': + service = cvar_dict[ckey] + sparam=sparam + " -service " + service + if ckey == 'preferred': + preferred = cvar_dict[ckey] + sparam=sparam +" -modifyconfig -preferred " + preferred + if ckey == 'available': + available = cvar_dict[ckey] + 
sparam=sparam +" -available " + available + else: + if ckey == 'service': + service = cvar_dict[ckey] + sparam=sparam + " -service " + service + if ckey == 'role': + role = cvar_dict[ckey] + sparam=sparam +" -role " + role + if ckey == 'preferred': + preferred = cvar_dict[ckey] + sparam=sparam +" -preferred " + preferred + if ckey == 'available': + available = cvar_dict[ckey] + sparam=sparam +" -available " + available + if ckey == 'cardinality': + cardinality = cvar_dict[ckey] + sparam=sparam +" -cardinality " + cardinality + uniformflag=True + if ckey == 'policy': + policy = cvar_dict[ckey] + sparam=sparam +" -policy " + policy + if ckey == 'tafpolicy': + tafpolicy = cvar_dict[ckey] + sparam=sparam +" -tafpolicy " + tafpolicy + if ckey == 'resetstate': + resetstate = cvar_dict[ckey] + sparam=sparam +" -resetstate " + resetstate + if ckey == 'failovertype': + failovertype = cvar_dict[ckey] + sparam=sparam +" -failovertype " + failovertype + if ckey == 'failoverdelay': + failoverdelay = cvar_dict[ckey] + sparam=sparam +" -failoverdelay " + failoverdelay + if ckey == 'failoverretry': + failoverretry = cvar_dict[ckey] + sparam=sparam +" -failoverretry " + failoverretry + if ckey == 'failback': + failback = cvar_dict[ckey] + sparam=sparam +" -failback " + failback + if ckey == 'failover_restore': + failover_restore = cvar_dict[ckey] + sparam=sparam +" -failover_restore " + failover_restore + if ckey == 'pdb': + pdb = cvar_dict[ckey] + if ckey == 'clbgoal': + clbgoal = cvar_dict[ckey] + sparam=sparam +" -clbgoal " + clbgoal + if ckey == 'rlbgoal': + rlbgoal = cvar_dict[ckey] + sparam=sparam +" -rlbgoal " + rlbgoal + if ckey == 'dtp': + dtp = cvar_dict[ckey] + sparam=sparam +" -dtp " + dtp + if ckey == 'notification': + notification = cvar_dict[ckey] + sparam=sparam +" -notification " + notification + if ckey == 'commit_outcome': + commit_outcome = cvar_dict[ckey] + sparam=sparam +" -commit_outcome " +commit_outcome + if ckey == 'commit_outcome_fastpath': + 
commit_outcome_fastpath = cvar_dict[ckey] + sparam=sparam +" -commit_outcome_fastpath " + commit_outcome_fastpath + if ckey == 'replay_init_time': + replay_init_time = cvar_dict[ckey] + sparam=sparam +" -replay_init_time " + replay_init_time + if ckey == 'session_state': + session_state = cvar_dict[ckey] + sparam=sparam +" -session_state " + session_state + if ckey == 'drain_timeout': + drain_timeout = cvar_dict[ckey] + sparam=sparam +" -drain_timeout " + drain_timeout + if ckey == 'db': + db = cvar_dict[ckey] + sparam=sparam +" -db " + db + + ### Check values must be set + if uniformflag is not True: + if pdb is None: + pdb = self.ora_env_dict["ORACLE_PDB_NAME"] if self.check_key("ORACLE_PDB_NAME",self.ora_env_dict) else "ORCLPDB" + sparam=sparam +" -pdb " + pdb + else: + sparam=sparam +" -pdb " + pdb + else: + pdb = self.ora_env_dict["ORACLE_PDB_NAME"] if self.check_key("ORACLE_PDB_NAME",self.ora_env_dict) else "ORCLPDB" + + if db is None: + db = self.ora_env_dict["DB_NAME"] + sparam=sparam +" -db " + db + + if service and db and pdb: + return service,db,pdb,uniformflag,sparam + else: + msg1='''service={0},pdb={1},db={2}'''.format((service or "Missing Value"),(pdb or "Missing Value"),(db or "Missing Value")) + msg='''RAC service params {0} is not set correctly. 
One or more value is missing {1}'''.format(key,msg1) + self.log_error_message(msg,self.file_name) + self.prog_exit("Error occurred") + +#### Process Service Regex #### + def service_regex(self): + """ + This function return the rgex to search the SERVICE[0-9]_PARAMS + """ + self.log_info_message("Inside service_regex()",self.file_name) + return re.compile('SERVICE[0-9]+_PARAMS') + +##### craete DB service ###### + def create_db_service(self,service_name,osid,opdb,sparams): + """ + create database service + """ + dbuser,dbhome,dbase,oinv=self.get_db_params() + path='''/usr/bin:/bin:/sbin:/usr/local/sbin:{0}/bin'''.format(dbhome) + ldpath='''{0}/lib:/lib:/usr/lib'''.format(dbhome) + cmd='''su - {4} -c "export ORACLE_HOME={0};export PATH={1};export LD_LIBRARY_PATH={2}; {0}/bin/srvctl add service {5}"'''.format(dbhome,path,ldpath,osid,dbuser,sparams,opdb,service_name) + output,error,retcode=self.execute_cmd(cmd,None,None) + self.check_os_err(output,error,retcode,None) + +##### craete DB service ###### + def modify_db_service(self,service_name,osid,opdb,sparams): + """ + modify database service + """ + dbuser,dbhome,dbase,oinv=self.get_db_params() + path='''/usr/bin:/bin:/sbin:/usr/local/sbin:{0}/bin'''.format(dbhome) + ldpath='''{0}/lib:/lib:/usr/lib'''.format(dbhome) + cmd='''su - {4} -c "export ORACLE_HOME={0};export PATH={1};export LD_LIBRARY_PATH={2}; {0}/bin/srvctl modify service {5}"'''.format(dbhome,path,ldpath,osid,dbuser,sparams,opdb,service_name) + output,error,retcode=self.execute_cmd(cmd,None,None) + self.check_os_err(output,error,retcode,None) + +##### check Db service ###### + def check_db_service_exist(self,service_name,osid): + """ + check if db service exist + """ + dbuser,dbhome,dbase,oinv=self.get_db_params() + path='''/usr/bin:/bin:/sbin:/usr/local/sbin:{0}/bin'''.format(dbhome) + ldpath='''{0}/lib:/lib:/usr/lib'''.format(dbhome) + cmd='''su - {4} -c "export ORACLE_HOME={0};export PATH={1};export LD_LIBRARY_PATH={2}; {0}/bin/srvctl status service 
-db {3} -s {5}"'''.format(dbhome,path,ldpath,osid,dbuser,service_name) + output,error,retcode=self.execute_cmd(cmd,None,None) + self.check_os_err(output,error,retcode,None) + msg='''PRKO-2017'''.format(service_name,osid) + if self.check_substr_match(output.lower(),msg.lower()): + return False + else: + return True + +##### check service ###### + def check_db_service_status(self,service_name,osid): + """ + check if db service is running + """ + dbuser,dbhome,dbase,oinv=self.get_db_params() + path='''/usr/bin:/bin:/sbin:/usr/local/sbin:{0}/bin'''.format(dbhome) + ldpath='''{0}/lib:/lib:/usr/lib'''.format(dbhome) + cmd='''su - {4} -c "export ORACLE_HOME={0};export PATH={1};export LD_LIBRARY_PATH={2}; {0}/bin/srvctl status service -db {3} -s {5}"'''.format(dbhome,path,ldpath,osid,dbuser,service_name) + output,error,retcode=self.execute_cmd(cmd,None,None) + self.check_os_err(output,error,retcode,None) + msg='''Service {0} is running on'''.format(service_name) + if self.check_substr_match(output.lower(),msg.lower()): + return True + else: + return False + +##### check service ###### + def start_db_service(self,service_name,osid): + """ + start the DB service + """ + dbuser,dbhome,dbase,oinv=self.get_db_params() + path='''/usr/bin:/bin:/sbin:/usr/local/sbin:{0}/bin'''.format(dbhome) + ldpath='''{0}/lib:/lib:/usr/lib'''.format(dbhome) + cmd='''su - {4} -c "export ORACLE_HOME={0};export PATH={1};export LD_LIBRARY_PATH={2}; {0}/bin/srvctl start service -db {3} -s {5}"'''.format(dbhome,path,ldpath,osid,dbuser,service_name) + output,error,retcode=self.execute_cmd(cmd,None,None) + self.check_os_err(output,error,retcode,None) + +######### Add RAC DB ######## + def add_rac_db(self,dbuser,dbhome,osid,spfile): + """ + add the Database + """ + path='''/usr/bin:/bin:/sbin:/usr/local/sbin:{0}/bin'''.format(dbhome) + ldpath='''{0}/lib:/lib:/usr/lib'''.format(dbhome) + cmd='''su - {5} -c "export ORACLE_HOME={0};export PATH={1};export LD_LIBRARY_PATH={2}; {0}/bin/srvctl add database -d 
{3} -oraclehome {0} -dbtype RAC -spfile '{4}'"'''.format(dbhome,path,ldpath,osid,spfile,dbuser) + output,error,retcode=self.execute_cmd(cmd,None,None) + self.check_os_err(output,error,retcode,True) + +######### Add RAC DB ######## + def add_rac_db_lsnr(self,dbuser,dbhome,osid,endpoints,lsnrname): + """ + add the Database + """ + path='''/usr/bin:/bin:/sbin:/usr/local/sbin:{0}/bin'''.format(dbhome) + ldpath='''{0}/lib:/lib:/usr/lib'''.format(dbhome) + cmd='''su - {3} -c "export ORACLE_HOME={0};export PATH={1};export LD_LIBRARY_PATH={2}; {0}/bin/srvctl add listener -listener {4} -endpoints {5}; {0}/bin/srvctl start listener -listener {4}"'''.format(dbhome,path,ldpath,dbuser,lsnrname,endpoints) + output,error,retcode=self.execute_cmd(cmd,None,None) + self.check_os_err(output,error,retcode,True) + +######### Add RAC DB ######## + def modify_rac_db_lsnr(self,dbuser,dbhome,osid,endpoints,lsnrname): + """ + add the Database + """ + path='''/usr/bin:/bin:/sbin:/usr/local/sbin:{0}/bin'''.format(dbhome) + ldpath='''{0}/lib:/lib:/usr/lib'''.format(dbhome) + cmd='''su - {3} -c "export ORACLE_HOME={0};export PATH={1};export LD_LIBRARY_PATH={2}; {0}/bin/srvctl modify listener -listener {4} -endpoints {5}"'''.format(dbhome,path,ldpath,dbuser,lsnrname,endpoints) + output,error,retcode=self.execute_cmd(cmd,None,None) + self.check_os_err(output,error,retcode,True) + +######### Add RAC DB ######## + def check_rac_db_lsnr(self,dbuser,dbhome,osid,endpoints,lsnrname): + """ + add the Database + """ + path='''/usr/bin:/bin:/sbin:/usr/local/sbin:{0}/bin'''.format(dbhome) + ldpath='''{0}/lib:/lib:/usr/lib'''.format(dbhome) + cmd='''su - {3} -c "export ORACLE_HOME={0};export PATH={1};export LD_LIBRARY_PATH={2}; {0}/bin/srvctl status listener -listener {6}"'''.format(dbhome,path,ldpath,dbuser,lsnrname,endpoints,lsnrname) + output,error,retcode=self.execute_cmd(cmd,None,None) + self.check_os_err(output,error,retcode,None) + msg='''Listener {0} is enabled'''.format(lsnrname) + if 
self.check_substr_match(output.lower(),msg.lower()): + return True + else: + return False + +######### Add RAC DB ######## + def update_scan(self,user,home,endpoints,node): + """ + Update Scan + """ + path='''/usr/bin:/bin:/sbin:/usr/local/sbin:{0}/bin'''.format(home) + ldpath='''{0}/lib:/lib:/usr/lib'''.format(home) + scanname=self.ora_env_dict["SCAN_NAME"] + cmd='''su - {3} -c "ssh {6} 'export ORACLE_HOME={0};export PATH={1};export LD_LIBRARY_PATH={2}; sudo {0}/bin/srvctl modify scan -scanname {4}'"'''.format(home,path,ldpath,user,scanname,endpoints,node) + output,error,retcode=self.execute_cmd(cmd,None,None) + self.check_os_err(output,error,retcode,None) + +######### Add RAC DB ######## + def start_scan(self,user,home,node): + """ + Update Scan + """ + path='''/usr/bin:/bin:/sbin:/usr/local/sbin:{0}/bin'''.format(home) + ldpath='''{0}/lib:/lib:/usr/lib'''.format(home) + scanname=self.ora_env_dict["SCAN_NAME"] + cmd='''su - {3} -c "ssh {5} 'export ORACLE_HOME={0};export PATH={1};export LD_LIBRARY_PATH={2};sudo {0}/bin/srvctl start scan'"'''.format(home,path,ldpath,user,scanname,node) + output,error,retcode=self.execute_cmd(cmd,None,None) + self.check_os_err(output,error,retcode,None) + +######### Add RAC DB ######## + def update_scan_lsnr(self,user,home,node): + """ + Update Scan + """ + path='''/usr/bin:/bin:/sbin:/usr/local/sbin:{0}/bin'''.format(home) + ldpath='''{0}/lib:/lib:/usr/lib'''.format(home) + scanname=self.ora_env_dict["SCAN_NAME"] + cmd='''su - {3} -c "ssh {4} 'export ORACLE_HOME={0};export PATH={1};export LD_LIBRARY_PATH={2};{0}/bin/srvctl modify scan_listener -update'"'''.format(home,path,ldpath,user,node) + output,error,retcode=self.execute_cmd(cmd,None,None) + self.check_os_err(output,error,retcode,None) +######### Add RAC DB ######## + def start_scan_lsnr(self,user,home,node): + """ + start Scan listener + """ + path='''/usr/bin:/bin:/sbin:/usr/local/sbin:{0}/bin'''.format(home) + ldpath='''{0}/lib:/lib:/usr/lib'''.format(home) + 
scanname=self.ora_env_dict["SCAN_NAME"] + cmd='''su - {3} -c "ssh {4} 'export ORACLE_HOME={0};export PATH={1};export LD_LIBRARY_PATH={2};{0}/bin/srvctl start scan_listener'"'''.format(home,path,ldpath,user,node) + output,error,retcode=self.execute_cmd(cmd,None,None) + self.check_os_err(output,error,retcode,None) + +######### Set DB Lsnr ######## + def setup_db_lsnr(self): + """ + Create and Setup DB lsnr + """ + giuser,gihome,gibase,oinv =self.get_gi_params() + status,osid,host,mode=self.check_dbinst() + endpoints=self.ora_env_dict["DB_LISTENER_ENDPOINTS"] if self.check_key("DB_LISTENER_ENDPOINTS",self.ora_env_dict) else None + lsnrname=self.ora_env_dict["DB_LISTENER_NAME"] if self.check_key("DB_LISTENER_NAME",self.ora_env_dict) else "dblsnr" + + if status: + if endpoints is not None and lsnrname is not None: + status1=self.check_rac_db_lsnr(giuser,gihome,osid,endpoints,lsnrname) + if not status1: + self.add_rac_db_lsnr(giuser,gihome,osid,endpoints,lsnrname) + else: + self.modify_rac_db_lsnr(giuser,gihome,osid,endpoints,lsnrname) + else: + self.log_info_message("DB Instance is not up",self.file_name) + +######### Add RACDB Instance ######## + def add_rac_instance(self,dbuser,dbhome,osid,instance_number,nodename): + """ + add the RAC Database Instance + """ + path='''/usr/bin:/bin:/sbin:/usr/local/sbin:{0}/bin'''.format(dbhome) + ldpath='''{0}/lib:/lib:/usr/lib'''.format(dbhome) + cmd='''su - {5} -c "export ORACLE_HOME={0};export PATH={1};export LD_LIBRARY_PATH={2}; {0}/bin/srvctl add instance -d {3} -i {4} -node {6}"'''.format(dbhome,path,ldpath,osid,osid+instance_number,dbuser,nodename) + output,error,retcode=self.execute_cmd(cmd,None,None) + self.check_os_err(output,error,retcode,True) + +######### get DB Role ######## + def get_db_role(self,dbuser,dbhome,inst_sid,sqlpluslogincmd): + """ + return the + """ + sqlcmd=''' + set heading off; + set pagesize 0; + select database_role from v$database; + exit; + ''' + self.set_mask_str(self.get_sys_passwd()) + 
output,error,retcode=self.run_sqlplus(sqlpluslogincmd,sqlcmd,None) + self.log_info_message("Calling check_sql_err() to validate the sql command return status",self.file_name) + self.check_sql_err(output,error,retcode,None) + self.unset_mask_str() + return output + +######### Sqlplus ########### + def check_setup_status(self,dbuser,dbhome,inst_sid,sqlpluslogincmd): + """ + return the RAC setup status. It check a status in the table. + """ + fname='''/tmp/{0}'''.format("rac_setup.txt") + self.remove_file(fname) + self.set_mask_str(self.get_sys_passwd()) + msg='''Checking racsetup table in CDB''' + self.log_info_message(msg,self.file_name) + sqlcmd=''' + set heading off + set feedback off + set term off + SET NEWPAGE NONE + spool {0} + select * from system.racsetup WHERE ROWNUM = 1; + spool off + exit; + '''.format(fname) + output,error,retcode=self.run_sqlplus(sqlpluslogincmd,sqlcmd,None) + self.log_info_message("Calling check_sql_err() to validate the sql command return status",self.file_name) + self.check_sql_err(output,error,retcode,None) + + if os.path.isfile(fname): + fdata=self.read_file(fname) + else: + fdata='nosetup' + + ### Unsetting the encrypt value to None + self.unset_mask_str() + + if re.search('completed',fdata): + #status = self.catalog_pdb_setup_check(host,ccdb,svc,port) + #if status == 'completed': + return 'completed' + #else: + # return 'notcompleted' + else: + return 'notcompleted' + +#### Get DB Parameters ####### + def get_init_params(self,paramname,sqlpluslogincmd): + """ + return the + """ + sqlcmd=''' + set heading off; + set pagesize 0; + set feedback off + select value from v$parameter where upper(name)=upper('{0}'); + exit; + '''.format(paramname) + + self.set_mask_str(self.get_sys_passwd()) + output,error,retcode=self.run_sqlplus(sqlpluslogincmd,sqlcmd,None) + self.log_info_message("Calling check_sql_err() to validate the sql command return status",self.file_name) + self.check_sql_err(output,error,retcode,None) + self.unset_mask_str() + 
return output.strip() + +#### set DB Params ####### + def run_sql_cmd(self,sqlcmd,sqlpluslogincmd): + """ + return the + """ + self.set_mask_str(self.get_sys_passwd()) + output,error,retcode=self.run_sqlplus(sqlpluslogincmd,sqlcmd,None) + self.log_info_message("Calling check_sql_err() to validate the sql command return status",self.file_name) + self.check_sql_err(output,error,retcode,None) + self.unset_mask_str() + return output + +#### Set sqlcmd ######## + def get_sqlsetcmd(self): + """ + return the sql set commands + """ + sqlsetcmd=''' + set heading off + set pagesize 0 + set feedback off + ''' + return sqlsetcmd + +#### Check DB Inst ############# + def check_dbinst(self): + """ + This function the db inst + """ + osuser,dbhome,dbbase,oinv=self.get_db_params() + dbname,osid,dbuname=self.getdbnameinfo() + hostname = self.get_public_hostname() + inst_sid=self.get_inst_sid(osuser,dbhome,osid,hostname) + connect_str=self.get_sqlplus_str(dbhome,inst_sid,osuser,"sys",None,None,None,None,None,None,None) + if inst_sid: + status=self.get_dbinst_status(osuser,dbhome,inst_sid,connect_str) + if not self.check_substr_match(status,"OPEN"): + return False,inst_sid,hostname,status + else: + return True,inst_sid,hostname,status + else: + return False,inst_sid,hostname,"" + +######## Set Remote Listener ###### + def set_remote_listener(self): + """ + This function set the remote listener + """ + if self.check_key("CMAN_HOST",self.ora_env_dict): + cmanhost=self.ora_env_dict["CMAN_HOST"] + osuser,dbhome,dbbase,oinv=self.get_db_params() + dbname,osid,dbuname=self.getdbnameinfo() + scanname=self.ora_env_dict["SCAN_NAME"] if self.check_key("SCAN_NAME",self.ora_env_dict) else self.prog_exit("127") + scanport=self.ora_env_dict["SCAN_PORT"] if self.check_key("SCAN_PORT",self.ora_env_dict) else "1521" + cmanport=self.ora_env_dict["CMAN_PORT"] if self.check_key("CMAN_PORT",self.ora_env_dict) else "1521" + hostname = self.get_public_hostname() + 
inst_sid=self.get_inst_sid(osuser,dbhome,osid,hostname) + connect_str=self.get_sqlplus_str(dbhome,inst_sid,osuser,"sys",None,None,None,None,None,None,None) + sqlcmd=''' + set heading off; + set pagesize 0; + alter system set remote_listener='{0}:{1},(DESCRIPTION=(ADDRESS_LIST=(ADDRESS=(PROTOCOL=TCP)(HOST={2})(PORT={3}))))' scope=both; + alter system register; + alter system register; + exit; + '''.format(scanname,scanport,cmanhost,cmanport) + self.set_mask_str(self.get_sys_passwd()) + output,error,retcode=self.run_sqlplus(connect_str,sqlcmd,None) + self.log_info_message("Calling check_sql_err() to validate the sql command return status",self.file_name) + self.check_sql_err(output,error,retcode,None) + self.unset_mask_str() + +######## Set Remote Listener ###### + def set_local_listener(self): + """ + This function set the remote listener + """ + if self.check_key("LOCAL_LISTENER",self.ora_env_dict): + lsnrstr=self.ora_env_dict["LOCAL_LISTENER"].split(";") + for str1 in lsnrstr: + if len(str1.split(":")) == 2: + hname=(str1.split(":")[0]).strip() + lport=(str1.split(":")[1]).strip() + osuser,dbhome,dbbase,oinv=self.get_db_params() + dbname,osid,dbuname=self.getdbnameinfo() + hostname = self.get_public_hostname() + inst_sid=self.get_inst_sid(osuser,dbhome,osid,hostname) + connect_str=self.get_sqlplus_str(dbhome,inst_sid,osuser,"sys",None,None,None,None,None,None,None) + dbsid=self.get_host_dbsid(hname,connect_str) + svcdom=self.get_svc_domain(hname) + hname1=svcdom + lstr='''(DESCRIPTION=(ADDRESS_LIST=(ADDRESS=(PROTOCOL=TCP)(HOST={0})(PORT={1}))))'''.format(hname1,lport) + dbsid1 = re.sub(r"[\n\t\s]*", "", dbsid) + self.log_info_message("the local_listener string set to : " + lstr, self.file_name) + sqlcmd=''' + set heading off; + set pagesize 0; + alter system set local_listener='{0}' scope=both sid='{1}'; + alter system register; + alter system register; + exit; + '''.format(lstr,dbsid1) + self.set_mask_str(self.get_sys_passwd()) + 
output,error,retcode=self.run_sqlplus(connect_str,sqlcmd,None) + self.log_info_message("Calling check_sql_err() to validate the sql command return status",self.file_name) + self.check_sql_err(output,error,retcode,None) + self.unset_mask_str() + +######## Complete RAC Setup + def rac_setup_complete(self): + """ + This function complete the RAC setup by creating a table inside the DB + """ + osuser,dbhome,dbbase,oinv=self.get_db_params() + dbname,osid,dbuname=self.getdbnameinfo() + hostname = self.get_public_hostname() + inst_sid=self.get_inst_sid(osuser,dbhome,osid,hostname) + connect_str=self.get_sqlplus_str(dbhome,inst_sid,osuser,"sys",None,None,None,None,None,None,None) + sqlcmd=''' + set heading off + set feedback off + create table system.racsetup (status varchar2(10)); + insert into system.racsetup values('completed'); + commit; + exit; + ''' + self.set_mask_str(self.get_sys_passwd()) + output,error,retcode=self.run_sqlplus(connect_str,sqlcmd,None) + self.log_info_message("Calling check_sql_err() to validate the sql command return status",self.file_name) + self.check_sql_err(output,error,retcode,None) + self.unset_mask_str() + +####### Setup Primary for standby + def set_primary_for_standby(self): + """ + Perform the task on primary for standby + """ + dgname=self.ora_env_dict["CRS_ASM_DISKGROUP"] if self.check_key("CRS_ASM_DISKGROUP",self.ora_env_dict) else "+DATA" + dbrdest=self.ora_env_dict["DB_RECOVERY_FILE_DEST"] if self.check_key("DB_RECOVERY_FILE_DEST",self.ora_env_dict) else dgname + dbrdestsize=self.ora_env_dict["DB_RECOVERY_FILE_DEST_SIZE"] if self.check_key("DB_RECOVERY_FILE_DEST_SIZE",self.ora_env_dict) else "10G" + dbname,osid,dbuname=self.getdbnameinfo() + + osuser,dbhome,dbbase,oinv=self.get_db_params() + dbname,osid,dbuname=self.getdbnameinfo() + hostname = self.get_public_hostname() + inst_sid=self.get_inst_sid(osuser,dbhome,osid,hostname) + connect_str=self.get_dgmgr_str(dbhome,inst_sid,osuser,"sys",None,None,None,None,None,None,None) + 
dgcmd=''' + PREPARE DATABASE FOR DATA GUARD + WITH DB_UNIQUE_NAME IS {0} + DB_RECOVERY_FILE_DEST IS "{1}" + DB_RECOVERY_FILE_DEST_SIZE is {2} + BROKER_CONFIG_FILE_1 IS "{3}" + BROKER_CONFIG_FILE_2 IS "{3}"; + exit; + '''.format(dbuname,dbrdest,dbrdestsize,dbrdest) + output,error,retcode=self.run_sqlplus(connect_str,dgcmd,None) + self.log_info_message("Calling check_sql_err() to validate the sql command return status",self.file_name) + self.check_dgmgrl_err(output,error,retcode,True) + +######## Check INV Home ######## + def check_home_inv(self,node,dbhome,dbuser): + """ + This function the db home with inventory + """ + if not node: + cmd='''su - {0} -c "{1}/OPatch/opatch lsinventory"'''.format(dbuser,dbhome) + else: + cmd='''su - {0} -c "ssh {2} '{1}/OPatch/opatch lsinventory'"'''.format(dbuser,dbhome,node) + + output,error,retcode=self.execute_cmd(cmd,None,None) + self.check_os_err(output,error,retcode,None) + if self.check_substr_match(output,"OPatch succeeded"): + return True + else: + return False + +######## Process delete node param variables ############## + def del_node_params(self,key): + """ + This function process DEL_PARAMS and set the keys + """ + cvar_str=self.ora_env_dict[key] + cvar_dict=dict(item.split("=") for item in cvar_str.split(";")) + for ckey in cvar_dict.keys(): + if ckey == 'del_rachome': + if self.check_key("DEL_RACHOME",self.ora_env_dict): + self.ora_env_dict["DEL_RACHOME"]="true" + else: + self.ora_env_dict=self.add_key("DEL_RACHOME","true",self.ora_env_dict) + if ckey == 'del_gridnode': + if self.check_key("DEL_GRIDNODE",self.ora_env_dict): + self.ora_env_dict["DEL_GRIDNODE"]="true" + else: + self.ora_env_dict=self.add_key("DEL_GRIDNODE","true",self.ora_env_dict) + +######## Process delete node param variables ############## + def populate_existing_cls_nodes(self): + """ + This function populate the nodes witht he existing cls nodes + """ + hostname=self.get_public_hostname() + 
crs_node_list=self.get_existing_cls_nodes(hostname,hostname) + if self.check_key("EXISTING_CLS_NODE",self.ora_env_dict): + self.ora_env_dict["EXISTING_CLS_NODE"]=crs_node_list + else: + self.ora_env_dict=self.add_key("EXISTING_CLS_NODE",crs_node_list,self.ora_env_dict) + +######## Run the custom scripts ############## + def run_custom_scripts(self,dirkey,filekey,user): + """ + This function run the custom scripts after Grid or DB setup based on env variables + """ +# self.log_info_message("Inside run_custom_scripts()",self.file_name) + if self.check_key(dirkey,self.ora_env_dict): + scrdir=self.ora_env_dict[dirkey] + if self.check_key(filekey,self.ora_env_dict): + scrfile=self.ora_env_dict[filekey] + script_file = '''{0}/{1}'''.format(scrdir,scrfile) + if os.path.isfile(script_file): + msg='''Custom script exist {0}'''.format(script_file) + self.log_info_message(msg,self.file_name) + cmd='''su - {0} -c "sh {0}"'''.format(user,script_file) + output,error,retcode=self.execute_cmd(cmd,None,None) + self.check_os_err(output,error,retcode,True) +# else: +# self.log_info_message("Custom script dir is specified " + self.ora_env_dict[dirkey] + " but no user script file is specified. Not executing any user specified script.",self.file_name) +# else: +# self.log_info_message("No custom script dir specified to execute user specified scripts. 
Not executing any user specified script.",self.file_name) + +######### Synching Oracle Home + def sync_gi_home(self,node,ohome,user): + """ + This home sync GI home during addnode from source machine to remote machine + """ + install_node,pubhost=self.get_installnode() + cmd='''su - {0} -c "ssh {1} 'rsync -Pav -e ssh --exclude \'{1}*\' {3}/* {0}@{2}:{3}'"'''.format(user,node,install_node,ohome) + output,error,retcode=self.execute_cmd(cmd,None,None) + self.check_os_err(output,error,retcode,False) + +######## Set the User profiles + def set_user_profile(self,ouser,key,val,type): + """ + This function run the custom scripts after Grid or DB setup based on env variables + """ + match=None + bashrc='''/home/{0}/.bashrc'''.format(ouser) + fdata=self.read_file(bashrc) + + match=re.search(key,fdata,re.MULTILINE) + #if not match: + if type=="export": + cmd='''echo "export {0}={1}" >> {2}'''.format(key,val,bashrc) + output,error,retcode=self.execute_cmd(cmd,None,None) + self.check_os_err(output,error,retcode,True) + if type=="alias": + cmd='''echo "alias {0}='{1}'" >> {2}'''.format(key,val,bashrc) + output,error,retcode=self.execute_cmd(cmd,None,None) + self.check_os_err(output,error,retcode,True) + +###### Reading grid Resonsefile + + # Update the env variables dictionary from the values in the grid response file ( if provided ) + + def update_gi_env_vars_from_rspfile(self): + """ + Update GI env vars as key value pair from the responsefile ( if provided ) + """ + gridrsp=None + privHost=None + privIP=None + privDomain=None + cls_nodes=None + + if self.check_key("GRID_RESPONSE_FILE",self.ora_env_dict): + gridrsp=self.ora_env_dict["GRID_RESPONSE_FILE"] + self.log_info_message("GRID_RESPONSE_FILE parameter is set and file location is:" + gridrsp ,self.file_name) + + if os.path.isfile(gridrsp): + with open(gridrsp) as fp: + for line in fp: + if len(line.split("=")) == 2: + key=(line.split("=")[0]).strip() + value=(line.split("=")[1]).strip() + self.log_info_message("KEY and 
Value pair set to: " + key + ":" + value ,self.file_name) + if (key == "INVENTORY_LOCATION"): + if self.check_key("INVENTORY",self.ora_env_dict): + self.ora_env_dict=self.update_key("INVENTORY",value,self.ora_env_dict) + else: + self.ora_env_dict=self.add_key("INVENTORY",value,self.ora_env_dict) + elif (key == "ORACLE_BASE"): + if self.check_key("GRID_BASE",self.ora_env_dict): + self.ora_env_dict=self.update_key("GRID_BASE",value,self.ora_env_dict) + else: + self.ora_env_dict=self.add_key("GRID_BASE",value,self.ora_env_dict) + elif (key == "scanName"): + if self.check_key("SCAN_NAME",self.ora_env_dict): + self.ora_env_dict=self.update_key("SCAN_NAME",value,self.ora_env_dict) + else: + self.ora_env_dict=self.add_key("SCAN_NAME",value,self.ora_env_dict) + elif (key == "diskString"): + if self.check_key("CRS_ASM_DISCOVERY_DIR",self.ora_env_dict): + self.ora_env_dict=self.update_key("CRS_ASM_DISCOVERY_DIR",value,self.ora_env_dict) + else: + self.ora_env_dict=self.add_key("CRS_ASM_DISCOVERY_DIR",value,self.ora_env_dict) + elif (key == "diskList"): + if self.check_key("CRS_ASM_DEVICE_LIST",self.ora_env_dict): + self.ora_env_dict=self.update_key("CRS_ASM_DEVICE_LIST",value,self.ora_env_dict) + else: + self.ora_env_dict=self.add_key("CRS_ASM_DEVICE_LIST",value,self.ora_env_dict) + elif (key == "clusterNodes"): + install_node_flag=False + for crs_node in value.split(","): + installNode=(crs_node.split(":"))[0].strip() + installVIPNode=(crs_node.split(":"))[1].strip() + cls_node='''pubhost:{0},viphost:{1}'''.format(installNode,installVIPNode) + self.log_info_message("cls_node set to : " + cls_node,self.file_name) + if cls_nodes is None: + cls_nodes=cls_node + ';' + else: + cls_nodes= cls_nodes + cls_node + ';' + self.log_info_message("cls_nodes set to : " + cls_nodes,self.file_name) + if not install_node_flag: + if self.check_key("INSTALL_NODE",self.ora_env_dict): + self.ora_env_dict=self.update_key("INSTALL_NODE",installNode,self.ora_env_dict) + else: + 
self.ora_env_dict=self.add_key("INSTALL_NODE",installNode,self.ora_env_dict) + install_node_flag=True + self.log_info_message("Install node set to :" + self.ora_env_dict["INSTALL_NODE"], self.file_name) + elif (key == "redundancy"): + if self.check_key("CRS_ASMDG_REDUNDANCY ",self.ora_env_dict): + self.ora_env_dict=self.update_key("CRS_ASMDG_REDUNDANCY ",value,self.ora_env_dict) + else: + self.ora_env_dict=self.add_key("CRS_ASMDG_REDUNDANCY ",value,self.ora_env_dict) + else: + pass + + #crsNodes=cls_nodes[:-1] if cls_nodes[:-1]==';' else cls_nodes + self.log_info_message("cls_nodes set to : " + cls_nodes,self.file_name) + crsNodes=cls_nodes.rstrip(cls_nodes[-1]) + if self.check_key("CRS_NODES",self.ora_env_dict): + self.ora_env_dict=self.update_key("CRS_NODES",crsNodes,self.ora_env_dict) + else: + self.ora_env_dict=self.add_key("CRS_NODES",crsNodes,self.ora_env_dict) + + else: + self.log_error_message("Grid response file does not exist at its location: " + gridrsp + ".Exiting..",self.file_name) + self.prog_exit("127") + + + + def update_pre_23c_gi_env_vars_from_rspfile(self): + """ + Update GI env vars as key value pair from the responsefile ( if provided ) + """ + gridrsp=None + privHost=None + privIP=None + privDomain=None + cls_nodes=None + + if self.check_key("GRID_RESPONSE_FILE",self.ora_env_dict): + gridrsp=self.ora_env_dict["GRID_RESPONSE_FILE"] + self.log_info_message("GRID_RESPONSE_FILE parameter is set and file location is:" + gridrsp ,self.file_name) + + if os.path.isfile(gridrsp): + with open(gridrsp) as fp: + for line in fp: + if len(line.split("=")) == 2: + key=(line.split("=")[0]).strip() + value=(line.split("=")[1]).strip() + self.log_info_message("KEY and Value pair set to: " + key + ":" + value ,self.file_name) + if (key == "INVENTORY_LOCATION"): + if self.check_key("INVENTORY",self.ora_env_dict): + self.ora_env_dict=self.update_key("INVENTORY",value,self.ora_env_dict) + else: + self.ora_env_dict=self.add_key("INVENTORY",value,self.ora_env_dict) + 
elif (key == "ORACLE_BASE"): + if self.check_key("GRID_BASE",self.ora_env_dict): + self.ora_env_dict=self.update_key("GRID_BASE",value,self.ora_env_dict) + else: + self.ora_env_dict=self.add_key("GRID_BASE",value,self.ora_env_dict) + elif (key == "oracle.install.crs.config.gpnp.scanName"): + if self.check_key("SCAN_NAME",self.ora_env_dict): + self.ora_env_dict=self.update_key("SCAN_NAME",value,self.ora_env_dict) + else: + self.ora_env_dict=self.add_key("SCAN_NAME",value,self.ora_env_dict) + elif (key == "oracle.install.asm.diskGroup.diskDiscoveryString"): + if self.check_key("CRS_ASM_DISCOVERY_DIR",self.ora_env_dict): + self.ora_env_dict=self.update_key("CRS_ASM_DISCOVERY_DIR",value,self.ora_env_dict) + else: + self.ora_env_dict=self.add_key("CRS_ASM_DISCOVERY_DIR",value,self.ora_env_dict) + elif (key == "oracle.install.asm.diskGroup.disks"): + if self.check_key("CRS_ASM_DEVICE_LIST",self.ora_env_dict): + self.ora_env_dict=self.update_key("CRS_ASM_DEVICE_LIST",value,self.ora_env_dict) + else: + self.ora_env_dict=self.add_key("CRS_ASM_DEVICE_LIST",value,self.ora_env_dict) + elif (key == "oracle.install.crs.config.clusterNodes"): + install_node_flag=False + for crs_node in value.split(","): + installNode=(crs_node.split(":"))[0].strip() + installVIPNode=(crs_node.split(":"))[1].strip() + cls_node='''pubhost:{0},viphost:{1}'''.format(installNode,installVIPNode) + self.log_info_message("cls_node set to : " + cls_node,self.file_name) + if cls_nodes is None: + cls_nodes=cls_node + ';' + else: + cls_nodes= cls_nodes + cls_node + ';' + self.log_info_message("cls_nodes set to : " + cls_nodes,self.file_name) + if not install_node_flag: + if self.check_key("INSTALL_NODE",self.ora_env_dict): + self.ora_env_dict=self.update_key("INSTALL_NODE",installNode,self.ora_env_dict) + else: + self.ora_env_dict=self.add_key("INSTALL_NODE",installNode,self.ora_env_dict) + install_node_flag=True + self.log_info_message("Install node set to :" + self.ora_env_dict["INSTALL_NODE"], 
self.file_name) + elif (key == "oracle.install.asm.diskGroup.redundancy"): + if self.check_key("CRS_ASMDG_REDUNDANCY ",self.ora_env_dict): + self.ora_env_dict=self.update_key("CRS_ASMDG_REDUNDANCY ",value,self.ora_env_dict) + else: + self.ora_env_dict=self.add_key("CRS_ASMDG_REDUNDANCY ",value,self.ora_env_dict) + elif (key == "oracle.install.asm.diskGroup.AUSize"): + if self.check_key("CRS_ASMDG_AU_SIZE ",self.ora_env_dict): + self.ora_env_dict=self.update_key("CRS_ASMDG_AU_SIZE ",value,self.ora_env_dict) + else: + self.ora_env_dict=self.add_key("CRS_ASMDG_AU_SIZE ",value,self.ora_env_dict) + else: + pass + + #crsNodes=cls_nodes[:-1] if cls_nodes[:-1]==';' else cls_nodes + self.log_info_message("cls_nodes set to : " + cls_nodes,self.file_name) + crsNodes=cls_nodes.rstrip(cls_nodes[-1]) + if self.check_key("CRS_NODES",self.ora_env_dict): + self.ora_env_dict=self.update_key("CRS_NODES",crsNodes,self.ora_env_dict) + else: + self.ora_env_dict=self.add_key("CRS_NODES",crsNodes,self.ora_env_dict) + + else: + self.log_error_message("Grid response file does not exist at its location: " + gridrsp + ".Exiting..",self.file_name) + self.prog_exit("127") + + + def update_rac_env_vars_from_rspfile(self,dbcarsp): + """ + Update RAC env vars as key value pair from the responsefile ( if provided ) + """ + if os.path.isfile(dbcarsp): + with open(dbcarsp) as fp: + for line in fp: + msg="Read from dbca.rsp: line=" + line + self.log_info_message(msg,self.file_name) + if len(line.split("=",1)) == 2: + key=(line.split("=")[0]).strip() + value=(line.split("=")[1]).strip() + msg="key=" + key + ".. 
value=" + value + self.log_info_message(msg,self.file_name) + if (key == "gdbName"): + if self.check_key("DB_NAME",self.ora_env_dict): + self.ora_env_dict=self.update_key("DB_NAME",value,self.ora_env_dict) + else: + self.ora_env_dict=self.add_key("DB_NAME",value,self.ora_env_dict) + elif (key == "datafileDestination"): + if value != "": + dg = (re.search("\+(.+?)/.*",value)).group(1) + if self.check_key("DB_DATA_FILE_DEST",self.ora_env_dict): + self.ora_env_dict=self.update_key("DB_DATA_FILE_DEST",dg,self.ora_env_dict) + else: + self.ora_env_dict=self.add_key("DB_DATA_FILE_DEST",dg,self.ora_env_dict) + elif (key == "recoveryAreaDestination"): + if value != "" : + dg = (re.search("\+(.+?)/.*",value)).group(1) + if self.check_key("DB_RECOVERY_FILE_DEST",self.ora_env_dict): + self.ora_env_dict=self.update_key("DB_RECOVERY_FILE_DEST",dg,self.ora_env_dict) + else: + self.ora_env_dict=self.add_key("DB_RECOVERY_FILE_DEST",dg,self.ora_env_dict) + + elif (key == "variables"): + variablesvalue=(re.search("variables=(.*)",line)).group(1) + if variablesvalue: + dbUniqueStr=(re.search("(DB_UNIQUE_NAME=.+?),.*",variablesvalue)).group(1) + if dbUniqueStr: + dbUniqueValue=(dbUniqueStr.split("=")[1]).strip() + if self.check_key("DB_UNIQUE_NAME",self.ora_env_dict): + self.ora_env_dict=self.update_key("DB_UNIQUE_NAME",dbUniqueValue,self.ora_env_dict) + else: + self.ora_env_dict=self.add_key("DB_UNIQUE_NAME",dbUniqueValue,self.ora_env_dict) + dbHomeStr=(re.search("(ORACLE_HOME=.+?),.*",variablesvalue)).group(1) + if dbHomeStr: + dbHomeValue=(dbHomeStr.split("=")[1]).strip() + if self.check_key("DB_HOME",self.ora_env_dict): + self.ora_env_dict=self.update_key("DB_HOME",dbHomeValue,self.ora_env_dict) + else: + self.ora_env_dict=self.add_key("DB_HOME",dbHomeValue,self.ora_env_dict) + dbBaseStr=(re.search("(ORACLE_BASE=.+?),.*",variablesvalue)).group(1) + if dbBaseStr: + dbBaseValue=(dbBaseStr.split("=")[1]).strip() + if self.check_key("DB_BASE",self.ora_env_dict): + 
self.ora_env_dict=self.update_key("DB_BASE",dbBaseValue,self.ora_env_dict) + else: + self.ora_env_dict=self.add_key("DB_BASE",dbBaseValue,self.ora_env_dict) + else: + pass + + else: + self.log_error_message("dbca response file does not exist at its location: " + dbcarsp + ".Exiting..",self.file_name) + self.prog_exit("127") + + + # Update the env variables dictionary from the values in the grid response file ( if provided ) + def update_domainfrom_resolvconf_file(self): + """ + Update domain variables + """ + privDomain=None + pubDomain=None + ## Update DNS_SERVERS from /etc/resolv.conf + if os.path.isfile("/etc/resolv.conf"): + fdata=self.read_file("/etc/resolv.conf") + str=re.search("nameserver\s+(.+?)\s+",fdata) + if str: + dns_server=str.group(1) + if self.check_key("DNS_SERVERS",self.ora_env_dict): + self.ora_env_dict=self.update_key("DNS_SERVERS",dns_server,self.ora_env_dict) + else: + self.ora_env_dict=self.add_key("DNS_SERVERS",dns_server,self.ora_env_dict) + + domains=(re.search("search\s+(.*)",fdata)).group(1) + cmd="echo " + domains + " | cut -d' ' -f1" + output,error,retcode=self.execute_cmd(cmd,None,None) + pubDomain=output.strip() + self.log_info_message("Domain set to :" + pubDomain, self.file_name) + self.check_os_err(output,error,retcode,True) + if self.check_key("PUBLIC_HOSTS_DOMAIN",self.ora_env_dict): + self.ora_env_dict=self.update_key("PUBLIC_HOSTS_DOMAIN",pubDomain,self.ora_env_dict) + else: + self.ora_env_dict=self.add_key("PUBLIC_HOSTS_DOMAIN",pubDomain,self.ora_env_dict) + +######## set DG Prefix Function + def setdgprefix(self,dgname): + """ + add dg prefix + """ + dgflag = dgname.startswith("+") + + if not dgflag: + dgname= "+" + dgname + self.log_info_message("The dgname set to : " + dgname, self.file_name) + + return dgname + +######## rm DG Prefix Function + def rmdgprefix(self,dgname): + """ + rm dg prefix + """ + dgflag = dgname.startswith("+") + + if dgflag: + return dgname[1:] + else: + return dgname + +###### Get SID, 
dbname,dbuname + def getdbnameinfo(self): + """ + this function returns the sid,dbname,dbuname + """ + dbname=self.ora_env_dict["DB_NAME"] if self.check_key("DB_NAME",self.ora_env_dict) else "ORCLCDB" + osid=dbname + dbuname=self.ora_env_dict["DB_UNIQUE_NAME"] if self.check_key("DB_UNIQUE_NAME",self.ora_env_dict) else dbname + + return dbname,osid,dbuname + +###### function to return DG Name for CRS + def getcrsdgname(self): + """ + return CRS DG NAME + """ + return self.ora_env_dict["CRS_ASM_DISKGROUP"] if self.check_key("CRS_ASM_DISKGROUP",self.ora_env_dict) else "+DATA" + + +###### function to return DG Name for DATAFILE + def getdbdestdgname(self,dgname): + """ + return DB DG NAME + """ + return self.ora_env_dict["DB_DATA_FILE_DEST"] if self.check_key("DB_DATA_FILE_DEST",self.ora_env_dict) else dgname + +###### function to return DG Name for RECOVERY DESTINATION + def getdbrdestdgname(self,dgname): + """ + return RECO DG NAME + """ + return self.ora_env_dict["DB_RECOVERY_FILE_DEST"] if self.check_key("DB_RECOVERY_FILE_DEST",self.ora_env_dict) else dgname + +##### Function to catalog the backup + def catalog_bkp(self): + """ + catalog the backup + """ + osuser,dbhome,dbbase,oinv=self.get_db_params() + osid=self.ora_env_dict["GOLD_SID_NAME"] + rmanlogincmd=self.get_rman_str(dbhome,osid,osuser,"sys",None,None,None,osid,None,None,None) + rmancmd=''' + catalog start with '{0}' noprompt; + '''.format(self.ora_env_dict["GOLD_DB_BACKUP_LOC"]) + self.log_info_message("Running the rman command to catalog the backup: " + rmancmd,self.file_name) + output,error,retcode=self.run_sqlplus(rmanlogincmd,rmancmd,None) + self.log_info_message("Calling check_sql_err() to validate the rman command return status",self.file_name) + self.check_sql_err(output,error,retcode,True) + +#### Function to validate the backup + def check_bkp(self): + """ + Check the backup + """ + pass + +#### Function to validate the backup + def restore_bkp(self,dgname): + """ + restore the backup + """ + 
osuser,dbhome,dbbase,oinv=self.get_db_params() + osid=self.ora_env_dict["GOLD_SID_NAME"] + dbname=self.ora_env_dict["GOLD_DB_NAME"] + self.log_info_message("In restore_bkp() : dgname=[" + dgname + "]", self.file_name) + rmanlogincmd=self.get_rman_str(dbhome,osid,osuser,"sys",None,None,None,osid,None,None,None) + rmancmd=''' + run {{ + restore controlfile from '{2}'; + alter database mount; + set newname for database to '{0}'; + restore database; + switch datafile all; + alter database open resetlogs; + alter pluggable database {1} open read write; + }} + '''.format(dgname,self.ora_env_dict["GOLD_PDB_NAME"],"/oradata/orclcdb_bkp/spfile" + dbname + ".ora") + self.log_info_message("Running the rman command to restore the controlfile and datafiles from the backup: " + rmancmd,self.file_name) + output,error,retcode=self.run_sqlplus(rmanlogincmd,rmancmd,None) + self.log_info_message("Calling check_sql_err() to validate the rman command return status",self.file_name) + self.check_sql_err(output,error,retcode,True) + +#### Function restore the spfile + def restore_spfile(self): + """ + Restore the spfile + """ + osuser,dbhome,dbbase,oinv=self.get_db_params() + osid=self.ora_env_dict["GOLD_SID_NAME"] + dbname=self.ora_env_dict["GOLD_DB_NAME"] + rmanlogincmd=self.get_rman_str(dbhome,osid,osuser,"sys",None,None,None,osid,None,None,None) + rmancmd=''' + restore spfile from '{0}'; + '''.format(self.ora_env_dict["GOLD_DB_BACKUP_LOC"] + "/spfile" + dbname + ".ora") + self.log_info_message("Running the rman command to restore the spfile from the backup: " + rmancmd,self.file_name) + output,error,retcode=self.run_sqlplus(rmanlogincmd,rmancmd,None) + self.log_info_message("Calling check_sql_err() to validate the rman command return status",self.file_name) + self.check_sql_err(output,error,retcode,True) + +#### Set cluster mode to true or false + def set_cluster_mode(self,pfile,cflag): + """ + This function sets the cluster mode to true or false in the pfile + """ + cmd='''sed -i 
"s/*.cluster_database=.*/*.cluster_database={0}/g" {1}'''.format(cflag,pfile) + output,error,retcode=self.execute_cmd(cmd,None,None) + self.check_os_err(output,error,retcode,False) + +#### Change the dbname in the parameter file to the new dbname + def change_dbname(self,pfile,newdbname): + """ + This function sets the resets the dbname to newdbname in the pfile + """ + osuser,dbhome,dbbase,oinv=self.get_db_params() + olddbname=self.ora_env_dict["GOLD_DB_NAME"] + osid=self.ora_env_dict["GOLD_SID_NAME"] + cmd='''su - {3} -c "export ORACLE_SID={2};export ORACLE_HOME={1};echo Y | {1}/bin/nid target=/ dbname={0}"'''.format(newdbname,dbhome,osid,osuser) + output,error,retcode=self.execute_cmd(cmd,None,None) + self.check_os_err(output,error,retcode,False) + + self.set_cluster_mode(pfile,True) + cmd='''sed -i "s/*.db_name=.*/*.db_name={0}/g" {1}'''.format(newdbname,pfile) + output,error,retcode=self.execute_cmd(cmd,None,None) + self.check_os_err(output,error,retcode,False) + cmd='''sed -i "s/*.db_unique_name=.*/*.db_unique_name={0}/g" {1}'''.format(newdbname,pfile) + output,error,retcode=self.execute_cmd(cmd,None,None) + self.check_os_err(output,error,retcode,False) + cmd='''sed -i "s/{0}\(.*\).instance_number=\(.*\)/{1}\\1.instance_number=\\2/g" {2}'''.format(olddbname,newdbname,pfile) + output,error,retcode=self.execute_cmd(cmd,None,None) + self.check_os_err(output,error,retcode,False) \ No newline at end of file diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oracvu.py b/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oracvu.py new file mode 100644 index 0000000000..d7d8aa190c --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oracvu.py @@ -0,0 +1,261 @@ +#!/usr/bin/python3 + +############################# +# Copyright (c) 2024, Oracle and/or its affiliates. 
#!/usr/bin/python3

#############################
# Copyright (c) 2024, Oracle and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl
# Author: paramdeep.saini@oracle.com
############################

"""
This file contains the code to run Cluster Verification Utility (CVU) checks
during Grid Infrastructure / RAC provisioning.
"""

from oralogger import *
from oraenv import *
from oracommon import *
from oramachine import *
from orasetupenv import *
from orasshsetup import *

import os
import re          # used by cluvfy_updcvucfg; previously only available via wildcard imports
import sys
import traceback   # BUG FIX: needed for traceback.extract_tb in __init__

class OraCvu:
    """
    This class performs the CVU checks.

    Attributes:
       ologger (object): object of OraLogger class.
       ohandler (object): object of Handler class.
       oenv (object): singleton OraEnv instance.
       ocommon (object): object of OraCommon class (command execution/logging helpers).
       ora_env_dict (dict): env variables for the setup.
       file_name (str): this file's basename, used as the logging source.
    """
    def __init__(self,oralogger,orahandler,oraenv,oracommon):
        self.file_name = os.path.basename(__file__)
        try:
            self.ologger = oralogger
            self.ohandler = orahandler
            self.oenv = oraenv.get_instance()
            self.ocommon = oracommon
            self.ora_env_dict = oraenv.get_env_vars()
        except BaseException:
            ex_type, ex_value, ex_traceback = sys.exc_info()
            # BUG FIX: was sys.tracebacklimit.extract_tb(...) -- sys.tracebacklimit
            # is an int, so the error handler itself raised AttributeError.
            trace_back = traceback.extract_tb(ex_traceback)
            stack_trace = ["File : %s , Line : %d, Func.Name : %s, Message : %s" % (trace[0], trace[1], trace[2], trace[3]) for trace in trace_back]
            # Use the constructor argument: self.ocommon may not be assigned yet
            # when the failure happened on an earlier line of the try block.
            oracommon.log_info_message(ex_type.__name__,self.file_name)
            oracommon.log_info_message(ex_value,self.file_name)
            oracommon.log_info_message(stack_trace,self.file_name)

    def setup(self):
        """
        Placeholder: CVU has no standalone setup step of its own.
        """
        pass

    def node_reachability_checks(self,checktype,user,ctype):
        """
        Run ``runcluvfy.sh comp nodereach`` for the cluster nodes.

        checktype: "private" uses the private node list, anything else the public list.
        user:      OS user that runs runcluvfy.sh.
        ctype:     "ADDNODE" also includes the already-existing cluster nodes.
        """
        exiting_cls_node=""
        if ctype == 'ADDNODE':
            exiting_cls_node=self.ocommon.get_existing_clu_nodes(True)

        if self.ocommon.check_key("CRS_NODES",self.ora_env_dict):
            pub_nodes,vip_nodes,priv_nodes=self.ocommon.process_cluster_vars("CRS_NODES")
            if checktype=="private":
                crs_nodes=priv_nodes.replace(" ",",")
            else:
                crs_nodes=pub_nodes.replace(" ",",")
            if exiting_cls_node:
                crs_nodes = crs_nodes + "," + exiting_cls_node

            # Guarded by the CRS_NODES check above: crs_nodes is undefined otherwise.
            nwmask,nwsubnet,nwname=self.ocommon.get_nwlist(checktype)
            self.cluvfy_nodereach(crs_nodes,nwname,user)

    def node_connectivity_checks(self,checktype,user,ctype):
        """
        Run ``runcluvfy.sh comp nodecon`` for the cluster nodes.

        Same parameters as node_reachability_checks().
        """
        exiting_cls_node=""
        if ctype == 'ADDNODE':
            exiting_cls_node=self.ocommon.get_existing_clu_nodes(True)

        if self.ocommon.check_key("CRS_NODES",self.ora_env_dict):
            pub_nodes,vip_nodes,priv_nodes=self.ocommon.process_cluster_vars("CRS_NODES")
            if checktype=="private":
                crs_nodes=priv_nodes.replace(" ",",")
            else:
                crs_nodes=pub_nodes.replace(" ",",")
            if exiting_cls_node:
                crs_nodes = crs_nodes + "," + exiting_cls_node

            nwmask,nwsubnet,nwname=self.ocommon.get_nwlist(checktype)
            # BUG FIX: this method previously called cluvfy_nodereach(), so the
            # node *connectivity* (nodecon) check never actually ran.
            self.cluvfy_nodecon(crs_nodes,nwname,user)

    def cluvfy_nodereach(self,crs_nodes,nwname,user):
        """
        Run ``runcluvfy.sh comp nodereach`` for the given comma-separated node
        list; a failure aborts the setup (check_os_err called with True).
        """
        ohome=self.ora_env_dict["GRID_HOME"]
        self.ocommon.log_info_message("Performing cluvfy check to perform node reachability.",self.file_name)
        cmd='''su - {2} -c "{1}/runcluvfy.sh comp nodereach -n {0} -verbose"'''.format(crs_nodes,ohome,user)
        output,error,retcode=self.ocommon.execute_cmd(cmd,None,None)
        self.ocommon.check_os_err(output,error,retcode,True)

    def cluvfy_nodecon(self,crs_nodes,nwname,user):
        """
        Run ``runcluvfy.sh comp nodecon`` on the given network; failures are
        logged but do not abort (check_os_err called with None).
        """
        ohome=self.ora_env_dict["GRID_HOME"]
        self.ocommon.log_info_message("Performing cluvfy check to perform node connectivity.",self.file_name)
        cmd='''su - {3} -c "{1}/runcluvfy.sh comp nodecon -n {0} -networks {2} -verbose"'''.format(crs_nodes,ohome,nwname,user)
        output,error,retcode=self.ocommon.execute_cmd(cmd,None,None)
        self.ocommon.check_os_err(output,error,retcode,None)

    def cluvfy_compsys(self,ctype,user,nodes="racnode6,racnode8"):
        """
        Run ``runcluvfy.sh comp sys -p {ctype}``.

        nodes: node list for -n. Defaults to the historical hard-coded pair for
        backward compatibility; pass the real cluster node list to generalize.
        """
        ohome=self.ora_env_dict["GRID_HOME"]
        self.ocommon.log_info_message("Performing cluvfy comp sys check.",self.file_name)
        cmd='''su - {2} -c "{1}/runcluvfy.sh comp sys -n {3} -p {0} -verbose"'''.format(ctype,ohome,user,nodes)
        output,error,retcode=self.ocommon.execute_cmd(cmd,None,None)
        self.ocommon.check_os_err(output,error,retcode,None)

    def cluvfy_checkrspfile(self,fname,ohome,user):
        """
        Run ``runcluvfy.sh stage -pre crsinst`` against the responsefile fname,
        teeing the output to /tmp/cluvfy_check.txt.
        """
        self.cluvfy_updcvucfg(ohome,user)
        self.ocommon.log_info_message("Performing cluvfy check on a responsefile: " + fname,self.file_name)
        cmd='''su - {0} -c "{1}/runcluvfy.sh stage -pre crsinst -responseFile {2} | tee -a {3}/cluvfy_check.txt"'''.format(user,ohome,fname,"/tmp")
        output,error,retcode=self.ocommon.execute_cmd(cmd,None,None)
        # Both branches of the former IGNORE_CVU_CHECKS test were identical
        # (non-fatal error check), so a single call is equivalent.
        self.ocommon.check_os_err(output,error,retcode,None)

    def cluvfy_updcvucfg(self,ohome,user):
        """
        Point CV_DESTLOC in $GRID_HOME/cv/admin/cvu_config at the scratch dir.
        """
        tmpdir=self.ocommon.get_tmpdir()
        fname='''{0}/cv/admin/cvu_config'''.format(ohome)
        self.ocommon.log_info_message("Updating CVU config file: " + fname,self.file_name)
        fdata=self.ocommon.read_file(fname)
        if not re.search("CV_DESTLOC=",fdata,re.MULTILINE):
            cmd='''su - {0} -c "echo CV_DESTLOC=\"{1}\" >> {2}"'''.format(user,tmpdir,fname)
        else:
            # BUG FIX: both branches previously appended, so an existing
            # CV_DESTLOC entry was duplicated. Replace it in place instead.
            cmd='''su - {0} -c "sed -i 's#^CV_DESTLOC=.*#CV_DESTLOC={1}#' {2}"'''.format(user,tmpdir,fname)
        output,error,retcode=self.ocommon.execute_cmd(cmd,None,None)

    def check_ohasd(self,node):
        """
        Run ``cluvfy comp ohasd`` (all nodes when node is empty) and return its exit code.
        """
        giuser,gihome,gbase,oinv=self.ocommon.get_gi_params()
        if not node:
            crs_nodes=" -allnodes "
        else:
            crs_nodes=" -n " + node

        cmd='''su - {0} -c "{1}/bin/cluvfy comp ohasd {2}"'''.format(giuser,gihome,crs_nodes)
        output,error,retcode=self.ocommon.execute_cmd(cmd,None,None)
        self.ocommon.check_os_err(output,error,retcode,None)
        return retcode

    def check_asm(self,node):
        """
        Run ``cluvfy comp asm`` (all nodes when node is empty) and return its exit code.
        """
        giuser,gihome,gbase,oinv=self.ocommon.get_gi_params()
        if not node:
            crs_nodes=" -allnodes "
        else:
            crs_nodes=" -n " + node

        cmd='''su - {0} -c "{1}/bin/cluvfy comp asm {2}"'''.format(giuser,gihome,crs_nodes)
        output,error,retcode=self.ocommon.execute_cmd(cmd,None,None)
        self.ocommon.check_os_err(output,error,retcode,None)
        return retcode

    def check_clu(self,node,sshflag):
        """
        Run ``cluvfy comp clumgr`` and return its exit code.

        node:    check all nodes when empty, otherwise only this node.
        sshflag: when truthy, run cluvfy remotely on ``node`` over ssh.
        """
        giuser,gihome,gbase,oinv=self.ocommon.get_gi_params()
        if not node:
            crs_nodes=" -allnodes "
        else:
            crs_nodes=" -n " + node
        cmd='''su - {0} -c "{1}/bin/cluvfy comp clumgr {2}"'''.format(giuser,gihome,crs_nodes)

        if sshflag:
            crs_nodes=" -n " + node
            cmd='''su - {0} -c "ssh {3} '{1}/bin/cluvfy comp clumgr {2}'"'''.format(giuser,gihome,crs_nodes,node)

        output,error,retcode=self.ocommon.execute_cmd(cmd,None,None)
        self.ocommon.check_os_err(output,error,retcode,None)
        return retcode

    def check_home(self,node,home,user):
        """
        Verify an installed software home with ``cluvfy comp software``.

        Returns 0 when cluvfy reports no "FAILED" marker, 1 otherwise (also 1
        when the cluvfy binary does not exist yet, i.e. nothing installed).
        """
        giuser,gihome,gbase,oinv=self.ocommon.get_gi_params()
        if not node:
            crs_nodes=" -allnodes "
        else:
            crs_nodes=" -n " + node

        cvufile='''{0}/bin/cluvfy'''.format(gihome)
        if not self.ocommon.check_file(cvufile,True,None,None):
            return 1

        cmd='''su - {0} -c "{1}/bin/cluvfy comp software -d {3} -verbose"'''.format(user,gihome,node,home)
        output,error,retcode=self.ocommon.execute_cmd(cmd,None,None)
        if not self.ocommon.check_substr_match(output,"FAILED"):
            return 0
        else:
            return 1

    def check_db_homecfg(self,node):
        """
        Run ``cluvfy stage -pre dbcfg`` for the DB home and return its exit code.
        """
        giuser,gihome,gbase,oinv=self.ocommon.get_gi_params()
        dbuser,dbhome,dbbase,oinv=self.ocommon.get_db_params()

        if not node:
            crs_nodes=" -allnodes "
        else:
            crs_nodes=" -n " + node

        cmd='''su - {0} -c "{1}/bin/cluvfy stage -pre dbcfg {2} -d {3}"'''.format(dbuser,gihome,crs_nodes,dbhome)
        output,error,retcode=self.ocommon.execute_cmd(cmd,None,None)
        self.ocommon.check_os_err(output,error,retcode,None)
        return retcode

    def check_addnode(self):
        """
        Run ``runcluvfy.sh stage -pre nodeadd`` from the first existing cluster
        node for the nodes that are about to be added.
        """
        exiting_cls_node=self.ocommon.get_existing_clu_nodes(True)
        giuser,gihome,gbase,oinv=self.ocommon.get_gi_params()
        node=exiting_cls_node.split(",")[0]
        if self.ocommon.check_key("CRS_NODES",self.ora_env_dict):
            pub_nodes,vip_nodes,priv_nodes=self.ocommon.process_cluster_vars("CRS_NODES")
            crs_nodes=pub_nodes.replace(" ",",")
            cmd='''su - {0} -c "ssh {1} '{2}/runcluvfy.sh stage -pre nodeadd -n {3}'"'''.format(giuser,node,gihome,crs_nodes)
            output,error,retcode=self.ocommon.execute_cmd(cmd,None,None)
            if self.ocommon.check_key("IGNORE_CVU_CHECKS",self.ora_env_dict):
                self.ocommon.log_info_message("Ignoring CVU checks failure as IGNORE_CVU_CHECKS set to ignore CVU checks.",self.file_name)
                self.ocommon.check_os_err(output,error,retcode,None)
            else:
                self.ocommon.check_os_err(output,error,retcode,None)

# ---- next file in patch: common/scripts/oraenv.py ----
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl
# Author: paramdeep.saini@oracle.com
############################

"""
This file reads the env variables from a file (or the process environment via
``os.environ``) and exposes them through the OraEnv singleton.
"""

import os

class OraEnv:
    """Singleton holding the environment-variable dict used by all setup scripts."""
    __instance = None
    __env_var_file = '/etc/rac_env_vars'
    # When truthy, variables are read from __env_var_file; otherwise os.environ is used.
    __env_var_file_flag = None
    __env_var_dict = {}
    __ora_asm_diskgroup_name = '+DATA'
    __ora_gimr_flag = 'false'
    __ora_grid_user = 'grid'
    __ora_db_user = 'oracle'
    __ora_oinstall_group_name = 'oinstall'
    encrypt_str__ = None
    original_str__ = None
    logdir__ = "/tmp/orod"

    def __init__(self):
        """ Virtually private constructor. """
        if OraEnv.__instance != None:
            raise Exception("This class is a singleton!")
        else:
            OraEnv.__instance = self
            OraEnv.read_variable()
            OraEnv.add_variable()
            try:
                os.mkdir(OraEnv.logdir__)
            except OSError:
                # Log dir already exists or cannot be created; logging callers
                # deal with a missing dir themselves.
                pass

    @staticmethod
    def get_instance():
        """ Static access method. """
        if OraEnv.__instance == None:
            OraEnv()
        return OraEnv.__instance

    @staticmethod
    def read_variable():
        """ Read the variables from a file into dict """
        if OraEnv.__env_var_file_flag:
            with open(OraEnv.__env_var_file) as envfile:
                for line in envfile:
                    name, var = line.partition("=")[::2]
                    # BUG FIX: strip the value as well -- it previously kept its
                    # trailing newline, corrupting any command built from it.
                    OraEnv.__env_var_dict[name.strip()] = var.strip()
        else:
            OraEnv.__env_var_dict = os.environ

    @staticmethod
    def add_variable():
        """ Add more variables based on environment with default values in __env_var_dict """
        # BUG FIX: the guards previously tested the ORA_-prefixed keys while
        # writing the un-prefixed keys, so (a) a user-supplied GRID_USER /
        # DB_USER / OINSTALL value could be clobbered with the default and
        # (b) an ORA_-prefixed value was never honored. Test the key that is
        # written and fall back to the ORA_ spelling as the default.
        if "ORA_ASM_DISKGROUP_NAME" not in OraEnv.__env_var_dict:
            OraEnv.__env_var_dict["ORA_ASM_DISKGROUP_NAME"] = "+DATA"

        if "GRID_USER" not in OraEnv.__env_var_dict:
            OraEnv.__env_var_dict["GRID_USER"] = OraEnv.__env_var_dict.get("ORA_GRID_USER", "grid")

        if "DB_USER" not in OraEnv.__env_var_dict:
            OraEnv.__env_var_dict["DB_USER"] = OraEnv.__env_var_dict.get("ORA_DB_USER", "oracle")

        if "OINSTALL" not in OraEnv.__env_var_dict:
            OraEnv.__env_var_dict["OINSTALL"] = OraEnv.__env_var_dict.get("ORA_OINSTALL_GROUP_NAME", "oinstall")

    @staticmethod
    def add_custom_variable(key,val):
        """ Add a custom variable passed from main.py into __env_var_dict (no overwrite). """
        if key not in OraEnv.__env_var_dict:
            OraEnv.__env_var_dict[key] = val

    @staticmethod
    def update_key(key,val):
        """ Update (or create) a key passed from main.py in __env_var_dict. """
        OraEnv.__env_var_dict[key] = val

    @staticmethod
    def get_env_vars():
        """ Static access method to get the env vars. """
        return OraEnv.__env_var_dict

    @staticmethod
    def update_env_vars(env_dict):
        """ Static access method to replace the env vars dict wholesale. """
        OraEnv.__env_var_dict = env_dict

    @staticmethod
    def get_env_dict():
        """ Static access method to return the dict. """
        return OraEnv.__env_var_dict

    @staticmethod
    def logfile_name(file_type):
        """
        Set LOG_FILE_NAME for the given operation type and return it.

        "NONE" applies the default setup log only when LOG_FILE_NAME is not
        already set; unknown types leave LOG_FILE_NAME untouched.
        """
        logfiles = {
            "DEL_PARAMS":            "oracle_rac_del.log",
            "RESET_PASSWORD":        "oracle_rac_reset_passwd.log",
            "ADD_TNS":               "oracle_rac_populate_tns_file.log",
            "CHECK_RAC_INST":        "oracle_check_rac_inst_file.log",
            "CHECK_GI_LOCAL":        "oracle_check_gi_local_file.log",
            "CHECK_RAC_DB":          "oracle_check_rac_db_file.log",
            "CHECK_DB_ROLE":         "oracle_check_db_role.log",
            "CHECK_CONNECT_STR":     "oracle_check_conn_str_file.log",
            "CHECK_PDB_CONNECT_STR": "oracle_check_pdb_conn_str_file.log",
            "SETUP_DB_LSNR":         "setup_db_lsnr.log",
            "SETUP_LOCAL_LSNR":      "setup_local_lsnr.log",
        }
        if file_type == "NONE":
            # BUG FIX: the guard previously tested the misspelled key
            # "LOGFILE_NAME", so the default silently overwrote an already
            # chosen LOG_FILE_NAME on every call.
            if "LOG_FILE_NAME" not in OraEnv.__env_var_dict:
                OraEnv.__env_var_dict["LOG_FILE_NAME"] = OraEnv.logdir__ + "/oracle_rac_setup.log"
        elif file_type in logfiles:
            OraEnv.__env_var_dict["LOG_FILE_NAME"] = OraEnv.logdir__ + "/" + logfiles[file_type]

        return OraEnv.__env_var_dict["LOG_FILE_NAME"]
#!/usr/bin/python3

#############################
# Copyright (c) 2024, Oracle and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl
# Author: paramdeep.saini@oracle.com
############################

"""
This file contains the code to call different class objects based on setup type.
"""

import os
import sys
sys.path.insert(0, "/opt/scripts/startup/scripts")


from oralogger import *
from oraenv import *
from oracommon import *
from oramachine import *
from oragiprov import *
from oragiadd import *
from orasshsetup import *
from oraracadd import *
from oraracprov import *
from oraracdel import *
from oramiscops import *
# BUG FIX: these classes are referenced below but were previously only
# reachable through transitive wildcard imports; import them explicitly.
from oracvu import *
from orasetupenv import *
from oraracstdby import *

class OraFactory:
    """
    This is a class for calling child objects to setup RAC/DG/GRID/DB/Sharding based on OP_TYPE env variable.

    Attributes:
        oralogger (object): object of OraLogger Class.
        ohandler (object): object of Handler class.
        oenv (object): object of singleton OraEnv class.
        ocommon(object): object of OraCommon class.
        ora_env_dict(dict): Dict of env variable populated based on env variable for the setup.
        file_name(string): Filename from where logging message is populated.
    """
    def __init__(self,oralogger,orahandler,oraenv,oracommon):
        """
        Initialize the factory with logging/env helpers and the shared CVU and
        SSH-setup helper objects handed to every child object.
        """
        self.ologger = oralogger
        self.ohandler = orahandler
        self.oenv = oraenv.get_instance()
        self.ocommon = oracommon
        self.ocvu = OraCvu(self.ologger,self.ohandler,self.oenv,self.ocommon)
        self.osetupssh = OraSetupSSH(self.ologger,self.ohandler,self.oenv,self.ocommon)
        self.ora_env_dict = oraenv.get_env_vars()
        self.file_name = os.path.basename(__file__)

    def get_ora_objs(self):
        '''
        Return the instances of the classes which will setup the environment.

        Returns:
            ofactory_obj: List of objects
        '''
        ofactory_obj = []

        msg='''ora_env_dict set to : {0}'''.format(self.ora_env_dict)
        self.ocommon.log_info_message(msg,self.file_name)

        msg='''Adding machine setup object in orafactory'''
        self.ocommon.log_info_message(msg,self.file_name)
        omachine=OraMachine(self.ologger,self.ohandler,self.oenv,self.ocommon,self.ocvu,self.osetupssh)
        ofactory_obj.append(omachine)

        msg="Checking the OP_TYPE and Version to begin the installation"
        self.ocommon.log_info_message(msg,self.file_name)

        # Remember a caller-supplied OP_TYPE before populate_rac_env_vars()
        # rewrites the env dict, then restore it afterwards.
        op_type=None
        if self.ocommon.check_key("CUSTOM_RUN_FLAG",self.ora_env_dict):
            if self.ocommon.check_key("OP_TYPE",self.ora_env_dict):
                op_type=self.ora_env_dict["OP_TYPE"]

        self.ocommon.populate_rac_env_vars()
        if self.ocommon.check_key("OP_TYPE",self.ora_env_dict):
            if op_type is not None:
                self.ocommon.update_key("OP_TYPE",op_type,self.ora_env_dict)
            msg='''OP_TYPE variable is set to {0}.'''.format(self.ora_env_dict["OP_TYPE"])
            self.ocommon.log_info_message(msg,self.file_name)
        else:
            self.ora_env_dict=self.ocommon.add_key("OP_TYPE","nosetup",self.ora_env_dict)
            msg="OP_TYPE variable is set to default nosetup. No value passed as an environment variable."
            self.ocommon.log_info_message(msg,self.file_name)

        ## Calling this function from here to make sure INSTALL_NODE is set
        self.ocommon.update_gi_env_vars_from_rspfile()
        # Check the OP_TYPE value and call objects based on its value
        install_node,pubhost=self.ocommon.get_installnode()
        if install_node.lower() == pubhost.lower():
            if self.ora_env_dict["OP_TYPE"] == 'setupgrid':
                msg="Creating and calling instance to provGrid"
                ogiprov = OraGIProv(self.ologger,self.ohandler,self.oenv,self.ocommon,self.ocvu,self.osetupssh)
                self.ocommon.log_info_message(msg,self.file_name)
                ofactory_obj.append(ogiprov)
            elif self.ora_env_dict["OP_TYPE"] == 'setuprac':
                msg="Creating and calling instance to prov RAC DB"
                oracdb = OraRacProv(self.ologger,self.ohandler,self.oenv,self.ocommon,self.ocvu,self.osetupssh)
                self.ocommon.log_info_message(msg,self.file_name)
                ofactory_obj.append(oracdb)
            elif self.ora_env_dict["OP_TYPE"] == 'setupssh':
                msg="Creating and calling instance to setup ssh between computes"
                ossh = self.osetupssh
                self.ocommon.log_info_message(msg,self.file_name)
                ofactory_obj.append(ossh)
            elif self.ora_env_dict["OP_TYPE"] == 'setupracstandby':
                msg="Creating and calling instance to setup RAC standby database"
                oracstdby = OraRacStdby(self.ologger,self.ohandler,self.oenv,self.ocommon,self.ocvu,self.osetupssh)
                self.ocommon.log_info_message(msg,self.file_name)
                ofactory_obj.append(oracstdby)
            elif self.ora_env_dict["OP_TYPE"] == 'gridaddnode':
                msg="Creating and calling instance to add grid"
                oaddgi = OraGIAdd(self.ologger,self.ohandler,self.oenv,self.ocommon,self.ocvu,self.osetupssh)
                self.ocommon.log_info_message(msg,self.file_name)
                ofactory_obj.append(oaddgi)
            elif self.ora_env_dict["OP_TYPE"] == 'racaddnode':
                msg="Creating and calling instance to add RAC node"
                oaddrac = OraRacAdd(self.ologger,self.ohandler,self.oenv,self.ocommon,self.ocvu,self.osetupssh)
                self.ocommon.log_info_message(msg,self.file_name)
                ofactory_obj.append(oaddrac)
            elif self.ora_env_dict["OP_TYPE"] == 'setupenv':
                msg="Creating and calling instance to setup the racenv"
                osetupenv = OraSetupEnv(self.ologger,self.ohandler,self.oenv,self.ocommon,self.ocvu,self.osetupssh)
                self.ocommon.log_info_message(msg,self.file_name)
                ofactory_obj.append(osetupenv)
            elif self.ora_env_dict["OP_TYPE"] == 'racdelnode':
                msg="Creating and calling instance to delete the rac node"
                oracdel = OraRacDel(self.ologger,self.ohandler,self.oenv,self.ocommon,self.ocvu,self.osetupssh)
                self.ocommon.log_info_message(msg,self.file_name)
                ofactory_obj.append(oracdel)
            elif self.ora_env_dict["OP_TYPE"] == 'miscops':
                msg="Creating and calling instance to perform the miscellaneous operations"
                oramops = OraMiscOps(self.ologger,self.ohandler,self.oenv,self.ocommon,self.ocvu,self.osetupssh)
                self.ocommon.log_info_message(msg,self.file_name)
                ofactory_obj.append(oramops)
            else:
                # BUG FIX: the help text omitted setupenv/racdelnode/miscops.
                msg="OP_TYPE must be set to {setupgrid|setuprac|setupssh|setupracstandby|gridaddnode|racaddnode|setupenv|racdelnode|miscops}"
                self.ocommon.log_info_message(msg,self.file_name)
        elif install_node.lower() != pubhost.lower() and self.ocommon.check_key("CUSTOM_RUN_FLAG",self.ora_env_dict):
            if self.ora_env_dict["OP_TYPE"] == 'miscops':
                msg="Creating and calling instance to perform the miscellaneous operations"
                oramops = OraMiscOps(self.ologger,self.ohandler,self.oenv,self.ocommon,self.ocvu,self.osetupssh)
                self.ocommon.log_info_message(msg,self.file_name)
                ofactory_obj.append(oramops)
        else:
            msg="INSTALL_NODE {0} is not matching with the hostname {1}. Resetting OP_TYPE to nosetup.".format(install_node,pubhost)
            self.ocommon.log_info_message(msg,self.file_name)
            self.ocommon.update_key("OP_TYPE","nosetup",self.ora_env_dict)


        return ofactory_obj

# ============================================================================
# (next file in patch) common/scripts/oragiadd.py
# ============================================================================
#!/usr/bin/python3

#############################
# Copyright (c) 2024, Oracle and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl
# Author: paramdeep.saini@oracle.com
############################

"""
This file contains the code to add a new Grid Infrastructure node to an
existing cluster.
"""

import os
import sys
import traceback
import datetime   # used by setup(); previously only available via wildcard imports

from oralogger import *
from oraenv import *
from oracommon import *
from oramachine import *
from orasetupenv import *
from orasshsetup import *
from oracvu import *
from oragiprov import *

class OraGIAdd:
    """
    This class performs the grid add-node operation (software copy, CVU checks,
    gridSetup ADDNODE run and root script execution).
    """
    def __init__(self,oralogger,orahandler,oraenv,oracommon,oracvu,orasetupssh):
        try:
            self.ologger = oralogger
            self.ohandler = orahandler
            self.oenv = oraenv.get_instance()
            self.ocommon = oracommon
            self.ora_env_dict = oraenv.get_env_vars()
            self.file_name = os.path.basename(__file__)
            self.ocvu = oracvu
            self.osetupssh = orasetupssh
            self.ogiprov = OraGIProv(self.ologger,self.ohandler,self.oenv,self.ocommon,self.ocvu,self.osetupssh)
        except BaseException:
            traceback.print_exc(file = sys.stdout)

    def setup(self):
        """
        This function sets up the grid on this machine.
        """
        self.ocommon.log_info_message("Start setup()",self.file_name)
        ct = datetime.datetime.now()
        bts = ct.timestamp()
        giuser,gihome,obase,invloc=self.ocommon.get_gi_params()
        pubhostname = self.ocommon.get_public_hostname()
        retcode1=self.ocvu.check_home(pubhostname,gihome,giuser)
        if retcode1 == 0:
            bstr="Grid home is already installed on this machine"
            self.ocommon.log_info_message(self.ocommon.print_banner(bstr),self.file_name)
        if self.ocommon.check_key("GI_HOME_INSTALLED_FLAG",self.ora_env_dict):
            bstr="Grid is already configured on this machine"
            self.ocommon.log_info_message(self.ocommon.print_banner(bstr),self.file_name)
        else:
            self.env_param_checks()
            self.ocommon.log_info_message("Start perform_ssh_setup()",self.file_name)
            self.perform_ssh_setup()
            self.ocommon.log_info_message("End perform_ssh_setup()",self.file_name)
            # NOTE(review): nesting below reconstructed from a whitespace-mangled
            # patch -- the software copy and its root scripts are grouped under
            # COPY_GRID_SOFTWARE; confirm against the original file.
            if self.ocommon.check_key("COPY_GRID_SOFTWARE",self.ora_env_dict):
                self.ocommon.log_info_message("Start crs_sw_install()",self.file_name)
                self.ogiprov.crs_sw_install()
                self.ocommon.log_info_message("End crs_sw_install()",self.file_name)
                self.ogiprov.run_orainstsh()
                self.ocommon.log_info_message("Start ogiprov.run_rootsh()",self.file_name)
                self.ogiprov.run_rootsh()
                self.ocommon.log_info_message("End ogiprov.run_rootsh()",self.file_name)
            self.ocvu.check_addnode()
            self.ocommon.log_info_message("Start crs_sw_configure()",self.file_name)
            gridrsp=self.crs_sw_configure()
            self.ocommon.log_info_message("End crs_sw_configure()",self.file_name)
            self.run_orainstsh()
            self.ocommon.log_info_message("Start run_rootsh()",self.file_name)
            self.run_rootsh()
            self.ocommon.log_info_message("End run_rootsh()",self.file_name)
            pub_nodes,vip_nodes,priv_nodes=self.ocommon.process_cluster_vars("CRS_NODES")
            crs_nodes=pub_nodes.replace(" ",",")
            for node in crs_nodes.split(","):
                self.clu_checks(node)
            if self.ocommon.detect_k8s_env():
                # NOTE(review): scan handling grouped under the k8s branch as in
                # the flattened source ordering -- confirm intended scope.
                self.ocommon.run_custom_scripts("CUSTOM_GRID_SCRIPT_DIR","CUSTOM_GRID_SCRIPT_FILE",giuser)
                self.ocommon.update_scan(giuser,gihome,None,pubhostname)
                self.ocommon.start_scan(giuser,gihome,pubhostname)
                self.ocommon.update_scan_lsnr(giuser,gihome,pubhostname)
                self.ocommon.start_scan_lsnr(giuser,gihome,pubhostname)
        ct = datetime.datetime.now()
        ets = ct.timestamp()
        totaltime=ets - bts
        self.ocommon.log_info_message("Total time for setup() = [ " + str(round(totaltime,3)) + " ] seconds",self.file_name)

    def env_param_checks(self):
        """
        Perform the env setup checks.
        """
        self.scan_check()
        self.ocommon.check_env_variable("GRID_HOME",True)
        self.ocommon.check_env_variable("GRID_BASE",True)
        self.ocommon.check_env_variable("INVENTORY",True)

    def scan_check(self):
        """
        Log SCAN_NAME when it is set. When GRID_RESPONSE_FILE is used the SCAN
        is validated by CVU through the responsefile instead.
        """
        if self.ocommon.check_key("GRID_RESPONSE_FILE",self.ora_env_dict):
            self.ocommon.log_info_message("GRID_RESPONSE_FILE is set. Ignoring checking SCAN_NAME as CVU will validate responsefile",self.file_name)
        else:
            if self.ocommon.check_key("SCAN_NAME",self.ora_env_dict):
                self.ocommon.log_info_message("SCAN_NAME variable is set: " + self.ora_env_dict["SCAN_NAME"],self.file_name)

    def clu_checks(self,hostname):
        """
        Run the per-node CVU health checks (ohasd, asm, clumgr); any failure
        aborts the setup.
        """
        self.ocommon.log_info_message("Performing CVU checks before DB home installation to make sure clusterware is up and running",self.file_name)
        retcode1=self.ocvu.check_ohasd(hostname)
        retcode2=self.ocvu.check_asm(hostname)
        retcode3=self.ocvu.check_clu(hostname,None)

        if retcode1 == 0:
            msg="Cluvfy ohasd check passed!"
            self.ocommon.log_info_message(msg,self.file_name)
        else:
            msg="Cluvfy ohasd check failed. Exiting.."
            self.ocommon.log_error_message(msg,self.file_name)
            self.ocommon.prog_exit("127")

        if retcode2 == 0:
            msg="Cluvfy asm check passed!"
            self.ocommon.log_info_message(msg,self.file_name)
        else:
            msg="Cluvfy asm check failed. Exiting.."
            self.ocommon.log_error_message(msg,self.file_name)
            self.ocommon.prog_exit("127")

        if retcode3 == 0:
            msg="Cluvfy clumgr check passed!"
            self.ocommon.log_info_message(msg,self.file_name)
        else:
            msg="Cluvfy clumgr check failed. Exiting.."
            self.ocommon.log_error_message(msg,self.file_name)
            self.ocommon.prog_exit("127")

    def perform_ssh_setup(self):
        """
        Set up passwordless ssh for the grid user, unless running under k8s
        where it is already done during env setup.
        """
        if not self.ocommon.detect_k8s_env():
            user=self.ora_env_dict["GRID_USER"]
            ohome=self.ora_env_dict["GRID_HOME"]
            self.osetupssh.setupssh(user,ohome,'ADDNODE')
        else:
            self.ocommon.log_info_message("SSH setup must be already completed during env setup as this is a k8s env.",self.file_name)

    def crs_sw_configure(self):
        """
        Configure the grid home on the new node(s) by running the ADDNODE flow
        from a healthy existing cluster node. Returns the responsefile path.
        """
        ohome=self.ora_env_dict["GRID_HOME"]
        if self.ocommon.check_key("GRID_RESPONSE_FILE",self.ora_env_dict):
            gridrsp=self.check_responsefile()
        else:
            gridrsp=self.prepare_responsefile()

        # Find a healthy existing cluster node (clumgr check over ssh).
        node=""
        existing_crs_nodes=self.ocommon.get_existing_clu_nodes(True)
        for cnode in existing_crs_nodes.split(","):
            if self.ocvu.check_clu(cnode,True) == 0:
                node=cnode
                break

        if node:
            user=self.ora_env_dict["GRID_USER"]
            self.ocommon.scpfile(node,gridrsp,gridrsp,user)
            status=self.ocommon.check_home_inv(None,ohome,user)
            if status:
                self.ocommon.sync_gi_home(node,ohome,user)
            cmd=self.ocommon.get_sw_cmd("ADDNODE",gridrsp,node,None)
            output,error,retcode=self.ocommon.execute_cmd(cmd,None,None)
            self.ocommon.check_os_err(output,error,retcode,None)
            self.ocommon.check_crs_sw_install(output)
        else:
            self.ocommon.log_error_message("Clusterware is not up on any node : " + existing_crs_nodes + ".Exiting...",self.file_name)
            self.ocommon.prog_exit("127")

        return gridrsp

    def check_responsefile(self):
        """
        Return the user-supplied response file after verifying it exists;
        exit otherwise.
        """
        gridrsp=None
        if self.ocommon.check_key("GRID_RESPONSE_FILE",self.ora_env_dict):
            gridrsp=self.ora_env_dict["GRID_RESPONSE_FILE"]
            self.ocommon.log_info_message("GRID_RESPONSE_FILE parameter is set and file location is:" + gridrsp ,self.file_name)

        if os.path.isfile(gridrsp):
            return gridrsp
        else:
            self.ocommon.log_error_message("Grid response file does not exist at its location: " + gridrsp + ".Exiting..",self.file_name)
            self.ocommon.prog_exit("127")

    def prepare_responsefile(self):
        """
        Generate an ADDNODE response file in /tmp when none was passed, using
        the response-file schema that matches the detected grid version.
        """
        self.ocommon.log_info_message("Preparing Grid responsefile.",self.file_name)
        giuser,gihome,obase,invloc=self.ocommon.get_gi_params()
        x = datetime.datetime.now()
        gridrsp='''{1}/grid_addnode_{0}.rsp'''.format(x.strftime("%f"),"/tmp")
        clunodes=self.ocommon.get_crsnodes()
        node=""
        nodeflag=False
        existing_crs_nodes=self.ocommon.get_existing_clu_nodes(True)
        for cnode in existing_crs_nodes.split(","):
            if self.ocvu.check_clu(cnode,True) == 0:
                node=cnode
                nodeflag=True
                break

        if not nodeflag:
            self.ocommon.log_error_message("Unable to find any existing healthy cluster node to verify the cluster status. This can be a ssh problem or cluster is not healthy. Error occurred!")
            self.ocommon.prog_exit("127")

        oraversion=self.ocommon.get_rsp_version("ADDNODE",node)
        version=oraversion.split(".",1)[0].strip()
        if int(version) < 23:
            # BUG FIX: configureRHPS previously used {3} (the version string)
            # instead of {4} ("false").
            rspdata='''
            oracle.install.responseFileVersion=/oracle/install/rspfmt_dbinstall_response_schema_v{3}
            oracle.install.option=CRS_ADDNODE
            ORACLE_BASE={0}
            INVENTORY_LOCATION={1}
            oracle.install.asm.OSDBA=asmdba
            oracle.install.asm.OSOPER=asmoper
            oracle.install.asm.OSASM=asmadmin
            oracle.install.crs.config.clusterNodes={2}
            oracle.install.crs.rootconfig.configMethod=ROOT
            oracle.install.asm.configureAFD=false
            oracle.install.crs.rootconfig.executeRootScript=false
            oracle.install.crs.configureRHPS={4}
            '''.format(obase,invloc,clunodes,oraversion,"false")
        else:
            rspdata='''
            oracle.install.responseFileVersion=/oracle/install/rspfmt_dbinstall_response_schema_v{3}
            oracle.install.option=CRS_ADDNODE
            ORACLE_BASE={0}
            INVENTORY_LOCATION={1}
            OSDBA=asmdba
            OSOPER=asmoper
            OSASM=asmadmin
            clusterNodes={2}
            configMethod=ROOT
            configureAFD=false
            executeRootScript=false
            '''.format(obase,invloc,clunodes,oraversion,"false")

        self.ocommon.write_file(gridrsp,rspdata)
        if os.path.isfile(gridrsp):
            return gridrsp
        else:
            self.ocommon.log_error_message("Grid response file does not exist at its location: " + gridrsp + ".Exiting..",self.file_name)
            self.ocommon.prog_exit("127")


    def run_orainstsh(self):
        """
        Run orainstRoot.sh on every public cluster node after the grid setup.
        """
        giuser,gihome,gbase,oinv=self.ocommon.get_gi_params()
        pub_nodes,vip_nodes,priv_nodes=self.ocommon.process_cluster_vars("CRS_NODES")
        for node in pub_nodes.split(" "):
            cmd='''su - {0} -c "ssh {1} sudo {2}/orainstRoot.sh"'''.format(giuser,node,oinv)
            output,error,retcode=self.ocommon.execute_cmd(cmd,None,None)
            self.ocommon.check_os_err(output,error,retcode,True)

    def run_rootsh(self):
        """
        Run root.sh on every public cluster node after the grid setup.
        """
        giuser,gihome,gbase,oinv=self.ocommon.get_gi_params()
        pub_nodes,vip_nodes,priv_nodes=self.ocommon.process_cluster_vars("CRS_NODES")
        for node in pub_nodes.split(" "):
            cmd='''su - {0} -c "ssh {1} sudo {2}/root.sh"'''.format(giuser,node,gihome)
            output,error,retcode=self.ocommon.execute_cmd(cmd,None,None)
            self.ocommon.check_os_err(output,error,retcode,True)

# ---- next file in patch: common/scripts/oragiprov.py (continues beyond this hunk) ----
in trace_back:
                stack_trace.append("File : %s , Line : %d, Func.Name : %s, Message : %s" % (trace[0], trace[1], trace[2], trace[3]))
            # Continuation of __init__'s except block: log exception type, value
            # and the formatted stack trace through the common logger.
            self.ocommon.log_info_message(ex_type.__name__,self.file_name)
            self.ocommon.log_info_message(ex_value,self.file_name)
            self.ocommon.log_info_message(stack_trace,self.file_name)

    def setup(self):
        """
        Set up Oracle Grid Infrastructure on this machine.

        Orchestrates the whole GI provisioning flow: home/configured checks,
        env validation, SSH setup, optional CVU checks, software install,
        root scripts, cvuqdisk install, CRS configuration and post-config,
        then a cluster health check. Logs total elapsed time at the end.
        """
        self.ocommon.log_info_message("Start setup()",self.file_name)
        ct = datetime.datetime.now()
        bts = ct.timestamp()
        giuser,gihome,obase,invloc=self.ocommon.get_gi_params()
        pubhostname = self.ocommon.get_public_hostname()
        # retcode1 == 0 means a grid home is already installed on this node.
        retcode1=1
        if not self.ocommon.check_key("GI_SW_UNZIPPED_FLAG",self.ora_env_dict):
            retcode1=self.ocvu.check_home(pubhostname,gihome,giuser)
        if retcode1 == 0:
            bstr="Grid home is already installed on this machine"
            self.ocommon.log_info_message(self.ocommon.print_banner(bstr),self.file_name)
        if self.ocommon.check_key("GI_HOME_CONFIGURED_FLAG",self.ora_env_dict):
            bstr="Grid is already configured on this machine"
            self.ocommon.log_info_message(self.ocommon.print_banner(bstr),self.file_name)
        else:
            self.env_param_checks()
            self.ocommon.reset_os_password(giuser)
            self.ocommon.log_info_message("Start perform_ssh_setup()",self.file_name)
            self.perform_ssh_setup()
            self.ocommon.log_info_message("End perform_ssh_setup()",self.file_name)
            if self.ocommon.check_key("RESET_FAILED_SYSTEMD",self.ora_env_dict):
                self.ocommon.log_info_message("Start reset_failed_units()",self.file_name)
                self.reset_failed_units_on_all_nodes()
            # Optional CVU pre-install checks (node reachability/connectivity).
            if self.ocommon.check_key("PERFORM_CVU_CHECKS",self.ora_env_dict):
                self.ocommon.log_info_message("Start ocvu.node_reachability_checks()",self.file_name)
                self.ocvu.node_reachability_checks("public",self.ora_env_dict["GRID_USER"],"INSTALL")
                self.ocommon.log_info_message("End ocvu.node_reachability_checks()",self.file_name)
                self.ocommon.log_info_message("Start ocvu.node_connectivity_checks()",self.file_name)
                self.ocvu.node_connectivity_checks("public",self.ora_env_dict["GRID_USER"],"INSTALL")
                self.ocommon.log_info_message("End ocvu.node_connectivity_checks()",self.file_name)
            # Software-only install plus root scripts, only when no home exists yet
            # and the image is asked to copy the grid software.
            if retcode1 != 0 and self.ocommon.check_key("COPY_GRID_SOFTWARE",self.ora_env_dict):
                self.ocommon.log_info_message("Start crs_sw_instal()",self.file_name)
                self.crs_sw_install()
                self.ocommon.log_info_message("End crs_sw_instal()",self.file_name)
                self.ocommon.log_info_message("Start run_rootsh() and run_orainstsh()",self.file_name)
                self.run_orainstsh()
                self.run_rootsh()
                self.ocommon.log_info_message("End run_rootsh() and run_orainstsh()",self.file_name)
            self.ocommon.log_info_message("Start install_cvuqdisk_on_all_nodes()",self.file_name)
            self.install_cvuqdisk_on_all_nodes()
            self.ocommon.log_info_message("Start crs_config_install()",self.file_name)
            gridrsp=self.crs_config_install()
            self.ocommon.log_info_message("End crs_config_install()",self.file_name)
            self.ocommon.log_info_message("Start run_rootsh()",self.file_name)
            self.run_rootsh()
            self.ocommon.log_info_message("End run_rootsh()",self.file_name)
            self.ocommon.log_info_message("Start execute_postconfig()",self.file_name)
            self.run_postroot(gridrsp)
            self.ocommon.log_info_message("End execute_postconfig()",self.file_name)
            # Post-install health check: ohasd and cluster state.
            retcode1=self.ocvu.check_ohasd(None)
            retcode3=self.ocvu.check_clu(None,None)
            if retcode1 != 0 and retcode3 != 0:
                self.ocommon.log_info_message("Cluster state is not healthy.
Exiting..",self.file_name)
                self.ocommon.prog_exit("127")
            else:
                self.ora_env_dict=self.ocommon.add_key("CLUSTER_SETUP_FLAG","running",self.ora_env_dict)

        # User-supplied hook scripts run after a successful grid setup.
        self.ocommon.run_custom_scripts("CUSTOM_GRID_SCRIPT_DIR","CUSTOM_GRID_SCRIPT_FILE",giuser)

        ct = datetime.datetime.now()
        ets = ct.timestamp()
        totaltime=ets - bts
        self.ocommon.log_info_message("Total time for setup() = [ " + str(round(totaltime,3)) + " ] seconds",self.file_name)

    def env_param_checks(self):
        """
        Perform the environment setup checks: SCAN name plus the mandatory
        GRID_HOME/GRID_BASE/INVENTORY variables (ASM_DISCOVERY_DIR is optional).
        """
        self.scan_check()
        self.ocommon.check_env_variable("GRID_HOME",True)
        self.ocommon.check_env_variable("GRID_BASE",True)
        self.ocommon.check_env_variable("INVENTORY",True)
        self.ocommon.check_env_variable("ASM_DISCOVERY_DIR",None)

    def scan_check(self):
        """
        Check that SCAN_NAME is set; skipped when a user response file is
        supplied because CVU validates the response file instead.
        """
        if self.ocommon.check_key("GRID_RESPONSE_FILE",self.ora_env_dict):
            self.ocommon.log_info_message("GRID_RESPONSE_FILE is set. Ignoring checking SCAN_NAME as CVU will validate responsefile",self.file_name)
        else:
            if self.ocommon.check_key("SCAN_NAME",self.ora_env_dict):
                self.ocommon.log_info_message("SCAN_NAME variable is set: " + self.ora_env_dict["SCAN_NAME"],self.file_name)
                #ipaddr=self.ocommon.get_ip(self.ora_env_dict["SCAN_NAME"])
                #status=self.ocommon.validate_ip(ipaddr)
                #if status:
                #   self.ocommon.log_info_message("SCAN_NAME is a valid IP. Check passed...",self.file_name)
                #else:
                #   self.ocommon.log_error_message("SCAN_NAME is not a valid IP. Check failed. Exiting...",self.file_name)
                #   self.ocommon.prog_exit("127")
            else:
                self.ocommon.log_error_message("SCAN_NAME is not set.
Exiting...",self.file_name)
                self.ocommon.prog_exit("127")

    def perform_ssh_setup(self):
        """
        Set up passwordless SSH for the grid user. A single-node cluster gets a
        self-contained key pair; multi-node clusters delegate to osetupssh
        unless pre-generated keys were injected via SSH_PRIVATE_KEY/SSH_PUBLIC_KEY.
        """
        #if not self.ocommon.detect_k8s_env():
        pub_nodes,vip_nodes,priv_nodes=self.ocommon.process_cluster_vars("CRS_NODES")
        crs_nodes=pub_nodes.replace(" ",",")
        crs_nodes_list=crs_nodes.split(",")
        if len(crs_nodes_list) == 1:
            self.ocommon.log_info_message("Cluster size=1. Node=" + crs_nodes_list[0],self.file_name)
            user=self.ora_env_dict["GRID_USER"]
            cmd='''su - {0} -c "/bin/rm -rf ~/.ssh ; sleep 1; /bin/ssh-keygen -t rsa -q -N \'\' -f ~/.ssh/id_rsa ; sleep 1; /bin/ssh-keyscan {1} > ~/.ssh/known_hosts 2>/dev/null ; sleep 1; /bin/cp ~/.ssh/id_rsa.pub ~/.ssh/authorized_keys"'''.format(user,crs_nodes_list[0])
            output,error,retcode=self.ocommon.execute_cmd(cmd,None,None)
            self.ocommon.check_os_err(output,error,retcode,None)
        else:
            if not self.ocommon.check_key("SSH_PRIVATE_KEY",self.ora_env_dict) and not self.ocommon.check_key("SSH_PUBLIC_KEY",self.ora_env_dict):
                user=self.ora_env_dict["GRID_USER"]
                ohome=self.ora_env_dict["GRID_HOME"]
                self.osetupssh.setupssh(user,ohome,"INSTALL")
                #if self.ocommon.check_key("VERIFY_SSH",self.ora_env_dict):
                #   self.osetupssh.verifyssh(user,"INSTALL")
            else:
                self.ocommon.log_info_message("SSH setup must be already completed during env setup as this this env variables SSH_PRIVATE_KEY and SSH_PUBLIC_KEY are set.",self.file_name)

    def crs_sw_install(self):
        """
        Run the CRS software-only install on all public nodes in parallel.
        """
        giuser,gihome,gibase,oinv=self.ocommon.get_gi_params()
        pub_nodes,vip_nodes,priv_nodes=self.ocommon.process_cluster_vars("CRS_NODES")
        crs_nodes=pub_nodes.replace(" ",",")
        # NOTE(review): the guard checks key "OSDBA" but reads "OSDBA_GROUP" --
        # KeyError if OSDBA is set without OSDBA_GROUP; confirm intended key.
        osdba=self.ora_env_dict["OSDBA_GROUP"] if self.ocommon.check_key("OSDBA",self.ora_env_dict) else "asmdba"
        # NOTE(review): "OSPER_GROUP" looks like a typo for "OSOPER_GROUP" -- confirm.
        osoper=self.ora_env_dict["OSPER_GROUP"] if self.ocommon.check_key("OSPER_GROUP",self.ora_env_dict) else "asmoper"
        osasm=self.ora_env_dict["OSASM_GROUP"] if
self.ocommon.check_key("OSASM_GROUP",self.ora_env_dict) else "asmadmin"
        unixgrp="oinstall"
        hostname=self.ocommon.get_public_hostname()
        lang=self.ora_env_dict["LANGUAGE"] if self.ocommon.check_key("LANGUAGE",self.ora_env_dict) else "en"

        #copyflag=" -noCopy "
        copyflag=" -noCopy "
        # NOTE(review): this branch re-assigns the identical value, so copyflag is
        # always " -noCopy "; presumably it should be "" when COPY_GRID_SOFTWARE
        # is set -- confirm intent before changing.
        if not self.ocommon.check_key("COPY_GRID_SOFTWARE",self.ora_env_dict):
            copyflag=" -noCopy "

        oraversion=self.ocommon.get_rsp_version("INSTALL",None)
        # Major version number, e.g. "19" from "19.3.0".
        version=oraversion.split(".",1)[0].strip()

        ## Clearing the dictionary
        self.mythread.clear()
        mythreads=[]
        #self.mythread.clear()
        myproc=[]

        # One Process per public node; each runs the software-only install remotely.
        for node in pub_nodes.split(" "):
            #self.crs_sw_install_on_node(giuser,copyflag,crs_nodes,oinv,gihome,gibase,osdba,osoper,osasm,version,node)
            self.ocommon.log_info_message("Running CRS Sw install on node " + node,self.file_name)
            #thread=Thread(target=self.ocommon.crs_sw_install_on_node,args=(giuser,copyflag,crs_nodes,oinv,gihome,gibase,osdba,osoper,osasm,version,node))
            ##thread.setDaemon(True)
            #mythreads.append(thread)

            thread=Process(target=self.ocommon.crs_sw_install_on_node,args=(giuser,copyflag,crs_nodes,oinv,gihome,gibase,osdba,osoper,osasm,version,node))
            #thread.setDaemon(True)
            mythreads.append(thread)
            thread.start()

#        for thread in mythreads:
#            thread.start()
#            sleep(10)
#            self.ocommon.log_info_message("Starting thread ",self.file_name)

        for thread in mythreads: # iterates over the processes
            thread.join() # waits until the process has finished work
            self.ocommon.log_info_message("Joining the threads ",self.file_name)

    def crs_config_install(self):
        """
        Configure CRS using a response file (user-supplied or generated),
        optionally validating it with CVU first. Returns the response file path.
        """
        gridrsp=""
        netmasklist=None

        if self.ocommon.check_key("GRID_RESPONSE_FILE",self.ora_env_dict):
            gridrsp=self.check_responsefile()
        else:
            gridrsp,netmasklist=self.prepare_responsefile()

        if self.ocommon.check_key("PERFORM_CVU_CHECKS",self.ora_env_dict):
            self.ocvu.cluvfy_checkrspfile(gridrsp,self.ora_env_dict["GRID_HOME"],self.ora_env_dict["GRID_USER"])
        cmd=self.ocommon.get_sw_cmd("INSTALL",gridrsp,None,netmasklist)
        output,error,retcode=self.ocommon.execute_cmd(cmd,None,None)
        self.ocommon.check_os_err(output,error,retcode,None)
        self.check_crs_config_install(output)

        return gridrsp

    def check_responsefile(self):
        """
        Return the user-supplied response file path after verifying it exists;
        exits the program otherwise.
        """
        gridrsp=None
        if self.ocommon.check_key("GRID_RESPONSE_FILE",self.ora_env_dict):
            gridrsp=self.ora_env_dict["GRID_RESPONSE_FILE"]
            self.ocommon.log_info_message("GRID_RESPONSE_FILE parameter is set and file location is:" + gridrsp ,self.file_name)

        if os.path.isfile(gridrsp):
            return gridrsp
        else:
            self.ocommon.log_error_message("Grid response file does not exist at its location: " + gridrsp + ".Exiting..",self.file_name)
            self.ocommon.prog_exit("127")

    def prepare_responsefile(self):
        """
        Build a gridSetup response file from environment variables when the
        user did not provide one. Returns (responsefile_path, netmasklist).
        """
        self.ocommon.log_info_message("Preparing Grid responsefile.",self.file_name)
        asmfg_disk=""
        asm_disk=""
        gimrfg_disk=""
        gimr_disk=""
        giuser,gihome,obase,invloc=self.ocommon.get_gi_params()
        dgred=self.ora_env_dict["CRS_ASMDG_REDUNDANCY"] if self.ocommon.check_key("CRS_ASMDG_REDUNDANCY",self.ora_env_dict) else "EXTERNAL"
        asmfg_disk,asm_disk=self.ocommon.build_asm_device("CRS_ASM_DEVICE_LIST",dgred)
        # GIMR diskgroup is only built for DOMAIN clusters.
        if self.ocommon.check_key("CLUSTER_TYPE",self.ora_env_dict):
            if self.ora_env_dict["CLUSTER_TYPE"] == 'DOMAIN':
                gimrfg_disk,gimr_disk=self.ocommon.build_asm_device("GIMR_ASM_DEVICE_LIST",dgred)

        ## Variable Assignments
        scanname=self.ora_env_dict["SCAN_NAME"]
        scanport=self.ora_env_dict["SCAN_PORT"] if self.ocommon.check_key("SCAN_PORT",self.ora_env_dict) else "1521"
        clutype=self.ora_env_dict["CLUSTER_TYPE"] if self.ocommon.check_key("CLUSTER_TYPE",self.ora_env_dict) else "STANDALONE"
        cluname=self.ora_env_dict["CLUSTER_NAME"] if
self.ocommon.check_key("CLUSTER_NAME",self.ora_env_dict) else "racnode-c" + clunodes=self.ocommon.get_crsnodes() + nwiface,netmasklist=self.ocommon.get_nwifaces() + gimrflag=self.ora_env_dict["GIMR_FLAG"] if self.ocommon.check_key("GIMR",self.ora_env_dict) else "false" + passwd=self.ocommon.get_asm_passwd().replace('\n', ' ').replace('\r', '') + dgname=self.ocommon.rmdgprefix(self.ora_env_dict["CRS_ASM_DISKGROUP"]) if self.ocommon.check_key("CRS_ASM_DISKGROUP",self.ora_env_dict) else "DATA" + fgname=asmfg_disk + asmdisk=asm_disk + discovery_str=self.ocommon.build_asm_discovery_str("CRS_ASM_DEVICE_LIST") + asmstr=self.ora_env_dict["CRS_ASM_DISCOVERY_DIR"] if self.ocommon.check_key("CRS_ASM_DISCOVERY_DIR",self.ora_env_dict) else discovery_str + oraversion=self.ocommon.get_rsp_version("INSTALL",None) + self.ocommon.log_info_message("oraversion" + oraversion, self.file_name) + disksWithFGNames=asmdisk.replace(',',',,') + ',' + self.ocommon.log_info_message("disksWithFGNames" + disksWithFGNames, self.file_name) + gridrsp="/tmp/grid.rsp" + + version=oraversion.split(".",1)[0].strip() + self.ocommon.log_info_message("disk" + version, self.file_name) + if int(version) < 23: + return self.get_responsefile(obase,invloc,scanname,scanport,clutype,cluname,clunodes,nwiface,gimrflag,passwd,dgname,dgred,fgname,asmdisk,asmstr,disksWithFGNames,oraversion,gridrsp,netmasklist) + else: + return self.get_23c_responsefile(obase,invloc,scanname,scanport,clutype,cluname,clunodes,nwiface,gimrflag,passwd,dgname,dgred,fgname,asmdisk,asmstr,disksWithFGNames,oraversion,gridrsp,netmasklist) + + + def get_responsefile(self,obase,invloc,scanname,scanport,clutype,cluname,clunodes,nwiface,gimrflag,passwd,dgname,dgred,fgname,asmdisk,asmstr,disksWithFGNames,oraversion,gridrsp,netmasklist): + """ + This function prepare the response file if no response file passed + """ + self.ocommon.log_info_message("I am in get_responsefile", self.file_name) + rspdata=''' + 
oracle.install.responseFileVersion=/oracle/install/rspfmt_dbinstall_response_schema_v{15}
        oracle.install.option=CRS_CONFIG
        ORACLE_BASE={0}
        INVENTORY_LOCATION={1}
        oracle.install.asm.OSDBA=asmdba
        oracle.install.asm.OSOPER=asmoper
        oracle.install.asm.OSASM=asmadmin
        oracle.install.crs.config.gpnp.scanName={2}
        oracle.install.crs.config.gpnp.scanPort={3}
        oracle.install.crs.config.clusterName={5}
        oracle.install.crs.config.clusterNodes={6}
        oracle.install.crs.config.networkInterfaceList={7}
        oracle.install.crs.configureGIMR={8}
        oracle.install.crs.config.storageOption=
        oracle.install.asm.SYSASMPassword={9}
        oracle.install.asm.diskGroup.name={10}
        oracle.install.asm.diskGroup.redundancy={11}
        oracle.install.asm.diskGroup.AUSize=4
        oracle.install.asm.diskGroup.disksWithFailureGroupNames={18}
        oracle.install.asm.diskGroup.disks={13}
        oracle.install.asm.diskGroup.quorumFailureGroupNames=
        oracle.install.asm.diskGroup.diskDiscoveryString={14}
        oracle.install.asm.monitorPassword={9}
        oracle.install.crs.rootconfig.configMethod=ROOT
        oracle.install.asm.configureAFD=false
        oracle.install.crs.rootconfig.executeRootScript=false
        oracle.install.crs.config.ignoreDownNodes=false
        oracle.install.config.managementOption=NONE
        oracle.install.crs.configureRHPS={16}
        oracle.install.crs.config.ClusterConfiguration={17}
        '''.format(obase,invloc,scanname,scanport,clutype,cluname,clunodes,nwiface,gimrflag,passwd,dgname,dgred,fgname,asmdisk,asmstr,oraversion,"false","STANDALONE",disksWithFGNames)
#        fdata="\n".join([s for s in rspdata.split("\n") if s])
        # Persist the rendered template, then confirm it landed on disk.
        self.ocommon.write_file(gridrsp,rspdata)
        if os.path.isfile(gridrsp):
            return gridrsp,netmasklist
        else:
            self.ocommon.log_error_message("Grid response file does not exist at its location: " + gridrsp + ".Exiting..",self.file_name)
            self.ocommon.prog_exit("127")

    def get_23c_responsefile(self,obase,invloc,scanname,scanport,clutype,cluname,clunodes,nwiface,gimrflag,passwd,dgname,dgred,fgname,asmdisk,asmstr,disksWithFGNames,oraversion,gridrsp,netmasklist):
        """
        Build the 23c-format (flat keys) gridSetup response file when none was
        supplied. Returns (responsefile_path, netmasklist); exits on write failure.
        """
        self.ocommon.log_info_message("I am in get_23c_responsefile", self.file_name)
        rspdata='''
        oracle.install.responseFileVersion=/oracle/install/rspfmt_dbinstall_response_schema_v{15}
        installOption=CRS_CONFIG
        ORACLE_BASE={0}
        INVENTORY_LOCATION={1}
        OSDBA=asmdba
        OSOPER=asmoper
        OSASM=asmadmin
        clusterUsage={16}
        scanName={2}
        scanPort={3}
        clusterName={5}
        clusterNodes={6}
        networkInterfaceList={7}
        storageOption=
        sysasmPassword={9}
        diskGroupName={10}
        redundancy={11}
        auSize=4
        disksWithFailureGroupNames={17}
        diskList={13}
        quorumFailureGroupNames=
        diskString={14}
        asmsnmpPassword={9}
        configMethod=ROOT
        configureAFD=false
        executeRootScript=false
        ignoreDownNodes=false
        managementOption=NONE
        '''.format(obase,invloc,scanname,scanport,clutype,cluname,clunodes,nwiface,gimrflag,passwd,dgname,dgred,fgname,asmdisk,asmstr,oraversion,"RAC",disksWithFGNames)
#        fdata="\n".join([s for s in rspdata.split("\n") if s])
        self.ocommon.write_file(gridrsp,rspdata)
        if os.path.isfile(gridrsp):
            return gridrsp,netmasklist
        else:
            self.ocommon.log_error_message("Grid response file does not exist at its location: " + gridrsp + ".Exiting..",self.file_name)
            self.ocommon.prog_exit("127")

    def check_crs_config_install(self,swdata):
        """
        Verify the CRS config-install output mentions root.sh and the
        executeConfigTools step; exit the program otherwise.
        """
        #if not self.ocommon.check_substr_match(swdata,"orainstRoot.sh"):
        #   self.ocommon.log_error_message("Grid software install failed. Exiting...",self.file_name)
        #   self.ocommon.prog_exit("127")
        if not self.ocommon.check_substr_match(swdata,"root.sh"):
            self.ocommon.log_error_message("Grid software install failed.
Exiting...",self.file_name)
            self.ocommon.prog_exit("127")
        if not self.ocommon.check_substr_match(swdata,"executeConfigTools -responseFile"):
            self.ocommon.log_error_message("Grid software install failed. Exiting...",self.file_name)
            self.ocommon.prog_exit("127")

    def check_crs_sw_install(self,swdata):
        """
        Verify the software-only install output mentions both orainstRoot.sh
        and root.sh; exit the program otherwise.
        """
        if not self.ocommon.check_substr_match(swdata,"orainstRoot.sh"):
            self.ocommon.log_error_message("Grid software install failed. Exiting...",self.file_name)
            self.ocommon.prog_exit("127")
        if not self.ocommon.check_substr_match(swdata,"root.sh"):
            self.ocommon.log_error_message("Grid software install failed. Exiting...",self.file_name)
            self.ocommon.prog_exit("127")

    def run_orainstsh(self):
        """
        Run orainstRoot.sh (from the inventory location) on every public node
        via ssh as the grid user with sudo.
        """
        giuser,gihome,gbase,oinv=self.ocommon.get_gi_params()
        pub_nodes,vip_nodes,priv_nodes=self.ocommon.process_cluster_vars("CRS_NODES")
        for node in pub_nodes.split(" "):
            cmd='''su - {0} -c "ssh {1} sudo {2}/orainstRoot.sh"'''.format(giuser,node,oinv)
            output,error,retcode=self.ocommon.execute_cmd(cmd,None,None)
            self.ocommon.check_os_err(output,error,retcode,True)

    def run_rootsh(self):
        """
        Run root.sh on every public node in parallel, one Process per node.
        """
        giuser,gihome,gbase,oinv=self.ocommon.get_gi_params()
        pub_nodes,vip_nodes,priv_nodes=self.ocommon.process_cluster_vars("CRS_NODES")
        # Clear the dict
        self.mythread.clear()
        mythreads=[]
        for node in pub_nodes.split(" "):
            self.ocommon.log_info_message("Running root.sh on node " + node,self.file_name)
            thread=Process(target=self.run_rootsh_on_node,args=(node,giuser,gihome))
            #thread.setDaemon(True)
            mythreads.append(thread)
            thread.start()

#        for thread in mythreads:
#            thread.start()
#            sleep(10)
#            self.ocommon.log_info_message("Starting root.sh thread ",self.file_name)

        for thread in mythreads: # iterates over the processes
            thread.join() # waits until the process has finished work
            self.ocommon.log_info_message("Joining the root.sh thread ",self.file_name)

    def run_rootsh_on_node(self,node,giuser,gihome):
        """
        Run root.sh on a single node over ssh as the grid user with sudo.
        """
        cmd='''su - {0} -c "ssh {1} sudo {2}/root.sh"'''.format(giuser,node,gihome)
        output,error,retcode=self.ocommon.execute_cmd(cmd,None,None)
        self.ocommon.check_os_err(output,error,retcode,True)
#        if len(self.mythread) > 0:
#            if node in self.mythread.keys():
#                swthread_list=self.mythread[node]
#                value=swthread_list[0]
#                new_list=[value,'FALSE']
#                new_val={node,tuple(new_list)}
#                self.mythread.update(new_val)

    def run_postroot(self,gridrsp):
        """
        Execute the post-root configuration tools:
        gridSetup.sh -executeConfigTools against the response file.
        """
        giuser,gihome,gbase,oinv=self.ocommon.get_gi_params()
        cmd='''su - {0} -c "{1}/gridSetup.sh -executeConfigTools -responseFile {2} -silent"'''.format(giuser,gihome,gridrsp)
        output,error,retcode=self.ocommon.execute_cmd(cmd,None,None)
        self.ocommon.check_os_err(output,error,retcode,None)

    def reset_systemd(self):
        """
        Repeatedly run 'systemctl reset-failed' until stopThreaFlag is set.
        """
        # NOTE(review): the leading 'pass' is dead code and the loop only exits
        # via self.stopThreaFlag -- presumably driven from a background thread
        # while root.sh runs; confirm before removing.
        pass
        while True:
            self.ocommon.log_info_message("Root.sh is running.
Resetting systemd to avoid failure.",self.file_name) + cmd='''systemctl reset-failed'''.format() + cmd='''systemctl reset-failed'''.format() + output,error,retcode=self.ocommon.execute_cmd(cmd,None,None) + self.ocommon.check_os_err(output,error,retcode,None) + cmd = '''systemctl is-system-running'''.format() + cmd = '''systemctl is-system-running'''.format() + output,error,retcode=self.ocommon.execute_cmd(cmd,None,None) + self.ocommon.check_os_err(output,error,retcode,None) + sleep(3) + if self.stopThreaFlag: + break + def reset_failed_units_on_all_nodes(self): + pub_nodes,vip_nodes,priv_nodes=self.ocommon.process_cluster_vars("CRS_NODES") + for node in pub_nodes.split(" "): + self.ocommon.log_info_message("Running reset_failed_units() on node " + node,self.file_name) + self.reset_failed_units(node) + + def reset_failed_units(self,node): + RESET_FAILED_SYSTEMD = 'true' + SERVICE_NAME = "rhnsd" + SCRIPT_DIR = "/opt/scripts/startup" + RESET_FAILED_UNITS = "resetFailedUnits.sh" + GRID_USER = "grid" + CRON_JOB_FREQUENCY = "* * * * *" + + def error_exit(message): + raise Exception(message) + + giuser,gihome,obase,invloc=self.ocommon.get_gi_params() + + if RESET_FAILED_SYSTEMD != 'false': + if subprocess.run(["pgrep", "-x", SERVICE_NAME], stdout=subprocess.DEVNULL).returncode == 0: + self.ocommon.log_info_message(SERVICE_NAME + " is running.",self.file_name) + # Check if the service is responding + if subprocess.run(["systemctl", "is-active", "--quiet", SERVICE_NAME]).returncode != 0: + self.ocommon.log_info_message(SERVICE_NAME + " is not responding. 
Stopping the service.",self.file_name) + cmd='''su - {0} -c "ssh {1} sudo systemctl stop {2}"'''.format(giuser,node,SERVICE_NAME) + output,error,retcode=self.ocommon.execute_cmd(cmd,None,None) + self.ocommon.check_os_err(output,error,retcode,None) + cmd='''su - {0} -c "ssh {1} sudo systemctl disable {2}"'''.format(giuser,node,SERVICE_NAME) + output,error,retcode=self.ocommon.execute_cmd(cmd,None,None) + self.ocommon.check_os_err(output,error,retcode,None) + self.ocommon.log_info_message(SERVICE_NAME + "stopped.",self.file_name) + else: + self.ocommon.log_info_message(SERVICE_NAME + " is responsive. No action needed.",self.file_name) + else: + self.ocommon.log_info_message(SERVICE_NAME + " is not running.",self.file_name) + + self.ocommon.log_info_message("Setting Crontab",self.file_name) + cmd = '''su - {0} -c "ssh {1} 'sudo crontab -l | {{ cat; echo \\"{2} {3}/{4}\\"; }} | sudo crontab -'"'''.format(giuser, node, CRON_JOB_FREQUENCY, SCRIPT_DIR, RESET_FAILED_UNITS) + try: + output,error,retcode=self.ocommon.execute_cmd(cmd,None,None) + self.ocommon.check_os_err(output,error,retcode,None) + self.ocommon.log_info_message("Successfully installed " + SCRIPT_DIR + "/" + RESET_FAILED_UNITS + " using crontab",self.file_name) + except subprocess.CalledProcessError: + error_exit("Error occurred in crontab setup") + + def install_cvuqdisk_on_all_nodes(self): + pub_nodes,vip_nodes,priv_nodes=self.ocommon.process_cluster_vars("CRS_NODES") + for node in pub_nodes.split(" "): + self.ocommon.log_info_message("Running install_cvuqdisk() on node " + node,self.file_name) + self.install_cvuqdisk(node) + + def install_cvuqdisk(self,node): + rpm_directory = "/u01/app/23c/grid/cv/rpm" + giuser,gihome,obase,invloc=self.ocommon.get_gi_params() + try: + # Construct the rpm command using wildcard for version + cmd = '''su - {0} -c "ssh {1} 'sudo rpm -Uvh {2}/cvuqdisk-*.rpm'"'''.format(giuser, node, rpm_directory) + # Run the rpm command using subprocess + 
output,error,retcode=self.ocommon.execute_cmd(cmd,None,None) + self.ocommon.check_os_err(output,error,retcode,None) + self.ocommon.log_info_message("Successfully installed cvuqdisk file.",self.file_name) + + except subprocess.CalledProcessError as e: + self.ocommon.log_error_message("Error installing cvuqdisk. Exiting..." + e,self.file_name) diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oragridadd.py b/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oragridadd.py new file mode 100644 index 0000000000..257c696deb --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oragridadd.py @@ -0,0 +1,53 @@ +#!/usr/bin/python3 + +############################# +# Copyright (c) 2024, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl +# Author: paramdeep.saini@oracle.com +############################ + +""" + This file contains to the code call different classes objects based on setup type +""" + +from oralogger import * +from oraenv import * +from oracommon import * +from oramachine import * +from orasetupenv import * +from oraracstdby import * +from oraracadd import * +from oracvu import * +from orasshsetup import * + +import os +import sys + +class OraGridAdd: + """ + This class Add the Grid instances + """ + def __init__(self,oralogger,orahandler,oraenv,oracommon): + try: + self.ologger = oralogger + self.ohandler = orahandler + self.oenv = oraenv.get_instance() + self.ocommon = oracommon + self.ora_env_dict = oraenv.get_env_vars() + self.file_name = os.path.basename(__file__) + self.osetupssh = OraSetupSSH(self.ologger,self.ohandler,self.oenv,self.ocommon) + self.ocvu = OraCvu(self.ologger,self.ohandler,self.oenv,self.ocommon) + except BaseException as ex: + ex_type, ex_value, ex_traceback = sys.exc_info() + trace_back = sys.tracebacklimit.extract_tb(ex_traceback) + stack_trace = list() + for trace in trace_back: + 
stack_trace.append("File : %s , Line : %d, Func.Name : %s, Message : %s" % (trace[0], trace[1], trace[2], trace[3]))
            # Log the exception details through the common logger.
            self.ocommon.log_info_message(ex_type.__name__,self.file_name)
            self.ocommon.log_info_message(ex_value,self.file_name)
            self.ocommon.log_info_message(stack_trace,self.file_name)

    def setup(self):
        """
        This function setup the grid on this machine
        """
        # Placeholder: grid-add setup is not implemented yet.
        pass
diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oralogger.py b/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oralogger.py
new file mode 100644
index 0000000000..920cc58ad0
--- /dev/null
+++ b/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oralogger.py
@@ -0,0 +1,182 @@
#!/usr/bin/python3

#############################
# Copyright (c) 2024, Oracle and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl
# Author: paramdeep.saini@oracle.com
############################

"""
 This file provides the functionality to log the event in console and file
"""

import logging
import os

class LoggingType(object):
    # Enumeration of the logging sinks supported by the handler chain.
    CONSOLE = 1
    FILE = 2
    STDOUT = 3

class OraLogger(object):
    """
    This is a class constructor which sets parameter for logger.

    Attributes:
       filename_ (string): Filename which we need to set to store logs in a file.
    """
    def __init__(self, filename_):
        """
        This is a class constructor which sets parameter for logger.

        Attributes:
           filename_ (string): Filename which we need to set to store logs in a file.
+ """ + self.filename_ = filename_ + # Set to default values can be changed later from other classes objects + self.console_ = LoggingType.CONSOLE + self.file_ = LoggingType.FILE + self.stdout_ = LoggingType.STDOUT + self.msg_ = None + self.functname_ = None + self.lineno_ = None + self.logtype_ = "INFO" + self.fmtstr_ = "%(asctime)s: %(levelname)s: %(message)s" + self.datestr_ = "%m/%d/%Y %I:%M:%S %p" + self.root = logging.getLogger() + self.root.setLevel(logging.DEBUG) + self.formatter = logging.Formatter('%(asctime)s %(levelname)8s:%(message)s', "%m/%d/%Y %I:%M:%S %p") + self.stdoutfile_ = "/proc/1/fd/1" + #self.stdoutfile_ = "/dev/pts/0" + # self.stdoutfile_ = "/tmp/test.log" + + def getStdOutValue(self): + return self.stdout_ + +class Handler(object): + """ + This is a class which sets the handler for next logger. + """ + def __init__(self): + """ + This is a handler class constructor and nexthandler is set to None. + """ + self.nextHandler = None + + def handle(self, request): + ''' + This is a function which set the next handler. + + Attributes: + request (object): Object of the class oralogger. + ''' + self.nextHandler.handle(request) + + def print_message(self,request,lhandler): + """ + This function set the log type to INFO, WARN, DEBUG and CRITICAL. + + Attribute: + request (object): Object of the class oralogger. + lhandler: This parameter accept the loghandler. + """ + if request.logtype_ == "WARN": + request.root.warning(request.msg_) + elif request.logtype_ == "DEBUG": + request.root.debug(request.msg_) + elif request.logtype_ == "CRITICAL": + request.root.critical(request.msg_) + elif request.logtype_ == "ERROR": + request.root.error(request.msg_) + else: + request.root.info(request.msg_) + + request.root.removeHandler(lhandler) + +class FHandler(Handler): + """ + This is a class which sets the handler for next logger. + """ + def handle(self,request): + """ + This function print the message and call next handler. 
+ + Attribut: + request: Object of OraLogger + """ + if request.file_ == LoggingType.FILE: + fh = logging.FileHandler(request.filename_) + request.root.addHandler(fh) + fh.setFormatter(request.formatter) + self.print_message(request,fh) + super(FHandler, self).handle(request) + else: + super(FHandler, self).handle(request) + + def print_message(self,request,fh): + """ + This function log the message to console/file/stdout. + """ + super(FHandler, self).print_message(request,fh) + +class CHandler(Handler): + """ + This is a class which sets the handler for next logger. + """ + def handle(self,request): + """ + This function print the message and call next handler. + + Attribute: + request: Object of OraLogger + """ + if request.console_ == LoggingType.CONSOLE: + # ch = logging.StreamHandler() + ch = logging.FileHandler("/tmp/test.log") + request.root.addHandler(ch) + ch.setFormatter(request.formatter) + self.print_message(request,ch) + super(CHandler, self).handle(request) + else: + super(CHandler, self).handle(request) + + def print_message(self,request,ch): + """ + This function log the message to console/file/stdout. + """ + super(CHandler, self).print_message(request,ch) + + +class StdHandler(Handler): + """ + This is a class which sets the handler for next logger. + """ + def handle(self,request): + """ + This function print the message and call next handler. + + Attribute: + request: Object of OraLogger + """ + request.stdout_ = request.getStdOutValue() + if request.stdout_ == LoggingType.STDOUT: + st = logging.FileHandler(request.stdoutfile_) + request.root.addHandler(st) + st.setFormatter(request.formatter) + self.print_message(request,st) + super(StdHandler, self).handle(request) + else: + super(StdHandler, self).handle(request) + + def print_message(self,request,st): + """ + This function log the message to console/file/stdout. 
+ """ + super(StdHandler, self).print_message(request,st) + +class PassHandler(Handler): + """ + This is a class which sets the handler for next logger. + """ + def handle(self, request): + pass diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oramachine.py b/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oramachine.py new file mode 100644 index 0000000000..62d9b749d7 --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oramachine.py @@ -0,0 +1,63 @@ +#!/usr/bin/python3 + +############################# +# Copyright (c) 2024, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl +# Author: paramdeep.saini@oracle.com +############################ + +from oralogger import * +from oraenv import * +from oracommon import * +from oramachine import * +from orasetupenv import * + +import os +import sys + +class OraMachine: + """ + This calss setup the compute before starting the installation. + """ + def __init__(self,oralogger,orahandler,oraenv,oracommon,oracvu,orasetupssh): + """ + This constructor of OraMachine class to setup the compute + + Attributes: + oralogger (object): object of OraLogger Class. + ohandler (object): object of Handler class. + oenv (object): object of singleton OraEnv class. + ocommon(object): object of OraCommon class. + ora_env_dict(dict): Dict of env variable populated based on env variable for the setup. + file_name(string): Filename from where logging message is populated. 
+ """ + self.ologger = oralogger + self.ohandler = orahandler + self.oenv = oraenv.get_instance() + self.ocommon = oracommon + self.ora_env_dict = oraenv.get_env_vars() + self.file_name = os.path.basename(__file__) + self.ocvu = oracvu + self.osetupssh = orasetupssh + self.osetupenv = OraSetupEnv(self.ologger,self.ohandler,self.oenv,self.ocommon,self.ocvu,self.osetupssh) + def setup(self): + """ + This function setup the compute before starting the installation + """ + self.ocommon.log_info_message("Start setup()",self.file_name) + ct = datetime.datetime.now() + bts = ct.timestamp() + + self.memory_check() + self.osetupenv.setup() + + ct = datetime.datetime.now() + ets = ct.timestamp() + totaltime=ets - bts + self.ocommon.log_info_message("Total time for setup() = [ " + str(round(totaltime,3)) + " ] seconds",self.file_name) + + def memory_check(self): + """ + This function check the memory available inside the container + """ + pass diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oramiscops.py b/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oramiscops.py new file mode 100644 index 0000000000..3c03b8e068 --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oramiscops.py @@ -0,0 +1,426 @@ +#!/usr/bin/python3 + +############################# +# Copyright (c) 2024, Oracle and/or its affiliates. 
#!/usr/bin/python3

#############################
# Copyright (c) 2024, Oracle and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl
# Author: paramdeep.saini@oracle.com
############################

"""
 This file contains to the code call different classes objects based on setup type
"""

import os
import sys
import traceback

from oralogger import *
from oraenv import *
from oracommon import *
from oramachine import *
from orasetupenv import *
from orasshsetup import *
from oracvu import *
from oragiprov import *
from oraasmca import *
from oraracdel import *
from oraracadd import *
from oraracprov import *
from oraracstdby import *

class OraMiscOps:
    """
    This class performs the misc RAC options such as RAC delete
    """
    def __init__(self, oralogger, orahandler, oraenv, oracommon, oracvu, orasetupssh):
        """Capture the shared helper objects and build the standby helper."""
        try:
            self.ologger = oralogger
            self.ohandler = orahandler
            self.oenv = oraenv.get_instance()
            self.ocommon = oracommon
            self.ora_env_dict = oraenv.get_env_vars()
            self.file_name = os.path.basename(__file__)
            self.osetupssh = orasetupssh
            self.ocvu = oracvu
            self.oracstdby = OraRacStdby(self.ologger, self.ohandler, self.oenv, self.ocommon, self.ocvu, self.osetupssh)
        except BaseException:
            # NOTE(review): constructor failures are printed but swallowed;
            # the instance may be partially initialised afterwards.
            traceback.print_exc(file=sys.stdout)

    def setup(self):
        """
        Dispatch the misc RAC operations requested through env variables.
        """
        self.ocommon.log_info_message("Start setup()", self.file_name)
        bts = datetime.datetime.now().timestamp()
        self.ocommon.update_gi_env_vars_from_rspfile()
        if self.ocommon.check_key("DBCA_RESPONSE_FILE", self.ora_env_dict):
            self.ocommon.update_rac_env_vars_from_rspfile(self.ora_env_dict["DBCA_RESPONSE_FILE"])
        # Each env key triggers exactly one misc operation; evaluation order
        # matches the original if/else chain.
        dispatch = (
            ("DEL_RACHOME", self.delracnode),
            ("TNS_PARAMS", self.populate_tnsfile),
            ("CHECK_RAC_INST", self.checkraclocal),
            ("CHECK_GI_LOCAL", self.checkgilocal),
            ("CHECK_RAC_DB", self.checkracdb),
            ("CHECK_DB_ROLE", self.checkdbrole),
            ("CHECK_CONNECT_STR", self.checkconnstr),
            ("CHECK_PDB_CONNECT_STR", self.checkpdbconnstr),
            ("NEW_DB_LSNR_ENDPOINTS", self.setupdblsnr),
            ("NEW_LOCAL_LISTENER", self.setuplocallsnr),
        )
        for key, action in dispatch:
            if self.ocommon.check_key(key, self.ora_env_dict):
                action()
        ets = datetime.datetime.now().timestamp()
        totaltime = ets - bts
        self.ocommon.log_info_message("Total time for setup() = [ " + str(round(totaltime, 3)) + " ] seconds", self.file_name)

    def delracnode(self):
        """
        Delete this RAC node by delegating to OraRacDel.
        """
        self.ocommon.del_node_params("DEL_PARAMS")
        msg = "Creating and calling instance to delete the rac node"
        oracdel = OraRacDel(self.ologger, self.ohandler, self.oenv, self.ocommon, self.ocvu, self.osetupssh)
        self.ocommon.log_info_message(msg, self.file_name)
        oracdel.setup()

    def populate_tnsfile(self):
        """
        Create local tnsnames.ora entries and copy the file across the cluster.
        """
        scanname, scanport, dbuname = self.process_tns_params("TNS_PARAMS")
        osuser, dbhome, dbbase, oinv = self.ocommon.get_db_params()
        self.oracstdby.create_local_tns_enteries(dbhome, dbuname, scanname, scanport, osuser, "oinstall")
        tnsfile = '''{0}/network/admin/tnsnames.ora'''.format(dbhome)
        self.ocommon.copy_file_cluster(tnsfile, tnsfile, osuser)

    def process_tns_params(self, key):
        """
        Parse scan name, scan port and db unique name out of the semicolon
        separated key=value pairs held in env var *key*; exit on missing values.
        """
        self.ocommon.log_info_message("Processing TNS Params", self.file_name)
        raw = self.ora_env_dict[key].replace('"', '')
        params = dict(entry.split("=") for entry in raw.split(";"))
        scanname = params.get('scan_name')
        scanport = params.get('scan_port')
        dbuname = params.get('db_unique_name')

        if not scanport:
            scanport = 1521

        if scanname and scanport and dbuname:
            return scanname, scanport, dbuname
        msg1 = '''scan_name={0},scan_port={1}'''.format((scanname or "Missing Value"), (scanport or "Missing Value"))
        self.ocommon.log_info_message(msg1, self.file_name)
        msg2 = '''db_unique_name={0}'''.format((dbuname or "Missing Value"))
        self.ocommon.log_info_message(msg2, self.file_name)
        self.ocommon.prog_exit("Error occurred")
cvar_dict=dict(item.split("=") for item in cvar_str.split(";")) + for ckey in cvar_dict.keys(): + if ckey == 'scan_name': + scanname = cvar_dict[ckey] + if ckey == 'scan_port': + scanport = cvar_dict[ckey] + if ckey == 'db_unique_name': + dbuname = cvar_dict[ckey] + + if not scanport: + scanport=1521 + + if scanname and scanport and dbuname: + return scanname,scanport,dbuname + else: + msg1='''scan_name={0},scan_port={1}'''.format((scanname or "Missing Value"),(scanport or "Missing Value")) + self.ocommon.log_info_message(msg1,self.file_name) + msg2='''db_unique_name={0}'''.format((dbuname or "Missing Value")) + self.ocommon.log_info_message(msg2,self.file_name) + self.ocommon.prog_exit("Error occurred") + + def checkracdb(self): + """ + This will verify RAC DB + """ + status="" + mode="" + dbuser,dbhome,dbase,oinv=self.ocommon.get_db_params() + retcode1=self.ocvu.check_home(None,dbhome,dbuser) + retcode1=0 + if retcode1 != 0: + status="RAC_NOT_INSTALLED_OR_CONFIGURED" + else: + mode=self.checkracsvc() + status=mode + + msg='''Database state is {0}'''.format(status) + self.ocommon.log_info_message(msg,self.file_name) + print(status) + + def checkconnstr(self): + """ + Check the connect str + """ + status="" + mode="" + dbuser,dbhome,dbase,oinv=self.ocommon.get_db_params() + retcode1=self.ocvu.check_home(None,dbhome,dbuser) + retcode1=0 + if retcode1 != 0: + status="RAC_NOT_INSTALLED_OR_CONFIGURED" + else: + state=self.checkracsvc() + if state == 'OPEN': + mode=self.getconnectstr() + else: + mode="NOTAVAILABLE" + + status=mode + + msg='''Database connect str is {0}'''.format(status) + self.ocommon.log_info_message(msg,self.file_name) + print(status) + + def checkpdbconnstr(self): + """ + Check the PDB connect str + """ + status="" + mode="" + dbuser,dbhome,dbase,oinv=self.ocommon.get_db_params() + retcode1=self.ocvu.check_home(None,dbhome,dbuser) + retcode1=0 + if retcode1 != 0: + status="RAC_NOT_INSTALLED_OR_CONFIGURED" + else: + state=self.checkracsvc() + if state 
== 'OPEN': + mode=self.getpdbconnectstr() + else: + mode="NOTAVAILABLE" + + status=mode + + msg='''PDB connect str is {0}'''.format(status) + self.ocommon.log_info_message(msg,self.file_name) + print(status) + + def checkdbrole(self): + """ + This will verify RAC DB Role + """ + status="" + mode="" + dbuser,dbhome,dbase,oinv=self.ocommon.get_db_params() + #retcode1=self.ocvu.check_home(None,dbhome,dbuser) + retcode1=0 + if retcode1 != 0: + status="RAC_NOT_INSTALLED_OR_CONFIGURED" + else: + mode=self.checkracsvc() + if (mode == "OPEN") or ( mode == "MOUNT"): + osuser,dbhome,dbbase,oinv=self.ocommon.get_db_params() + osid=self.ora_env_dict["DB_NAME"] if self.ocommon.check_key("DB_NAME",self.ora_env_dict) else "ORCLCDB" + scanname=self.ora_env_dict["SCAN_NAME"] + scanport=self.ora_env_dict["SCAN_PORT"] if self.ocommon.check_key("SCAN_PORT",self.ora_env_dict) else "1521" + connect_str=self.ocommon.get_sqlplus_str(dbhome,osid,osuser,"sys",'HIDDEN_STRING',scanname,scanport,osid,None,None,None) + status=self.ocommon.get_db_role(osuser,dbhome,osid,connect_str) + else: + status="NOTAVAILABLE" + + msg='''Database role set to {0}'''.format(status) + self.ocommon.log_info_message(msg,self.file_name) + print(status) + + def getconnectstr(self): + """ + get the connect str + """ + osuser,dbhome,dbbase,oinv=self.ocommon.get_db_params() + osid=self.ora_env_dict["DB_NAME"] if self.ocommon.check_key("DB_NAME",self.ora_env_dict) else "ORCLCDB" + scanname=self.ora_env_dict["SCAN_NAME"] + scanport=self.ora_env_dict["SCAN_PORT"] if self.ocommon.check_key("SCAN_PORT",self.ora_env_dict) else "1521" + ##connect_str=self.ocommon.get_sqlplus_str(dbhome,osid,osuser,"sys",'HIDDEN_STRING',scanname,scanport,osid,None,None,None) + connect_str='''{0}:{1}/{2}'''.format(scanname,scanport,osid) + + return connect_str + + def getpdbconnectstr(self): + """ + get the PDB connect str + """ + osuser,dbhome,dbbase,oinv=self.ocommon.get_db_params() + osid=self.ora_env_dict["PDB_NAME"] if 
self.ocommon.check_key("PDB_NAME",self.ora_env_dict) else "ORCLPDB" + scanname=self.ora_env_dict["SCAN_NAME"] + scanport=self.ora_env_dict["SCAN_PORT"] if self.ocommon.check_key("SCAN_PORT",self.ora_env_dict) else "1521" + ##connect_str=self.ocommon.get_sqlplus_str(dbhome,osid,osuser,"sys",'HIDDEN_STRING',scanname,scanport,osid,None,None,None) + connect_str='''{0}:{1}/{2}'''.format(scanname,scanport,osid) + + return connect_str + + # def checkracsvc(self): + # """ + ## Check the RAC SVC + # """ + # osuser,dbhome,dbbase,oinv=self.ocommon.get_db_params() + # osid=self.ora_env_dict["DB_NAME"] if self.ocommon.check_key("DB_NAME",self.ora_env_dict) else "ORCLCDB" + # connect_str=self.getconnectstr() + # status=self.get_db_status(osuser,dbhome,osid,connect_str) + # if self.ocommon.check_substr_match(mode,"OPEN"): + # mode="OPENED" + # elif self.ocommon.check_substr_match(mode,"MOUNT"): + ## mode="MOUNTED" + # elif self.ocommon.check_substr_match(mode,"NOMOUNT"): + # mode="NOMOUNT" + # else: + # mode="NOTAVAILABLE" + + # return mode + + def checkracsvc(self): + """ + Check the RAC SVC + """ + mode="" + osuser,dbhome,dbbase,oinv=self.ocommon.get_db_params() + osid=self.ora_env_dict["DB_NAME"] if self.ocommon.check_key("DB_NAME",self.ora_env_dict) else "ORCLCDB" + scanname=self.ora_env_dict["SCAN_NAME"] + scanport=self.ora_env_dict["SCAN_PORT"] if self.ocommon.check_key("SCAN_PORT",self.ora_env_dict) else "1521" + connect_str=self.ocommon.get_sqlplus_str(dbhome,osid,osuser,"sys",'HIDDEN_STRING',scanname,scanport,osid,None,None,None) + status=self.ocommon.get_dbinst_status(osuser,dbhome,osid,connect_str) + if self.ocommon.check_substr_match(status,"OPEN"): + mode="OPEN" + elif self.ocommon.check_substr_match(status,"MOUNT"): + mode="MOUNT" + elif self.ocommon.check_substr_match(status,"NOMOUNT"): + mode="NOMOUNT" + else: + mode="NOTAVAILABLE" + + return mode + + def checkraclocal(self): + """ + Check the RAC software + """ + status="" + mode="" + 
dbuser,dbhome,dbase,oinv=self.ocommon.get_db_params() + retcode1=self.ocvu.check_home(None,dbhome,dbuser) + retcode1=0 + if retcode1 != 0: + status="RAC_NOT_INSTALLED_OR_CONFIGURED" + else: + mode=self.checkracinst() + status=mode + + msg='''Database instance state is {0}'''.format(status) + self.ocommon.log_info_message(msg,self.file_name) + print(status) + + def checkracinst(self): + """ + This function check the rac inst is up + """ + mode1="" + msg="Checking RAC instance status" + oracdb = OraRacProv(self.ologger,self.ohandler,self.oenv,self.ocommon,self.ocvu,self.osetupssh) + self.ocommon.log_info_message(msg,self.file_name) + status,osid,host,mode=self.ocommon.check_dbinst() + if self.ocommon.check_substr_match(mode,"OPEN"): + mode1="OPEN" + elif self.ocommon.check_substr_match(mode,"MOUNT"): + mode1="MOUNT" + elif self.ocommon.check_substr_match(mode,"NOMOUNT"): + mode1="NOMOUNT" + else: + mode1="NOTAVAILABLE" + + return mode1 + + def checkgilocal(self): + """ + Check GI + """ + status="" + retcode=self.checkgihome() + if retcode != 0: + status="GI_NOT_INSTALLED_OR_CONFIGURED" + else: + node=self.ocommon.get_public_hostname() + retcode1=self.checkclulocal(node) + if retcode1 != 0: + status="NOT HEALTHY" + else: + status="HEALTHY" + msg='''GI status is {0}'''.format(status) + self.ocommon.log_info_message(msg,self.file_name) + print(status) + + def checkclulocal(self,node): + """ + This function check the cluster health + """ + retcode=self.ocvu.check_clu(node,None) + return retcode + + def checkgihome(self): + """ + Check the GI home + """ + giuser,gihome,obase,invloc=self.ocommon.get_gi_params() + pubhostname = self.ocommon.get_public_hostname() + retcode1=self.ocvu.check_home(pubhostname,gihome,giuser) + return retcode1 + + def setupdblsnr(self): + """ + update db lsnr + """ + value=self.ora_env_dict["NEW_DB_LSNR_ENDPOINTS"] + self.ocommon.log_info_message("lsnr new end Points are set to :" + value,self.file_name ) + if 
self.check_key("DB_LISTENER_ENDPOINTS",self.ora_env_dict): + self.ocommon.log_info_message("lsnr old end points were set to :" + self.ora_env_dict["DB_LISTENER_ENDPOINTS"],self.file_name ) + self.ora_env_dict=self.update_key("DB_LISTENER_ENDPOINTS",value,self.ora_env_dict) + else: + self.ora_env_dict=self.add_key("DB_LISTENER_ENDPOINTS",value,self.ora_env_dict) + self.ocommon.setup_db_lsnr() + + def setuplocallsnr(self): + """ + update db lsnr + """ + value=self.ora_env_dict["NEW_LOCAL_LISTENER"] + self.ocommon.log_info_message("local lsnr new end Points are set to :" + value,self.file_name ) + if self.check_key("LOCAL_LISTENER",self.ora_env_dict): + self.ocommon.log_info_message("lsnr old end points were set to :" + self.ora_env_dict["LOCAL_LISTENER"],self.file_name ) + self.ora_env_dict=self.update_key("LOCAL_LISTENER",value,self.ora_env_dict) + else: + self.ora_env_dict=self.add_key("LOCAL_LISTENER",value,self.ora_env_dict) + self.ocommon.set_local_listener() \ No newline at end of file diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oraracadd.py b/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oraracadd.py new file mode 100644 index 0000000000..5f91b5ae4f --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oraracadd.py @@ -0,0 +1,225 @@ +#!/usr/bin/python3 + +############################# +# Copyright (c) 2024, Oracle and/or its affiliates. 
#!/usr/bin/python3

#############################
# Copyright (c) 2024, Oracle and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl
# Author: paramdeep.saini@oracle.com
############################

"""
 This file contains to the code call different classes objects based on setup type
"""

import os
import sys
import traceback
# setup() uses datetime.datetime.now(); import it explicitly instead of
# relying on a wildcard import to provide it.
import datetime

from oralogger import *
from oraenv import *
from oracommon import *
from oramachine import *
from orasetupenv import *
from orasshsetup import *
from oraracstdby import *
# BUG FIX: removed "from oraracadd import *" -- the module imported itself.
from oracvu import *
from oragiadd import *

class OraRacAdd:
    """
    This class Add the RAC home and RAC instances
    """
    def __init__(self, oralogger, orahandler, oraenv, oracommon, oracvu, orasetupssh):
        """
        Capture the shared helper objects and build the GI add-node helper.

        Attributes:
            oralogger (object): object of OraLogger Class.
            orahandler (object): object of Handler class.
            oraenv (object): object of singleton OraEnv class.
            oracommon (object): object of OraCommon class.
            oracvu (object): object of OraCvu class.
            orasetupssh (object): object of OraSetupSSH class.
        """
        try:
            self.ologger = oralogger
            self.ohandler = orahandler
            self.oenv = oraenv.get_instance()
            self.ocommon = oracommon
            self.ora_env_dict = oraenv.get_env_vars()
            self.file_name = os.path.basename(__file__)
            self.osetupssh = orasetupssh
            self.ocvu = oracvu
            self.ogiadd = OraGIAdd(self.ologger, self.ohandler, self.oenv, self.ocommon, self.ocvu, self.osetupssh)
        except BaseException as ex:
            # NOTE(review): constructor failures are printed but swallowed.
            traceback.print_exc(file=sys.stdout)

    def setup(self):
        """
        Add this node to the cluster: GI addnode, DB software install,
        optional instance creation, listener/service configuration.
        """
        self.ocommon.log_info_message("Start setup()", self.file_name)
        ct = datetime.datetime.now()
        bts = ct.timestamp()
        sshFlag = False
        self.ocommon.log_info_message("Start ogiadd.setup()", self.file_name)
        self.ogiadd.setup()
        self.ocommon.log_info_message("End ogiadd.setup()", self.file_name)
        self.env_param_checks()
        self.clu_checks()
        dbuser, dbhome, dbase, oinv = self.ocommon.get_db_params()
        retcode1 = self.ocvu.check_home(None, dbhome, dbuser)
        status = self.ocommon.check_rac_installed(retcode1)
        if not status:
            # DB home not yet installed on this node: set up SSH, install the
            # software and run root.sh.
            sshFlag = True
            self.ocommon.log_info_message("Start perform_ssh_setup()", self.file_name)
            self.perform_ssh_setup()
            self.ocommon.log_info_message("End perform_ssh_setup()", self.file_name)
            self.ocommon.log_info_message("Start db_sw_install()", self.file_name)
            self.db_sw_install()
            self.ocommon.log_info_message("End db_sw_install()", self.file_name)
            self.ocommon.log_info_message("Start run_rootsh()", self.file_name)
            self.run_rootsh()
            self.ocommon.log_info_message("End run_rootsh()", self.file_name)
        if not self.ocommon.check_key("SKIP_DBCA", self.ora_env_dict):
            status, osid, host, mode = self.ocommon.check_dbinst()
            hostname = self.ocommon.get_public_hostname()
            if status:
                msg = '''Database instance {0} already exist on this machine {1}.'''.format(osid, hostname)
                self.ocommon.log_info_message(self.ocommon.print_banner(msg), self.file_name)
            else:
                if not sshFlag:
                    self.perform_ssh_setup()
                self.ocommon.log_info_message("Start add_dbinst()", self.file_name)
                self.add_dbinst()
                self.ocommon.log_info_message("End add_dbinst()", self.file_name)
            self.ocommon.log_info_message("Setting db listener", self.file_name)
            self.ocommon.setup_db_lsnr()
            self.ocommon.log_info_message("Setting local listener", self.file_name)
            self.ocommon.set_local_listener()
            self.ocommon.setup_db_service("modify")
            sname, osid, opdb, sparams = self.ocommon.get_service_name()
            if sname is not None:
                self.ocommon.start_db_service(sname, osid)
                self.ocommon.check_db_service_status(sname, osid)
            self.ocommon.log_info_message("End create_db()", self.file_name)
            self.perform_db_check()
            status, osid, host, mode = self.ocommon.check_dbinst()
            if status:
                msg = '''Oracle Database {0} is up and running on {1}.'''.format(osid, host)
                self.ocommon.log_info_message(self.ocommon.print_banner(msg), self.file_name)
            else:
                msg = '''Oracle Database {0} is not up and running on {1}.'''.format(osid, host)
                self.ocommon.log_info_message(self.ocommon.print_banner(msg), self.file_name)
                self.ocommon.prog_exit("127")
        ct = datetime.datetime.now()
        ets = ct.timestamp()
        totaltime = ets - bts
        self.ocommon.log_info_message("Total time for setup() = [ " + str(round(totaltime, 3)) + " ] seconds", self.file_name)
env_param_checks(self): + """ + Perform the env setup checks + """ + self.ocommon.check_env_variable("DB_HOME",True) + self.ocommon.check_env_variable("DB_BASE",True) + self.ocommon.check_env_variable("INVENTORY",True) + + def clu_checks(self): + """ + Performing clu checks + """ + self.ocommon.log_info_message("Performing CVU checks on new nodes before DB home installation to make sure clusterware is up and running",self.file_name) + hostname=self.ocommon.get_public_hostname() + retcode1=self.ocvu.check_ohasd(hostname) + retcode2=self.ocvu.check_asm(hostname) + retcode3=self.ocvu.check_clu(hostname,None) + if retcode1 == 0: + msg="Cluvfy ohasd check passed!" + self.ocommon.log_info_message(msg,self.file_name) + else: + msg="Cluvfy ohasd check faild. Exiting.." + self.ocommon.log_error_message(msg,self.file_name) + self.ocommon.prog_exit("127") + + if retcode2 == 0: + msg="Cluvfy asm check passed!" + self.ocommon.log_info_message(msg,self.file_name) + else: + msg="Cluvfy asm check faild. Exiting.." + self.ocommon.log_error_message(msg,self.file_name) + self.ocommon.prog_exit("127") + + if retcode3 == 0: + msg="Cluvfy clumgr check passed!" + self.ocommon.log_info_message(msg,self.file_name) + else: + msg="Cluvfy clumgr check faild. Exiting.." 
+ self.ocommon.log_error_message(msg,self.file_name) + self.ocommon.prog_exit("127") + + def perform_ssh_setup(self): + """ + Perform ssh setup + """ + if not self.ocommon.detect_k8s_env(): + dbuser,dbhome,dbase,oinv=self.ocommon.get_db_params() + self.osetupssh.setupssh(dbuser,dbhome,'ADDNODE') + #if self.ocommon.check_key("VERIFY_SSH",self.ora_env_dict): + #self.osetupssh.verifyssh(dbuser,'ADDNODE') + else: + self.ocommon.log_info_message("SSH setup must be already completed during env setup as this this k8s env.",self.file_name) + + def db_sw_install(self): + """ + Perform the db_install + """ + dbuser,dbhome,dbase,oinv=self.ocommon.get_db_params() + pub_nodes,vip_nodes,priv_nodes=self.ocommon.process_cluster_vars("CRS_NODES") + crs_nodes=pub_nodes.replace(" ",",") + hostname=self.ocommon.get_public_hostname() + existing_crs_nodes=self.ocommon.get_existing_clu_nodes(True) + node="" + nodeflag=False + for cnode in existing_crs_nodes.split(","): + retcode3=self.ocvu.check_clu(cnode,True) + if retcode3 == 0: + node=cnode + nodeflag=True + break + + copyflag="" + if not self.ocommon.check_key("COPY_GRID_SOFTWARE",self.ora_env_dict): + copyflag=" -noCopy " + + if nodeflag: + cmd='''su - {0} -c "ssh -vvv {4} 'sh {1}/addnode/addnode.sh \\"CLUSTER_NEW_NODES={{{2}}}\\" -skipPrereqs -waitForCompletion -ignoreSysPrereqs {3} -silent'"'''.format(dbuser,dbhome,crs_nodes,copyflag,node) + output,error,retcode=self.ocommon.execute_cmd(cmd,None,None) + self.ocommon.check_os_err(output,error,retcode,True) + else: + self.ocommon.log_error_message("Clusterware is not up on any node : " + existing_crs_nodes + ".Exiting...",self.file_name) + self.prog_exit("127") + + def run_rootsh(self): + """ + This function run the root.sh after DB home install + """ + dbuser,dbhome,dbbase,oinv=self.ocommon.get_db_params() + pub_nodes,vip_nodes,priv_nodes=self.ocommon.process_cluster_vars("CRS_NODES") + for node in pub_nodes.split(" "): + cmd='''su - {0} -c "ssh {1} sudo 
{2}/root.sh"'''.format(dbuser,node,dbhome) + output,error,retcode=self.ocommon.execute_cmd(cmd,None,None) + self.ocommon.check_os_err(output,error,retcode,True) + + def add_dbinst(self): + """ + This function add the DB inst + """ + dbuser,dbhome,dbbase,oinv=self.ocommon.get_db_params() + pub_nodes,vip_nodes,priv_nodes=self.ocommon.process_cluster_vars("CRS_NODES") + existing_crs_nodes=self.ocommon.get_existing_clu_nodes(True) + node="" + nodeflag=False + for cnode in existing_crs_nodes.split(","): + retcode3=self.ocvu.check_clu(cnode,True) + if retcode3 == 0: + node=cnode + nodeflag=True + break + if nodeflag: + dbname,osid,dbuname=self.ocommon.getdbnameinfo() + for new_node in pub_nodes.split(" "): + cmd='''su - {0} -c "ssh {2} '{1}/bin/dbca -addInstance -silent -nodeName {3} -gdbName {4}'"'''.format(dbuser,dbhome,node,new_node,osid) + output,error,retcode=self.ocommon.execute_cmd(cmd,None,None) + self.ocommon.check_os_err(output,error,retcode,True) + else: + self.ocommon.log_error_message("Clusterware is not up on any node : " + existing_crs_nodes + ".Exiting...",self.file_name) + self.prog_exit("127") diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oraracdel.py b/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oraracdel.py new file mode 100644 index 0000000000..e8e1659c32 --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oraracdel.py @@ -0,0 +1,272 @@ +#!/usr/bin/python3 + +############################# +# Copyright (c) 2024, Oracle and/or its affiliates. 
#!/usr/bin/python3

#############################
# Copyright (c) 2024, Oracle and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl
# Author: paramdeep.saini@oracle.com
############################

"""
 This file contains to the code call different classes objects based on setup type
"""

import os
import sys
import traceback
# setup() uses datetime.datetime.now(); import it explicitly instead of
# relying on a wildcard import to provide it.
import datetime

from oralogger import *
from oraenv import *
from oracommon import *
from oramachine import *
from orasetupenv import *
from orasshsetup import *
from oracvu import *
from oragiprov import *
from oraasmca import *

class OraRacDel:
    """
    This class delete the RAC database
    """
    def __init__(self, oralogger, orahandler, oraenv, oracommon, oracvu, orasetupssh):
        """
        Capture the shared helper objects used by the deletion steps.
        """
        try:
            self.ologger = oralogger
            self.ohandler = orahandler
            self.oenv = oraenv.get_instance()
            self.ocommon = oracommon
            self.ora_env_dict = oraenv.get_env_vars()
            self.file_name = os.path.basename(__file__)
            self.osetupssh = orasetupssh
            self.ocvu = oracvu
        except BaseException as ex:
            # NOTE(review): constructor failures are printed but swallowed.
            traceback.print_exc(file=sys.stdout)

    def setup(self):
        """
        Drive the RAC node deletion: DB instance, DB home, GI home, and
        finally the cluster node itself; fix up SCAN on k8s.
        """
        self.ocommon.log_info_message("Start setup()", self.file_name)
        ct = datetime.datetime.now()
        bts = ct.timestamp()
        self.env_param_checks()
        giuser, gihome, obase, invloc = self.ocommon.get_gi_params()
        self.ocommon.populate_existing_cls_nodes()
        #self.clu_checks()
        hostname = self.ocommon.get_public_hostname()
        if self.ocommon.check_key("EXISTING_CLS_NODE", self.ora_env_dict):
            # BUG FIX: "".split(",") returns [""] (length 1), so the original
            # len(...) == 0 test could never flag the last node. Treat an
            # empty remaining-node list as "this is the last cluster node".
            # Also add_key() is a helper on self.ocommon, not on this class.
            if not self.ora_env_dict["EXISTING_CLS_NODE"].strip():
                self.ora_env_dict = self.ocommon.add_key("LAST_CRS_NODE", "true", self.ora_env_dict)

        self.del_dbinst_main(hostname)
        self.del_dbhome_main(hostname)
        self.del_gihome_main(hostname)
        self.del_ginode(hostname)
        if self.ocommon.detect_k8s_env():
            if self.ocommon.check_key("EXISTING_CLS_NODE", self.ora_env_dict):
                # Re-home the SCAN on the first surviving node.
                node = self.ora_env_dict["EXISTING_CLS_NODE"].split(",")[0]
                self.ocommon.update_scan(giuser, gihome, None, node)
                self.ocommon.start_scan(giuser, gihome, node)
                self.ocommon.update_scan_lsnr(giuser, gihome, node)

        ct = datetime.datetime.now()
        ets = ct.timestamp()
        totaltime = ets - bts
        self.ocommon.log_info_message("Total time for setup() = [ " + str(round(totaltime, 3)) + " ] seconds", self.file_name)
self.ocommon.start_scan(giuser,gihome,node) + self.ocommon.update_scan_lsnr(giuser,gihome,node) + + ct = datetime.datetime.now() + ets = ct.timestamp() + totaltime=ets - bts + self.ocommon.log_info_message("Total time for setup() = [ " + str(round(totaltime,3)) + " ] seconds",self.file_name) + +##### Check env vars ######## + + def env_param_checks(self): + """ + Perform the env setup checks + """ + self.ocommon.check_env_variable("DB_HOME",True) + self.ocommon.check_env_variable("DB_BASE",True) + self.ocommon.check_env_variable("INVENTORY",True) + + def clu_checks(self): + """ + Performing clu checks + """ + self.ocommon.log_info_message("Performing CVU checks before DB home installation to make sure clusterware is up and running",self.file_name) + hostname=self.ocommon.get_public_hostname() + retcode1=self.ocvu.check_ohasd(hostname) + retcode2=self.ocvu.check_asm(hostname) + retcode3=self.ocvu.check_clu(hostname,None) + + if retcode1 == 0: + msg="Cluvfy ohasd check passed!" + self.ocommon.log_info_message(msg,self.file_name) + else: + msg="Cluvfy ohasd check faild. Exiting.." + self.ocommon.log_error_message(msg,self.file_name) + self.ocommon.prog_exit("127") + + if retcode2 == 0: + msg="Cluvfy asm check passed!" + self.ocommon.log_info_message(msg,self.file_name) + else: + msg="Cluvfy asm check faild. Exiting.." + self.ocommon.log_error_message(msg,self.file_name) + self.ocommon.prog_exit("127") + + if retcode3 == 0: + msg="Cluvfy clumgr check passed!" + self.ocommon.log_info_message(msg,self.file_name) + else: + msg="Cluvfy clumgr check faild. Exiting.." 
+ self.ocommon.log_error_message(msg,self.file_name) + self.ocommon.prog_exit("127") + + +######### Deleting DB Instnce ####### + def del_dbinst_main(self,hostname): + """ + This function call the del_dbinst to perform the db instance deletion + """ + if self.ocommon.check_key("LAST_CRS_NODE",self.ora_env_dict): + msg='''This is a last node {0} in the cluster.'''.format(hostname) + self.ocommon.log_info_message(msg,self.file_name) + else: + status,osid,host,mode=self.ocommon.check_dbinst() + msg='''Database instance {0} exist on this machine {1}.'''.format(osid,hostname) + self.ocommon.log_info_message(msg,self.file_name) + self.del_dbinst() + status,osid,host,mode=self.ocommon.check_dbinst() + if status: + msg='''Oracle Database {0} is stil up and running on {1}.'''.format(osid,host) + self.ocommon.log_info_message(self.ocommon.print_banner(msg),self.file_name) + self.ocommon.prog_exit("127") + else: + msg='''Oracle Database {0} is not up and running on {1}.'''.format(osid,host) + self.ocommon.log_info_message(self.ocommon.print_banner(msg),self.file_name) + + def del_dbinst(self): + """ + Perform the db instance deletion + """ + dbuser,dbhome,dbase,oinv=self.ocommon.get_db_params() + dbname,osid,dbuname=self.ocommon.getdbnameinfo() + hostname=self.ocommon.get_public_hostname() + inst_sid=self.ocommon.get_inst_sid(dbuser,dbhome,dbname,hostname) + existing_crs_nodes=self.ocommon.get_existing_clu_nodes(True) + node="" + nodeflag=False + for cnode in existing_crs_nodes.split(","): + retcode3=self.ocvu.check_clu(cnode,True) + if retcode3 == 0: + node=cnode + nodeflag=True + break + + if inst_sid: + if nodeflag: + cmd='''su - {0} -c "ssh {4} '{1}/bin/dbca -silent -ignorePrereqFailure -deleteInstance -gdbName {2} -nodeName {5} -instanceName {3}'"'''.format(dbuser,dbhome,dbname,inst_sid,node,hostname) + output,error,retcode=self.ocommon.execute_cmd(cmd,None,None) + self.ocommon.check_os_err(output,error,retcode,True) + else: + self.ocommon.log_error_message("Clusterware 
is not up on any node : " + existing_crs_nodes + ".Exiting...",self.file_name) + self.ocommon.prog_exit("127") + else: + self.ocommon.log_info_message("No database instance is up and running on this machine!",self.file_name) + +####### DEL RAC DB HOME ######## + def del_dbhome_main(self,hostname): + """ + This function call the del_dbhome to perform the db home deletion + """ + dbuser,dbhome,dbase,oinv=self.ocommon.get_db_params() + if self.ocommon.check_key("DEL_RACHOME",self.ora_env_dict): + retcode1=self.ocvu.check_home(hostname,dbhome,dbuser) + status=self.ocommon.check_rac_installed(retcode1) + if status: + self.del_dbhome() + else: + self.ocommon.log_info_message("No configured RAC home exist on this machine",self.file_name) + + def del_dbhome(self): + """ + Perform the db home deletion + """ + dbuser,dbhome,dbase,oinv=self.ocommon.get_db_params() + tmpdir=self.ocommon.get_tmpdir() + dbrspdir="/{1}/dbdeinstall_{0}".format(time.strftime("%T"),tmpdir) + self.ocommon.create_dir(dbrspdir,"local",None,"oracle","oinstall") + self.generate_delrspfile(dbrspdir,dbuser,dbhome) + dbrspfile=self.ocommon.latest_file(dbrspdir) + if os.path.isfile(dbrspfile): + cmd='''su - {0} -c "{1}/deinstall/deinstall -silent -local -paramfile {2} "'''.format(dbuser,dbhome,dbrspfile) + output,error,retcode=self.ocommon.execute_cmd(cmd,None,None) + self.ocommon.check_os_err(output,error,retcode,False) + else: + self.ocommon.log_error_message("No responsefile exist under " + dbrspdir,self.file_name) + self.ocommon.prog_exit("127") + + def generate_delrspfile(self,rspdir,user,home): + """ + Generate the responsefile to perform home deletion + """ + cmd='''su - {0} -c "{1}/deinstall/deinstall -silent -checkonly -local -o {2}"'''.format(user,home,rspdir) + output,error,retcode=self.ocommon.execute_cmd(cmd,None,None) + self.ocommon.check_os_err(output,error,retcode,True) + +####### DEL GI HOME ######## + def del_gihome_main(self,hostname): + """ + This function call the del_gihome to perform 
the gi home deletion + """ + giuser,gihome,gbase,oinv=self.ocommon.get_gi_params() + self.ocommon.log_info_message("gi params " + gihome ,self.file_name) + if self.ocommon.check_key("DEL_GIHOME",self.ora_env_dict): + retcode1=self.ocvu.check_home(hostname,gihome,giuser) + status=self.ocommon.check_gi_installed(retcode1,gihome,giuser) + if status: + self.del_gihome() + else: + self.ocommon.log_info_message("No configured GI home exist on this machine",self.file_name) + + def del_gihome(self): + """ + Perform the GI home deletion + """ + giuser,gihome,gbase,oinv=self.ocommon.get_gi_params() + tmpdir=self.ocommon.get_tmpdir() + girspdir="/{1}/gideinstall_{0}".format(time.strftime("%T"),tmpdir) + self.ocommon.create_dir(girspdir,"local",None,"grid","oinstall") + self.generate_delrspfile(girspdir,giuser,gihome) + girspfile=self.ocommon.latest_file(girspdir) + if os.path.isfile(girspfile): + cmd='''su - {0} -c "export TEMP={3};{1}/deinstall/deinstall -silent -local -paramfile {2} "'''.format(giuser,gihome,girspfile,"/var/tmp") + output,error,retcode=self.ocommon.execute_cmd(cmd,None,None) + self.ocommon.check_os_err(output,error,retcode,True) + deinstallDir=self.ocommon.latest_dir(tmpdir,'deins*/') + cmd='''{0}/rootdeinstall.sh'''.format(deinstallDir) + output,error,retcode=self.ocommon.execute_cmd(cmd,None,None) + self.ocommon.check_os_err(output,error,retcode,False) + else: + self.ocommon.log_error_message("No responsefile exist under " + girspdir,self.file_name) + self.ocommon.prog_exit("127") + + def del_ginode(self,hostname): + """ + Perform the GI Node deletion + """ + giuser,gihome,gbase,oinv=self.ocommon.get_gi_params() + + existing_crs_nodes=self.ocommon.get_existing_clu_nodes(True) + node="" + nodeflag=False + for cnode in existing_crs_nodes.split(","): + retcode3=self.ocvu.check_clu(cnode,True) + if retcode3 == 0: + node=cnode + nodeflag=True + break + + if nodeflag: + cmd='''su - {0} -c "ssh {2} '/bin/sudo {1}/bin/crsctl delete node -n 
{3}'"'''.format(giuser,gihome,node,hostname) + output,error,retcode=self.ocommon.execute_cmd(cmd,None,None) + self.ocommon.check_os_err(output,error,retcode,True) + else: + self.ocommon.log_error_message("Clusterware is not up on any node : " + existing_crs_nodes + ".Exiting...",self.file_name) + self.ocommon.prog_exit("127") + diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oraracprov.py b/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oraracprov.py new file mode 100755 index 0000000000..9588858a84 --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oraracprov.py @@ -0,0 +1,544 @@ +#!/usr/bin/python3 + +############################# +# Copyright (c) 2024, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl +# Author: paramdeep.saini@oracle.com +############################ + +""" + This file contains to the code call different classes objects based on setup type +""" + +from distutils.log import debug +import os +import sys +import traceback +import datetime + +from oralogger import * +from oraenv import * +from oracommon import * +from oramachine import * +from orasetupenv import * +from orasshsetup import * +from oracvu import * +from oragiprov import * +from oraasmca import * + +dgname="" +dbfiledest="" +dbrdest="" + +class OraRacProv: + """ + This class provision the RAC database + """ + def __init__(self,oralogger,orahandler,oraenv,oracommon,oracvu,orasetupssh): + try: + self.ologger = oralogger + self.ohandler = orahandler + self.oenv = oraenv.get_instance() + self.ocommon = oracommon + self.ora_env_dict = oraenv.get_env_vars() + self.file_name = os.path.basename(__file__) + self.osetupssh = orasetupssh + self.ocvu = oracvu + self.mythread = {} + self.ogiprov = OraGIProv(self.ologger,self.ohandler,self.oenv,self.ocommon,self.ocvu,self.osetupssh) + self.oasmca = 
OraAsmca(self.ologger,self.ohandler,self.oenv,self.ocommon,self.ocvu,self.osetupssh)
        except BaseException as ex:
            traceback.print_exc(file = sys.stdout)

    def setup(self):
        """
        Provision the RAC DB home on this machine.

        Orchestration: GI provisioning, env checks, per-node CVU checks,
        SSH setup, DB software install + root.sh, then (unless SKIP_DBCA)
        ASM diskgroup creation and database creation or clone.
        """
        self.ocommon.log_info_message("Start setup()",self.file_name)
        ct = datetime.datetime.now()
        bts = ct.timestamp()
        sshFlag=False
        self.ogiprov.setup()
        self.env_param_checks()
        pub_nodes,vip_nodes,priv_nodes=self.ocommon.process_cluster_vars("CRS_NODES")
        crs_nodes=pub_nodes.replace(" ",",")
        # CVU node checks are skipped when the cluster setup flag is already recorded
        if not self.ocommon.check_key("CLUSTER_SETUP_FLAG",self.ora_env_dict):
            for node in crs_nodes.split(","):
                self.clu_checks(node)
        dbuser,dbhome,dbase,oinv=self.ocommon.get_db_params()
        retcode1=self.ocvu.check_home(None,dbhome,dbuser)
        status=self.ocommon.check_rac_installed(retcode1)
        if not status:
            self.ocommon.log_info_message("Start perform_ssh_setup()",self.file_name)
            self.perform_ssh_setup()
            self.ocommon.log_info_message("End perform_ssh_setup()",self.file_name)
            # remember SSH is done so the DBCA path below does not redo it
            sshFlag=True
        status=self.ocommon.check_home_inv(None,dbhome,dbuser)
        if not status:
            self.ocommon.log_info_message("Start db_sw_install()",self.file_name)
            self.db_sw_install()
            self.ocommon.log_info_message("End db_sw_install()",self.file_name)
            self.ocommon.log_info_message("Start run_rootsh()",self.file_name)
            self.run_rootsh()
            self.ocommon.log_info_message("End run_rootsh()",self.file_name)
        if not self.ocommon.check_key("SKIP_DBCA",self.ora_env_dict):
            self.create_asmdg()
            status,osid,host,mode=self.ocommon.check_dbinst()
            hostname=self.ocommon.get_public_hostname()
            if status:
                msg='''Database instance {0} already exist on this machine {1}.'''.format(osid,hostname)
                self.ocommon.log_info_message(self.ocommon.print_banner(msg),self.file_name)
            elif self.ocommon.check_key("CLONE_DB",self.ora_env_dict):
                self.ocommon.log_info_message("Start clone_db()",self.file_name)
                self.clone_db(crs_nodes)
            else:
                if not sshFlag:
                    self.perform_ssh_setup()
                self.ocommon.log_info_message("Start create_db()",self.file_name)
                self.create_db()
                self.ocommon.log_info_message("Setting db listener",self.file_name)
                self.ocommon.setup_db_lsnr()
                self.ocommon.log_info_message("Setting local listener",self.file_name)
                self.ocommon.set_local_listener()
                self.ocommon.setup_db_service("create")
                sname,osid,opdb,sparams=self.ocommon.get_service_name()
                if sname is not None:
                    self.ocommon.start_db_service(sname,osid)
                    self.ocommon.check_db_service_status(sname,osid)
                self.ocommon.log_info_message("End create_db()",self.file_name)
            self.perform_db_check()
        ct = datetime.datetime.now()
        ets = ct.timestamp()
        totaltime=ets - bts
        self.ocommon.log_info_message("Total time for setup() = [ " + str(round(totaltime,3)) + " ] seconds",self.file_name)

    def env_param_checks(self):
        """
        Validate that the mandatory environment variables for a DB home
        install (DB_HOME, DB_BASE, INVENTORY) are set; helpers exit on failure.
        """
        self.ocommon.check_env_variable("DB_HOME",True)
        self.ocommon.check_env_variable("DB_BASE",True)
        self.ocommon.check_env_variable("INVENTORY",True)

    def clu_checks(self,hostname):
        """
        Run CVU checks (ohasd, ASM, clusterware) against *hostname* before
        DB home installation; ohasd and clumgr failures abort, ASM failure
        is only logged.
        """
        self.ocommon.log_info_message("Performing CVU checks before DB home installation to make sure clusterware is up and running on " + hostname,self.file_name)
        # hostname=self.ocommon.get_public_hostname()
        retcode1=self.ocvu.check_ohasd(hostname)
        retcode2=self.ocvu.check_asm(hostname)
        retcode3=self.ocvu.check_clu(hostname,None)

        if retcode1 == 0:
            msg="Cluvfy ohasd check passed!"
            self.ocommon.log_info_message(msg,self.file_name)
        else:
            msg="Cluvfy ohasd check faild. Exiting.."
            self.ocommon.log_error_message(msg,self.file_name)
            self.ocommon.prog_exit("127")

        if retcode2 == 0:
            msg="Cluvfy asm check passed!"
            self.ocommon.log_info_message(msg,self.file_name)
        else:
            msg="Cluvfy asm check faild. Exiting.."
+ self.ocommon.log_error_message(msg,self.file_name) + #self.ocommon.prog_exit("127") + + if retcode3 == 0: + msg="Cluvfy clumgr check passed!" + self.ocommon.log_info_message(msg,self.file_name) + else: + msg="Cluvfy clumgr check faild. Exiting.." + self.ocommon.log_error_message(msg,self.file_name) + self.ocommon.prog_exit("127") + + def perform_ssh_setup(self): + """ + Perform ssh setup + """ + #if not self.ocommon.detect_k8s_env(): + if not self.ocommon.check_key("SSH_PRIVATE_KEY",self.ora_env_dict) and not self.ocommon.check_key("SSH_PUBLIC_KEY",self.ora_env_dict): + dbuser,dbhome,dbase,oinv=self.ocommon.get_db_params() + self.osetupssh.setupssh(dbuser,dbhome,"INSTALL") + #if self.ocommon.check_key("VERIFY_SSH",self.ora_env_dict): + #self.osetupssh.verifyssh(dbuser,"INSTALL") + + def db_sw_install(self): + """ + Perform the db_install + """ + dbuser,dbhome,dbase,oinv=self.ocommon.get_db_params() + pub_nodes,vip_nodes,priv_nodes=self.ocommon.process_cluster_vars("CRS_NODES") + crs_nodes=pub_nodes.replace(" ",",") + osdba=self.ora_env_dict["OSDBA_GROUP"] if self.ocommon.check_key("OSDBA",self.ora_env_dict) else "dba" + osbkp=self.ora_env_dict["OSBACKUPDBA_GROUP"] if self.ocommon.check_key("OSBACKUPDBA_GROUP",self.ora_env_dict) else "backupdba" + osoper=self.ora_env_dict["OSPER_GROUP"] if self.ocommon.check_key("OSPER_GROUP",self.ora_env_dict) else "oper" + osdgdba=self.ora_env_dict["OSDGDBA_GROUP"] if self.ocommon.check_key("OSDGDBA_GROUP",self.ora_env_dict) else "dgdba" + oskmdba=self.ora_env_dict["OSKMDBA_GROUP"] if self.ocommon.check_key("OSKMDBA_GROUP",self.ora_env_dict) else "kmdba" + osracdba=self.ora_env_dict["OSRACDBA_GROUP"] if self.ocommon.check_key("OSRACDBA_GROUP",self.ora_env_dict) else "racdba" + osasm=self.ora_env_dict["OSASM_GROUP"] if self.ocommon.check_key("OSASM_GROUP",self.ora_env_dict) else "asmadmin" + unixgrp="oinstall" + hostname=self.ocommon.get_public_hostname() + lang=self.ora_env_dict["LANGUAGE"] if 
self.ocommon.check_key("LANGUAGE",self.ora_env_dict) else "en" + edition= self.ora_env_dict["DB_EDITION"] if self.ocommon.check_key("DB_EDITION",self.ora_env_dict) else "EE" + ignoreflag= " -ignorePrereq " if self.ocommon.check_key("IGNORE_DB_PREREQS",self.ora_env_dict) else " " + + copyflag=" -noCopy " + if not self.ocommon.check_key("COPY_DB_SOFTWARE",self.ora_env_dict): + copyflag=" -noCopy " + + mythread_list=[] + + oraversion=self.ocommon.get_rsp_version("INSTALL",None) + version=oraversion.split(".",1)[0].strip() + + self.mythread.clear() + mythreads=[] + for node in pub_nodes.split(" "): + self.ocommon.log_info_message("Running DB Sw install on node " + node,self.file_name) + thread=Process(target=self.db_sw_install_on_node,args=(dbuser,hostname,unixgrp,crs_nodes,oinv,lang,dbhome,dbase,edition,osdba,osbkp,osdgdba,oskmdba,osracdba,copyflag,node,ignoreflag)) + #thread.setDaemon(True) + mythreads.append(thread) + thread.start() + +# for thread in mythreads: +# self.ocommon.log_info_message("Starting Thread",self.file_name) +# thread.start() + + for thread in mythreads: # iterates over the threads + thread.join() # waits until the thread has finished wor + + #self.manage_thread() + + def db_sw_install_on_node(self,dbuser,hostname,unixgrp,crs_nodes,oinv,lang,dbhome,dbase,edition,osdba,osbkp,osdgdba,oskmdba,osracdba,copyflag,node,ignoreflag): + """ + Perform the db_install + """ + runCmd="" + if self.ocommon.check_key("APPLY_RU_LOCATION",self.ora_env_dict): + ruLoc=self.ora_env_dict["APPLY_RU_LOCATION"] + runCmd='''runInstaller -applyRU "{0}"'''.format(self.ora_env_dict["APPLY_RU_LOCATION"]) + else: + runCmd='''runInstaller ''' + + + if self.ocommon.check_key("DEBUG_MODE",self.ora_env_dict): + dbgCmd='''{0} -debug '''.format(runCmd) + runCmd=dbgCmd + + rspdata='''su - {0} -c "ssh {17} {1}/{16} {18} -waitforcompletion {15} -silent + oracle.install.option=INSTALL_DB_SWONLY + ORACLE_HOSTNAME={2} + UNIX_GROUP_NAME={3} + oracle.install.db.CLUSTER_NODES={4} + 
INVENTORY_LOCATION={5} + SELECTED_LANGUAGES={6} + ORACLE_HOME={7} + ORACLE_BASE={8} + oracle.install.db.InstallEdition={9} + oracle.install.db.OSDBA_GROUP={10} + oracle.install.db.OSBACKUPDBA_GROUP={11} + oracle.install.db.OSDGDBA_GROUP={12} + oracle.install.db.OSKMDBA_GROUP={13} + oracle.install.db.OSRACDBA_GROUP={14} + SECURITY_UPDATES_VIA_MYORACLESUPPORT=false + DECLINE_SECURITY_UPDATES=true"'''.format(dbuser,dbhome,hostname,unixgrp,crs_nodes,oinv,lang,dbhome,dbase,edition,osdba,osbkp,osdgdba,oskmdba,osracdba,copyflag,runCmd,node,ignoreflag) + cmd=rspdata.replace('\n'," ") + #dbswrsp="/tmp/dbswrsp.rsp" + #self.ocommon.write_file(dbswrsp,rspdata) + #if os.path.isfile(dbswrsp): + #cmd='''su - {0} -c "{1}/runInstaller -ignorePrereq -waitforcompletion -silent -responseFile {2}"'''.format(dbuser,dbhome,dbswrsp) + output,error,retcode=self.ocommon.execute_cmd(cmd,None,None) + self.ocommon.check_os_err(output,error,retcode,None) + #else: + # self.ocommon.log_error_message("DB response file does not exist at its location: " + dbswrsp + ".Exiting..",self.file_name) + # self.ocommon.prog_exit("127") + if len(self.mythread) > 0: + if node in self.mythread.keys(): + swthread_list=self.mythread[node] + value=swthread_list[0] + new_list=[value,'FALSE'] + new_val={node,tuple(new_list)} + self.mythread.update(new_val) + + def run_rootsh(self): + """ + This function run the root.sh after DB home install + """ + dbuser,dbhome,dbbase,oinv=self.ocommon.get_db_params() + pub_nodes,vip_nodes,priv_nodes=self.ocommon.process_cluster_vars("CRS_NODES") + for node in pub_nodes.split(" "): + cmd='''su - {0} -c "ssh {1} sudo {2}/root.sh"'''.format(dbuser,node,dbhome) + output,error,retcode=self.ocommon.execute_cmd(cmd,None,None) + self.ocommon.check_os_err(output,error,retcode,True) + + def create_asmdg(self): + """ + Perform the asm disk group creation + """ + dbuser,dbhome,dbase,oinv=self.ocommon.get_db_params() + if (self.ocommon.check_key("REDO_ASM_DEVICE_LIST",self.ora_env_dict)) and 
(self.ocommon.check_key("LOG_FILE_DEST",self.ora_env_dict)):
            # redo diskgroup: create only when validation says it does not exist yet
            lgdest=self.ocommon.rmdgprefix(self.ora_env_dict["LOG_FILE_DEST"])
            device_prop=self.ora_env_dict["REDO_ASMDG_PROPERTIES"] if self.ocommon.check_key("REDO_ASMDG_PROPERTIES",self.ora_env_dict) else None
            self.ocommon.log_info_message("dg validation for :" + lgdest + " is in progress", self.file_name)
            status=self.oasmca.validate_dg(self.ora_env_dict["REDO_ASM_DEVICE_LIST"],device_prop,lgdest)
            if not status:
                self.oasmca.create_dg(self.ora_env_dict["REDO_ASM_DEVICE_LIST"],device_prop,lgdest)
            else:
                self.ocommon.log_info_message("ASM diskgroup exist!",self.file_name)

        if (self.ocommon.check_key("RECO_ASM_DEVICE_LIST",self.ora_env_dict)) and (self.ocommon.check_key("DB_RECOVERY_FILE_DEST",self.ora_env_dict)):
            # recovery-area diskgroup
            dbrdest=self.ocommon.rmdgprefix(self.ora_env_dict["DB_RECOVERY_FILE_DEST"])
            device_prop=self.ora_env_dict["RECO_ASMDG_PROPERTIES"] if self.ocommon.check_key("RECO_ASMDG_PROPERTIES",self.ora_env_dict) else None
            self.ocommon.log_info_message("dg validation for :" + dbrdest + " is in progress", self.file_name)
            status=self.oasmca.validate_dg(self.ora_env_dict["RECO_ASM_DEVICE_LIST"],device_prop,dbrdest)
            if not status:
                self.oasmca.create_dg(self.ora_env_dict["RECO_ASM_DEVICE_LIST"],device_prop,dbrdest)
            else:
                self.ocommon.log_info_message("ASM diskgroup exist!",self.file_name)

        if (self.ocommon.check_key("DB_ASM_DEVICE_LIST",self.ora_env_dict)) and (self.ocommon.check_key("DB_DATA_FILE_DEST",self.ora_env_dict)):
            # datafile diskgroup
            dbfiledest=self.ocommon.rmdgprefix(self.ora_env_dict["DB_DATA_FILE_DEST"])
            device_prop=self.ora_env_dict["DB_ASMDG_PROPERTIES"] if self.ocommon.check_key("DB_ASMDG_PROPERTIES",self.ora_env_dict) else None
            self.ocommon.log_info_message("dg validation for :" + dbfiledest + " is in progress", self.file_name)
            status=self.oasmca.validate_dg(self.ora_env_dict["DB_ASM_DEVICE_LIST"],device_prop,dbfiledest)
            if not status:
                self.oasmca.create_dg(self.ora_env_dict["DB_ASM_DEVICE_LIST"],device_prop,dbfiledest)
            else:
                self.ocommon.log_info_message("ASM diskgroup exist!",self.file_name)

    def set_clonedb_params(self):
        """
        Point the mounted gold database at this cluster's diskgroups by
        resetting control_files, DB_CREATE_FILE_DEST and
        DB_RECOVERY_FILE_DEST in the spfile.
        """
        osuser,dbhome,dbbase,oinv=self.ocommon.get_db_params()
        dgname=self.ocommon.setdgprefix(self.ocommon.getcrsdgname())
        dbfiledest=self.ocommon.setdgprefix(self.ocommon.getdbdestdgname(dgname))
        dbrdest=self.ocommon.setdgprefix(self.ocommon.getdbrdestdgname(dbfiledest))
        osid=self.ora_env_dict["GOLD_SID_NAME"]
        connect_str=self.ocommon.get_sqlplus_str(dbhome,osid,osuser,"sys",None,None,None,osid,None,None,None)
        # NOTE(review): control_files is set to {1} (the recovery dest DG) —
        # confirm that pointing control files at the recovery area rather than
        # {0} (the datafile DG) is intended.
        sqlcmd='''
        alter system set control_files='{1}' scope=spfile;
        ALTER SYSTEM SET DB_CREATE_FILE_DEST='{0}' scope=spfile sid='*';
        ALTER SYSTEM SET DB_RECOVERY_FILE_DEST='{1}' scope=spfile sid='*';
        '''.format(dbfiledest,dbrdest)
        output=self.ocommon.run_sql_cmd(sqlcmd,connect_str)

    def clone_db(self,crs_nodes):
        """
        Clone a RAC database from a gold backup (GOLD_DB_BACKUP_LOC):
        restore spfile and backup under the gold SID, rewrite destinations,
        rename the DB, register it with CRS and add one instance per node.

        :param crs_nodes: comma-separated public node list
        """
        if self.ocommon.check_key("GOLD_DB_BACKUP_LOC",self.ora_env_dict) and self.ocommon.check_key("GOLD_DB_NAME",self.ora_env_dict) and self.ocommon.check_key("DB_NAME",self.ora_env_dict) and self.ocommon.check_key("GOLD_SID_NAME",self.ora_env_dict) and self.ocommon.check_key("GOLD_PDB_NAME",self.ora_env_dict):
            self.ocommon.log_info_message("GOLD_DB_BACKUP_LOC set to " + self.ora_env_dict["GOLD_DB_BACKUP_LOC"] ,self.file_name)
            self.ocommon.log_info_message("GOLD_DB_NAME set to " + self.ora_env_dict["GOLD_DB_NAME"] ,self.file_name)
            self.ocommon.log_info_message("DB_NAME set to " + self.ora_env_dict["DB_NAME"] ,self.file_name)
            # minimal bootstrap pfile so the gold SID can start NOMOUNT
            pfile='''/tmp/pfile_{0}'''.format( datetime.datetime.now().strftime('%d%m%Y%H%M'))
            self.ocommon.create_file(pfile,"local",None,None)
            fdata='''db_name={0}'''.format(self.ora_env_dict["GOLD_DB_NAME"])
            self.ocommon.append_file(pfile,fdata)
            self.ocommon.start_db(self.ora_env_dict["GOLD_SID_NAME"],"nomount",pfile)
            ## VV
            self.ocommon.catalog_bkp()
            self.ocommon.restore_spfile()
            cmd='''rm -f {0}'''.format(pfile)
            output,error,retcode=self.ocommon.execute_cmd(cmd,None,None)
            self.ocommon.check_os_err(output,error,retcode,False)
            # bounce onto the restored spfile, retarget destinations, bounce again
            self.ocommon.shutdown_db(self.ora_env_dict["GOLD_SID_NAME"])
            self.ocommon.start_db(self.ora_env_dict["GOLD_SID_NAME"],"nomount")
            self.set_clonedb_params()
            self.ocommon.shutdown_db(self.ora_env_dict["GOLD_SID_NAME"])
            self.ocommon.start_db(self.ora_env_dict["GOLD_SID_NAME"],"nomount")
            self.ocommon.restore_bkp(self.ocommon.setdgprefix(self.ocommon.getcrsdgname()))

            osuser,dbhome,dbbase,oinv=self.ocommon.get_db_params()
            osid=self.ora_env_dict["GOLD_SID_NAME"]
            pfile=dbhome + "/dbs/init" + osid + ".ora"
            spfile=dbhome + "/dbs/spfile" + osid + ".ora"

            # single-instance mode is required while the DB name is changed
            self.ocommon.create_pfile(pfile,spfile)
            self.ocommon.shutdown_db(self.ora_env_dict["GOLD_SID_NAME"])
            self.ocommon.set_cluster_mode(pfile,False)
            self.ocommon.start_db(self.ora_env_dict["GOLD_SID_NAME"],"mount",pfile)
            self.ocommon.change_dbname(pfile,self.ora_env_dict["DB_NAME"])

            self.ocommon.start_db(self.ora_env_dict["DB_NAME"] + "1","mount",pfile)
            spfile=self.ocommon.getdbdestdgname("+DATA") + "/" + self.ora_env_dict["DB_NAME"] + "/PARAMETERFILE/spfile" + self.ora_env_dict["DB_NAME"] + ".ora"
            self.ocommon.create_spfile(spfile,pfile)
            self.ocommon.resetlogs(self.ora_env_dict["DB_NAME"] + "1")
            self.ocommon.shutdown_db(self.ora_env_dict["DB_NAME"] + "1")
            # register the renamed DB and one instance per cluster node with CRS
            self.ocommon.add_rac_db(osuser,dbhome,self.ora_env_dict["DB_NAME"],spfile)
            instance_number=1
            for node in crs_nodes.split(","):
                self.ocommon.add_rac_instance(osuser,dbhome,self.ora_env_dict["DB_NAME"],str(instance_number),node)
                instance_number +=1

            self.ocommon.start_rac_db(osuser,dbhome,self.ora_env_dict["DB_NAME"])
            self.ocommon.get_db_status(osuser,dbhome,self.ora_env_dict["DB_NAME"])
            self.ocommon.get_db_config(osuser,dbhome,self.ora_env_dict["DB_NAME"])
            self.ocommon.log_info_message("End
clone_db()",self.file_name) + + def check_responsefile(self): + """ + This function returns the valid response file + """ + dbrsp=None + if self.ocommon.check_key("DBCA_RESPONSE_FILE",self.ora_env_dict): + dbrsp=self.ora_env_dict["DBCA_RESPONSE_FILE"] + self.ocommon.log_info_message("DBCA_RESPONSE_FILE parameter is set and file location is:" + dbrsp ,self.file_name) + else: + self.ocommon.log_error_message("DBCA response file does not exist at its location: " + dbrsp + ".Exiting..",self.file_name) + self.ocommon.prog_exit("127") + + if os.path.isfile(dbrsp): + return dbrsp + + def create_db(self): + """ + Perform the DB Creation + """ + cmd="" + prereq=" " + if self.ocommon.check_key("IGNORE_DB_PREREQS",self.ora_env_dict): + prereq=" -ignorePreReqs " + dbuser,dbhome,dbase,oinv=self.ocommon.get_db_params() + if self.ocommon.check_key("DBCA_RESPONSE_FILE",self.ora_env_dict): + dbrsp=self.check_responsefile() + cmd='''su - {0} -c "{1}/bin/dbca -silent {3} -createDatabase -responseFile {2}"'''.format(dbuser,dbhome,dbrsp,prereq) + else: + cmd=self.prepare_db_cmd() + + dbpasswd=self.ocommon.get_db_passwd() + tdepasswd=self.ocommon.get_tde_passwd() + self.ocommon.set_mask_str(dbpasswd) + output,error,retcode=self.ocommon.execute_cmd(cmd,None,None) + self.ocommon.check_os_err(output,error,retcode,True) + ### Unsetting the encrypt value to None + self.ocommon.unset_mask_str() + + def prepare_db_cmd(self): + """ + Perform the asm disk group creation + """ + prereq=" " + if self.ocommon.check_key("IGNORE_DB_PREREQS",self.ora_env_dict): + prereq=" -ignorePreReqs " + + tdewallet="" + dbuser,dbhome,dbase,oinv=self.ocommon.get_db_params() + pub_nodes,vip_nodes,priv_nodes=self.ocommon.process_cluster_vars("CRS_NODES") + crs_nodes=pub_nodes.replace(" ",",") + dbname,osid,dbuname=self.ocommon.getdbnameinfo() + dgname=self.ocommon.setdgprefix(self.ocommon.getcrsdgname()) + dbfiledest=self.ocommon.setdgprefix(self.ocommon.getdbdestdgname(dgname)) + 
cdbflag=self.ora_env_dict["CONTAINERDB_FLAG"] if self.ocommon.check_key("CONTAINERDB_FLAG",self.ora_env_dict) else "true" + stype=self.ora_env_dict["DB_STORAGE_TYPE"] if self.ocommon.check_key("DB_STORAGE_TYPE",self.ora_env_dict) else "ASM" + charset=self.ora_env_dict["DB_CHARACTERSET"] if self.ocommon.check_key("DB_CHARACTERSET",self.ora_env_dict) else "AL32UTF8" + redosize=self.ora_env_dict["DB_REDOFILE_SIZE"] if self.ocommon.check_key("DB_REDOFILE_SIZE",self.ora_env_dict) else "1024" + dbtype=self.ora_env_dict["DB_TYPE"] if self.ocommon.check_key("DB_TYPE",self.ora_env_dict) else "OLTP" + dbctype=self.ora_env_dict["DB_CONFIG_TYPE"] if self.ocommon.check_key("DB_CONFIG_TYPE",self.ora_env_dict) else "RAC" + arcmode=self.ora_env_dict["ENABLE_ARCHIVELOG"] if self.ocommon.check_key("ENABLE_ARCHIVELOG",self.ora_env_dict) else "true" + pdbsettings=self.get_pdb_params() + initparams=self.get_init_params() + if self.ocommon.check_key("SETUP_TDE_WALLET",self.ora_env_dict): + tdewallet='''-configureTDE true -tdeWalletPassword HIDDEN_STRING -tdeWalletRoot {0} -tdeWalletLoginType AUTO_LOGIN -encryptTablespaces all'''.format(dbfiledest) + #memorypct=self.get_memorypct() + + rspdata='''su - {0} -c "{1}/bin/dbca -silent {15} -createDatabase \ + -templateName General_Purpose.dbc \ + -gdbname {2} \ + -createAsContainerDatabase {3} \ + -sysPassword HIDDEN_STRING \ + -systemPassword HIDDEN_STRING \ + -datafileDestination {4} \ + -storageType {5} \ + -characterSet {6} \ + -redoLogFileSize {7} \ + -databaseType {8} \ + -databaseConfigType {9} \ + -nodelist {10} \ + -useOMF true \ + {12} \ + {13} \ + {16} \ + -enableArchive {14}"'''.format(dbuser,dbhome,dbname,cdbflag,dbfiledest,stype,charset,redosize,dbtype,dbctype,crs_nodes,dbname,pdbsettings,initparams,arcmode,prereq,tdewallet) + cmd='\n'.join(line.lstrip() for line in rspdata.splitlines()) + + return cmd + + def get_pdb_params(self): + """ + Perform the asm disk group creation + """ + pdbnum=self.ora_env_dict["PDB_COUNT"] if 
self.ocommon.check_key("PDB_COUNT",self.ora_env_dict) else "1"
        pdbname=self.ora_env_dict["ORACLE_PDB_NAME"] if self.ocommon.check_key("ORACLE_PDB_NAME",self.ora_env_dict) else "ORCLPDB"
        rspdata='''-numberOfPDBs {0} \
        -pdbAdminPassword HIDDEN_STRING \
        -pdbName {1}'''.format(pdbnum,pdbname)
        cmd='\n'.join(line.lstrip() for line in rspdata.splitlines())
        return cmd

    def get_init_params(self):
        """
        Build the dbca -initparams argument: destinations, broker files and
        tuning parameters taken from the environment, with optional SGA/PGA/
        process/cpu/recovery-size entries appended only when supplied.

        :return: the " -initparams '...'" string
        """
        sgasize=self.ora_env_dict["INIT_SGA_SIZE"] if self.ocommon.check_key("INIT_SGA_SIZE",self.ora_env_dict) else None
        pgasize=self.ora_env_dict["INIT_PGA_SIZE"] if self.ocommon.check_key("INIT_PGA_SIZE",self.ora_env_dict) else None
        processes=self.ora_env_dict["INIT_PROCESSES"] if self.ocommon.check_key("INIT_PROCESSES",self.ora_env_dict) else None
        dbname,osid,dbuname=self.ocommon.getdbnameinfo()
        dgname=self.ocommon.setdgprefix(self.ocommon.getcrsdgname())
        dbdest=self.ocommon.setdgprefix(self.ocommon.getdbdestdgname(dgname))
        dbrdest=self.ocommon.setdgprefix(self.ocommon.getdbrdestdgname(dbdest))
        dbrdestsize=self.ora_env_dict["DB_RECOVERY_FILE_DEST_SIZE"] if self.ocommon.check_key("DB_RECOVERY_FILE_DEST_SIZE",self.ora_env_dict) else None
        cpucount=self.ora_env_dict["CPU_COUNT"] if self.ocommon.check_key("CPU_COUNT",self.ora_env_dict) else None
        dbfiles=self.ora_env_dict["DB_FILES"] if self.ocommon.check_key("DB_FILES",self.ora_env_dict) else "1024"
        lgbuffer=self.ora_env_dict["LOG_BUFFER"] if self.ocommon.check_key("LOG_BUFFER",self.ora_env_dict) else "256M"
        dbrettime=self.ora_env_dict["DB_FLASHBACK_RETENTION_TARGET"] if self.ocommon.check_key("DB_FLASHBACK_RETENTION_TARGET",self.ora_env_dict) else "120"
        dbblkck=self.ora_env_dict["DB_BLOCK_CHECKSUM"] if self.ocommon.check_key("DB_BLOCK_CHECKSUM",self.ora_env_dict) else "TYPICAL"
        dblwp=self.ora_env_dict["DB_LOST_WRITE_PROTECT"] if self.ocommon.check_key("DB_LOST_WRITE_PROTECT",self.ora_env_dict) else "TYPICAL"
        ptpc=self.ora_env_dict["PARALLEL_THREADS_PER_CPU"] if self.ocommon.check_key("PARALLEL_THREADS_PER_CPU",self.ora_env_dict) else "1"
        dgbr1=self.ora_env_dict["DG_BROKER_CONFIG_FILE1"] if self.ocommon.check_key("DG_BROKER_CONFIG_FILE1",self.ora_env_dict) else dbdest
        dgbr2=self.ora_env_dict["DG_BROKER_CONFIG_FILE2"] if self.ocommon.check_key("DG_BROKER_CONFIG_FILE2",self.ora_env_dict) else dbrdest
        remotepasswdfile="REMOTE_LOGIN_PASSWORDFILE=EXCLUSIVE"
        lgformat="LOG_ARCHIVE_FORMAT=%t_%s_%r.arc"

        # NOTE(review): format arg {1} (second dbrdest) is never referenced by
        # the template — harmless but redundant
        initprm='''db_recovery_file_dest={0},db_create_file_dest={2},{3},{4},db_unique_name={5},db_files={6},LOG_BUFFER={7},DB_FLASHBACK_RETENTION_TARGET={8},DB_BLOCK_CHECKSUM={9},DB_LOST_WRITE_PROTECT={10},PARALLEL_THREADS_PER_CPU={11},DG_BROKER_CONFIG_FILE1={12},DG_BROKER_CONFIG_FILE2={13}'''.format(dbrdest,dbrdest,dbdest,remotepasswdfile,lgformat,dbuname,dbfiles,lgbuffer,dbrettime,dbblkck,dblwp,ptpc,dgbr1,dgbr2)

        if sgasize:
            initprm= initprm + ''',sga_target={0},sga_max_size={0}'''.format(sgasize)

        if pgasize:
            initprm= initprm + ''',pga_aggregate_size={0}'''.format(pgasize)

        if processes:
            initprm= initprm + ''',processes={0}'''.format(processes)

        if cpucount:
            initprm= initprm + ''',cpu_count={0}'''.format(cpucount)

        if dbrdestsize:
            initprm = initprm + ''',db_recovery_file_dest_size={0}'''.format(dbrdestsize)

        initparams=""" -initparams '{0}'""".format(initprm)

        return initparams

    def perform_db_check(self):
        """
        This function check the DB and print the message"
        """
        status,osid,host,mode=self.ocommon.check_dbinst()
        if status:
            self.ocommon.rac_setup_complete()
            dbuser,dbhome,dbase,oinv=self.ocommon.get_db_params()
            msg='''Oracle Database {0} is up and running on {1}.'''.format(osid,host)
            self.ocommon.log_info_message(self.ocommon.print_banner(msg),self.file_name)
            self.ocommon.run_custom_scripts("CUSTOM_DB_SCRIPT_DIR","CUSTOM_DB_SCRIPT_FILE",dbuser)
            self.ocommon.set_remote_listener()
            # NOTE(review): writes directly to the container's first pty — confirm /dev/pts/0 exists in all runtimes
            os.system("echo ORACLE RAC DATABASE IS
READY TO USE > /dev/pts/0") + msg='''ORACLE RAC DATABASE IS READY TO USE''' + self.ocommon.log_info_message(self.ocommon.print_banner(msg),self.file_name) + else: + msg='''Oracle Database {0} is not up and running on {1}.'''.format(osid,host) + self.ocommon.log_info_message(self.ocommon.print_banner(msg),self.file_name) + self.ocommon.prog_exit("127") diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oraracstdby.py b/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oraracstdby.py new file mode 100644 index 0000000000..9c6e79b72a --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oraracstdby.py @@ -0,0 +1,643 @@ +#!/usr/bin/python3 + +############################# +# Copyright (c) 2024, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl +# Author: paramdeep.saini@oracle.com +############################ + +""" + This file contains to the code call different classes objects based on setup type +""" + +from distutils.log import debug +import os +import sys +import traceback + +from oralogger import * +from oraenv import * +from oracommon import * +from oramachine import * +from orasetupenv import * +from orasshsetup import * +from oracvu import * +from oragiprov import * +from oraasmca import * +from oraracprov import * + +class OraRacStdby: + """ + This class Add the RAC standby + """ + def __init__(self,oralogger,orahandler,oraenv,oracommon,oracvu,orasetupssh): + try: + self.ologger = oralogger + self.ohandler = orahandler + self.oenv = oraenv.get_instance() + self.ocommon = oracommon + self.ora_env_dict = oraenv.get_env_vars() + self.file_name = os.path.basename(__file__) + self.osetupssh = orasetupssh + self.ocvu = oracvu + self.ogiprov = OraGIProv(self.ologger,self.ohandler,self.oenv,self.ocommon,self.ocvu,self.osetupssh) + self.oasmca = 
OraAsmca(self.ologger,self.ohandler,self.oenv,self.ocommon,self.ocvu,self.osetupssh)
            self.oraracprov = OraRacProv(self.ologger,self.ohandler,self.oenv,self.ocommon,self.ocvu,self.osetupssh)
        except BaseException as ex:
            # log type, value and a formatted stack trace instead of crashing the caller
            ex_type, ex_value, ex_traceback = sys.exc_info()
            trace_back = traceback.extract_tb(ex_traceback)
            stack_trace = list()
            for trace in trace_back:
                stack_trace.append("File : %s , Line : %d, Func.Name : %s, Message : %s" % (trace[0], trace[1], trace[2], trace[3]))
            self.ocommon.log_info_message(ex_type.__name__,self.file_name)
            self.ocommon.log_info_message(ex_value,self.file_name)
            self.ocommon.log_info_message(stack_trace,self.file_name)

    def setup(self):
        """
        Provision the RAC standby on this machine.

        Reuses OraRacProv for GI setup, CVU checks, SSH, software install and
        root.sh, then configures the primary, creates standby logs, creates
        the standby DB and wires up Data Guard.
        """
        self.ocommon.log_info_message("Start setup()",self.file_name)
        ct = datetime.datetime.now()
        bts = ct.timestamp()
        sshFlag=False
        self.ogiprov.setup()
        pub_nodes,vip_nodes,priv_nodes=self.ocommon.process_cluster_vars("CRS_NODES")
        crs_nodes=pub_nodes.replace(" ",",")
        for node in crs_nodes.split(","):
            self.oraracprov.clu_checks(node)
        dbuser,dbhome,dbase,oinv=self.ocommon.get_db_params()
        retcode1=self.ocvu.check_home(None,dbhome,dbuser)
        status=self.ocommon.check_rac_installed(retcode1)
        if not status:
            self.oraracprov.perform_ssh_setup()
            sshFlag=True
        status=self.ocommon.check_home_inv(None,dbhome,dbuser)
        if not status:
            self.ocommon.log_info_message("Start oraracprov.db_sw_install()",self.file_name)
            self.oraracprov.db_sw_install()
            self.ocommon.log_info_message("End oraracprov.db_sw_install()",self.file_name)
            self.ocommon.log_info_message("Start oraracprov.run_rootsh()",self.file_name)
            self.oraracprov.run_rootsh()
            self.ocommon.log_info_message("End oraracprov.run_rootsh()",self.file_name)
        if not self.ocommon.check_key("SKIP_DBCA",self.ora_env_dict):
            self.oraracprov.create_asmdg()
            status,osid,host,mode=self.ocommon.check_dbinst()
            hostname=self.ocommon.get_public_hostname()
            if status:
                msg='''Database instance {0} already exist on this machine {1}.'''.format(osid,hostname)
                self.ocommon.log_info_message(self.ocommon.print_banner(msg),self.file_name)
            else:
                if not sshFlag:
                    self.oraracprov.perform_ssh_setup()
                # standby flow: verify primary, prepare it, then build the standby
                self.check_primary_db()
                self.ocommon.log_info_message("Start configure_primary_db()",self.file_name)
                self.configure_primary_db()
                self.ocommon.log_info_message("End configure_primary_db()",self.file_name)
                self.ocommon.log_info_message("Start create_standbylogs()",self.file_name)
                self.create_standbylogs()
                self.ocommon.log_info_message("End create_standbylogs()",self.file_name)
                #self.populate_tnsfile()
                #self.copy_tnsfile(dbhome,dbuser)
                self.ocommon.log_info_message("Start create_db()",self.file_name)
                self.create_db()
                self.ocommon.log_info_message("End create_db()",self.file_name)
                self.ocommon.log_info_message("Start configure_standby_db()",self.file_name)
                self.configure_standby_db()
                self.ocommon.log_info_message("End configure_standby_db()",self.file_name)
                ### Calling populate TNS again as create_db reset the oldtnames.ora
                #self.populate_tnsfile()
                #self.copy_tnsfile(dbhome,dbuser)
                self.configure_dgsetup()
                self.restart_db()

        ct = datetime.datetime.now()
        ets = ct.timestamp()
        totaltime=ets - bts
        self.ocommon.log_info_message("Total time for setup() = [ " + str(round(totaltime,3)) + " ] seconds",self.file_name)

    def get_stdby_variables(self):
        """
        Read the standby/primary naming variables from the environment.

        :return: (stdbydbuname, prmydbuname, prmydbport, prmydbname, prmyscanname)
        """
        stdbydbuname =self.ora_env_dict["DB_UNIQUE_NAME"] if self.ocommon.check_key("DB_UNIQUE_NAME",self.ora_env_dict) else "SORCLCDB"
        prmydbuname =self.ora_env_dict["PRIMARY_DB_UNIQUE_NAME"] if self.ocommon.check_key("PRIMARY_DB_UNIQUE_NAME",self.ora_env_dict) else None
        prmydbport =self.ora_env_dict["PRIMARY_DB_SCAN_PORT"] if self.ocommon.check_key("PRIMARY_DB_SCAN_PORT",self.ora_env_dict) else 1521
        prmydbname =self.ora_env_dict["PRIMARY_DB_NAME"] if
self.ocommon.check_key("PRIMARY_DB_NAME",self.ora_env_dict) else None
        prmyscanname =self.ora_env_dict["PRIMARY_DB_SCAN_NAME"] if self.ocommon.check_key("PRIMARY_DB_SCAN_NAME",self.ora_env_dict) else None

        return stdbydbuname,prmydbuname,prmydbport,prmydbname,prmyscanname

    def get_primary_connect_str(self):
        '''
        Build the sqlplus connect string for the PRIMARY database (via its
        scan name/port) plus the DB home parameters.

        :return: (connect_str, osuser, dbhome, dbbase, oinv, osid)
        '''
        stdbydbuname,prmydbuname,prmydbport,prmydbname,prmyscanname=self.get_stdby_variables()
        osuser,dbhome,dbbase,oinv=self.ocommon.get_db_params()
        osid=self.ora_env_dict["PRIMARY_DB_UNIQUE_NAME"] if self.ocommon.check_key("PRIMARY_DB_UNIQUE_NAME",self.ora_env_dict) else None
        connect_str=self.ocommon.get_sqlplus_str(dbhome,osid,osuser,"sys",'HIDDEN_STRING',prmyscanname,prmydbport,osid,None,None,None)

        return connect_str,osuser,dbhome,dbbase,oinv,osid

    def get_standby_connect_str(self):
        '''
        Build the sqlplus connect string for the STANDBY database (via this
        cluster's SCAN_NAME/SCAN_PORT).

        :return: (connect_str, osuser, dbhome, dbbase, oinv, stdbydbuname)
        '''
        stdbydbuname,prmydbuname,prmydbport,prmydbname,prmyscanname=self.get_stdby_variables()
        osuser,dbhome,dbbase,oinv=self.ocommon.get_db_params()
        # NOTE(review): self.prog_exit does not exist on this class (likely meant
        # self.ocommon.prog_exit) — only reached when SCAN_NAME is unset; confirm
        stdbyscanname=self.ora_env_dict["SCAN_NAME"] if self.ocommon.check_key("SCAN_NAME",self.ora_env_dict) else self.prog_exit("127")
        stdbyscanport=self.ora_env_dict["SCAN_PORT"] if self.ocommon.check_key("SCAN_PORT",self.ora_env_dict) else "1521"
        connect_str=self.ocommon.get_sqlplus_str(dbhome,stdbydbuname,osuser,"sys",'HIDDEN_STRING',stdbyscanname,stdbyscanport,stdbydbuname,None,None,None)

        return connect_str,osuser,dbhome,dbbase,oinv,stdbydbuname

    def get_stdby_dg_name(self):
        '''
        Resolve the standby diskgroup destinations (with +DG prefixes applied).

        :return: (dbrdest, dbrdestsize, dbdest, dgname)
        '''
        dgname=self.ora_env_dict["CRS_ASM_DISKGROUP"] if self.ocommon.check_key("CRS_ASM_DISKGROUP",self.ora_env_dict) else "+DATA"
        dbrdest=self.ora_env_dict["DB_RECOVERY_FILE_DEST"] if self.ocommon.check_key("DB_RECOVERY_FILE_DEST",self.ora_env_dict) else dgname
        dbrdestsize=self.ora_env_dict["DB_RECOVERY_FILE_DEST_SIZE"] if self.ocommon.check_key("DB_RECOVERY_FILE_DEST_SIZE",self.ora_env_dict) else "50G"
dbdest=self.ora_env_dict["DB_CREATE_FILE_DEST"] if self.ocommon.check_key("DB_CREATE_FILE_DEST",self.ora_env_dict) else dbrdest + + return self.ocommon.setdgprefix(dbrdest),dbrdestsize,self.ocommon.setdgprefix(dbdest),self.ocommon.setdgprefix(dgname) + + def check_primary_db(self): + """ + Checking primary DB before proceeding to STDBY Setup + """ + stdbydbuname,prmydbuname,prmydbport,prmydbname,prmyscanname=self.get_stdby_variables() + self.ocommon.log_info_message("Checking primary DB",self.file_name) + status=None + counter=1 + end_counter=45 + + connect_str,osuser,dbhome,dbbase,oinv,osid=self.get_primary_connect_str() + + while counter < end_counter: + status=self.ocommon.check_setup_status(osuser,dbhome,osid,connect_str) + if status == 'completed': + break + else: + msg='''Primary DB {0} setup is still not completed as primary check did not return "completed". Sleeping for 60 seconds and sleeping count is {0}'''.format(counter) + self.ocommon.log_info_message(msg,self.file_name) + time.sleep(60) + counter=counter+1 + + if status == 'completed': + msg='''Primary Database {0} is open!'''.format(prmydbuname) + self.ocommon.log_info_message(msg,self.file_name) + else: + msg='''Primary DB {0} is not in open state.Primary DB setup did not complete or failed. 
Exiting...''' + self.ocommon.log_error_message(msg,self.file_name) + self.ocommon.prog_exit("127") + + + def configure_primary_db(self): + """ + Setup Primary for standby + """ + stdbydbuname,prmydbuname,prmydbport,prmydbname,prmyscanname=self.get_stdby_variables() + connect_str,osuser,dbhome,dbbase,oinv,osid=self.get_primary_connect_str() + stdbyscanname=self.ora_env_dict["SCAN_NAME"] if self.ocommon.check_key("SCAN_NAME",self.ora_env_dict) else self.prog_exit("127") + stdbyscanport=self.ora_env_dict["SCAN_PORT"] if self.ocommon.check_key("SCAN_PORT",self.ora_env_dict) else "1521" + prmytnssvc=self.ocommon.get_tnssvc_str(prmydbuname,prmydbport,prmyscanname) + stdbytnssvc=self.ocommon.get_tnssvc_str(stdbydbuname,stdbyscanport,stdbyscanname) + msg='''Setting up Primary DB for standby''' + self.ocommon.log_info_message(msg,self.file_name) + stdbylgdg,dbrdestsize,stdbydbdg,dgname=self.get_stdby_dg_name() + lgdest1="""LOCATION=USE_DB_RECOVERY_FILE_DEST VALID_FOR=(ALL_LOGFILES,ALL_ROLES) DB_UNIQUE_NAME={0}""".format(prmydbuname) + lgdest2='''SERVICE="{0}" ASYNC VALID_FOR=(ONLINE_LOGFILE,PRIMARY_ROLE) DB_UNIQUE_NAME={1}'''.format(stdbytnssvc,stdbydbuname) + dbconfig="""DG_CONFIG=({0},{1})""".format(prmydbuname,stdbydbuname) + prmydbdg=self.ocommon.get_init_params("db_create_file_dest",connect_str) + prmylsdg=self.ocommon.get_init_params("DB_RECOVERY_FILE_DEST",connect_str) + dbconv="""'{0}','{1}'""".format(stdbydbdg,prmydbdg) + lgconv="""'{0}','{1}'""".format(stdbylgdg,prmylsdg) + prmy_dbname=self.ocommon.get_init_params("DB_NAME",connect_str) + dgbroker=prmyscanname=self.ora_env_dict["DG_BROKER_START"] if self.ocommon.check_key("DG_BROKER_START",self.ora_env_dict) else "true" + + + sqlcmd=""" + alter database force logging; + alter database flashback on; + alter system set db_recovery_file_dest_size=30G scope=both sid='*'; + alter system set LOG_ARCHIVE_DEST_1='{0}' scope=both sid='*'; + alter system set LOG_ARCHIVE_DEST_2='{1}' scope=both sid='*'; + alter system set 
LOG_ARCHIVE_DEST_STATE_1=ENABLE scope=both sid='*'; + alter system set LOG_ARCHIVE_DEST_STATE_2=ENABLE scope=both sid='*'; + alter system set LOG_ARCHIVE_CONFIG='{2}' scope=both sid='*'; + alter system set FAL_SERVER='{9}' scope=both sid='*'; + alter system set STANDBY_FILE_MANAGEMENT=AUTO scope=both sid='*'; + alter system set DB_FILE_NAME_CONVERT={4} scope=both sid='*'; + alter system set LOG_FILE_NAME_CONVERT={5} scope=both sid='*'; + alter system set dg_broker_start=true scope=both sid='*'; + alter system set DB_BLOCK_CHECKSUM='TYPICAL' scope=both sid='*'; + alter system set DB_LOST_WRITE_PROTECT='TYPICAL' scope=both sid='*'; + alter system set DB_FLASHBACK_RETENTION_TARGET=120 scope=both sid='*'; + alter system set PARALLEL_THREADS_PER_CPU=1 scope=both sid='*'; + """.format(lgdest1,lgdest2,dbconfig,stdbydbuname,dbconv,lgconv,dgbroker,prmylsdg,prmydbdg,stdbytnssvc) + + output=self.ocommon.run_sql_cmd(sqlcmd,connect_str) + + def get_logfile_info(self,connect_str): + """ + get the primary log info + """ + sqlsetcmd=self.ocommon.get_sqlsetcmd() + sqlcmd1=''' + {0} + select max(thread#) from gv$log; + '''.format(sqlsetcmd) + + sqlcmd2=''' + {0} + select count(*) from gv$log; + '''.format(sqlsetcmd) + + sqlcmd3=''' + {0} + select * from (select count(*) from v$log group by thread#) where rownum < 2; + '''.format(sqlsetcmd) + + sqlcmd4=''' + {0} + select min(group#) from gv$log; + '''.format(sqlsetcmd) + + sqlcmd5=''' + {0} + select max(MEMBERS) from gv$log; + '''.format(sqlsetcmd) + + sqlcmd6=''' + {0} + select count(*) from gv$standby_log; + ''' .format(sqlsetcmd) + + sqlcmd7=''' + {0} + select max(group#) from gv$standby_log; + '''.format(sqlsetcmd) + + sqlcmd8=''' + {0} + select bytes from v$log where rownum < 2; + '''.format(sqlsetcmd) + + sqlcmd9=''' + {0} + select max(group#) from v$log; + '''.format(sqlsetcmd) + + maxthread=self.ocommon.run_sql_cmd(sqlcmd1,connect_str) + maxgrpcount=self.ocommon.run_sql_cmd(sqlcmd2,connect_str) + 
maxgrpnum=self.ocommon.run_sql_cmd(sqlcmd3,connect_str) + mingrpnum=self.ocommon.run_sql_cmd(sqlcmd4,connect_str) + maxgrpmemnum=self.ocommon.run_sql_cmd(sqlcmd5,connect_str) + maxstdbygrpcount=self.ocommon.run_sql_cmd(sqlcmd6,connect_str) + maxstdbygrpnum=self.ocommon.run_sql_cmd(sqlcmd7,connect_str) + filesize=self.ocommon.run_sql_cmd(sqlcmd8,connect_str) + maxgrp=self.ocommon.run_sql_cmd(sqlcmd9,connect_str) + + return int(maxthread),int(maxgrpcount),int(maxgrpnum),int(mingrpnum),int(maxgrpmemnum),int(maxstdbygrpcount),maxstdbygrpnum,int(filesize),int(maxgrp) + + def create_standbylogs(self): + """ + Setup standby logs on Primary + """ + stdbydbuname,prmydbuname,prmydbport,prmydbname,prmyscanname=self.get_stdby_variables() + connect_str,osuser,dbhome,dbbase,oinv,osid=self.get_primary_connect_str() + maxthread,maxgrpcount,maxgrpnum,mingrpnum,maxgrpmemnum,maxstdbygrpcount,maxstdbygrpnum,filesize,maxgrp=self.get_logfile_info(connect_str) + threadcount=1 + mingrpmemnum=1 + stdbygrp=0 + + msg=''' + Received Values : + Max Thread={0} + Max Log Group Count={1} + Max Log Group Number={2} + Min Log Group Num={3} + Max Group Member = {4} + Max Standby Group Count = {5} + Max Standby Group Number = {6} + File Size = {7} + Max Groups = {8} + '''.format(maxthread,maxgrpcount,maxgrpnum,mingrpnum,maxgrpmemnum,maxstdbygrpcount,maxstdbygrpnum,filesize,maxgrp) + + self.ocommon.log_info_message(msg,self.file_name) + dbrdest=self.ocommon.get_init_params("DB_RECOVERY_FILE_DEST",connect_str) + + if maxstdbygrpcount != 0: + if maxstdbygrpcount == ((maxgrp + 1) * maxthread): + msg1='''The required standby logs already exist. The current number of max primary group is {1} and max threads are {3}. 
The standby logs groups is to "((maxgrp + 1) * maxthread)"= {0} '''.format(((maxgrp + 1) * maxthread),maxgrp,maxthread) + self.ocommon.log_info_message(msg1,self.file_name) + else: + stdbygrp=(maxgrp + 1) * maxthread + msg1='''The current number of max primary log group is {1} and max threads are {2}. The required standby logs groups "((maxgrp + 1) * maxthread)"= {0}'''.format(((maxgrp + 1) * maxthread),maxgrp,maxthread) + self.ocommon.log_info_message(msg1,self.file_name) + + # Setting the standby logs to the value which will start after maxgrpcount + mingrpnum=(maxgrp+1) + newstdbygrp=stdbygrp + threadcount=1 + group_per_thread=((stdbygrp - maxgrp )/maxthread) + group_per_thread_count=1 + + msg='''Logfile thread maxthread={1}, groups per thread={2}'''.format(threadcount,maxthread,group_per_thread) + self.ocommon.log_info_message(msg,self.file_name) + msg='''Standby logfiles minigroup set to={0} and maximum group set to={1}'''.format(mingrpnum,newstdbygrp) + self.ocommon.log_info_message(msg,self.file_name) + msg='''Logfile group loop. 
mingrpnum={0},maxgrpnum={1}'''.format(mingrpnum,newstdbygrp) + self.ocommon.log_info_message(msg,self.file_name) + + while threadcount <= maxthread: + group_per_thread_count=1 + while group_per_thread_count <= group_per_thread: + mingrpmemnum=1 + while mingrpmemnum <= maxgrpmemnum: + if mingrpmemnum == 1: + self.add_stdby_log_grp(threadcount,mingrpnum,filesize,dbrdest,connect_str,None) + else: + self.add_stdby_log_grp(threadcount,mingrpnum,filesize,dbrdest,connect_str,'member') + mingrpmemnum = mingrpmemnum + 1 + group_per_thread_count=group_per_thread_count + 1 + mingrpnum = mingrpnum + 1 + threadcount = threadcount + 1 + if mingrpnum >= newstdbygrp: + break + + def add_stdby_log_grp(self,threadcount,stdbygrp,filesize,dbrdest,connect_str,type): + """ + This function will add standby log group + """ + sqlcmd1=None + sqlsetcmd=self.ocommon.get_sqlsetcmd() + if type is None: + sqlcmd1=''' + {3} + ALTER DATABASE ADD STANDBY LOGFILE THREAD {0} group {1} size {2}; + '''.format(threadcount,stdbygrp,filesize,sqlsetcmd) + + if type == 'member': + sqlcmd1=''' + {2} + ALTER DATABASE ADD STANDBY LOGFILE member '{0}' to group {1}; + '''.format(dbrdest,stdbygrp,sqlsetcmd) + + output=self.ocommon.run_sql_cmd(sqlcmd1,connect_str) + + + def populate_tnsfile(self): + """ + Populate TNS file" + """ + stdbydbuname,prmydbuname,prmydbport,prmydbname,prmyscanname=self.get_stdby_variables() + connect_str,osuser,dbhome,dbbase,oinv,osid=self.get_primary_connect_str() + prmyscanname=self.ora_env_dict["PRIMARY_DB_SCAN_NAME"] if self.ocommon.check_key("PRIMARY_DB_SCAN_NAME",self.ora_env_dict) else self.prog_exit("127") + prmyscanport=self.ora_env_dict["PRIMARY_DB_SCAN_PORT"] if self.ocommon.check_key("PRIMARY_DB_SCAN_PORT",self.ora_env_dict) else "1521" + stdbyscanname=self.ora_env_dict["SCAN_NAME"] if self.ocommon.check_key("SCAN_NAME",self.ora_env_dict) else self.prog_exit("127") + stdbyscanport=self.ora_env_dict["SCAN_PORT"] if self.ocommon.check_key("SCAN_PORT",self.ora_env_dict) else 
"1521" + self.create_local_tns_enteries(dbhome,prmydbuname,prmyscanname,prmyscanport,osuser,"oinstall") + self.create_local_tns_enteries(dbhome,stdbydbuname,stdbyscanname,stdbyscanport,osuser,"oinstall") + self.create_remote_tns_enteries(dbhome,stdbydbuname,connect_str,stdbyscanname,stdbyscanport) + + def create_local_tns_enteries(self,dbhome,dbuname,scan_name,port,osuser,osgroup): + """ + Add enteries in tnsnames.ora + """ + tnsfile='''{0}/network/admin/tnsnames.ora'''.format(dbhome) + status=self.ocommon.check_file(tnsfile,"local",None,None) + key='''{0}='''.format(dbuname) + tnsentry='\n' + '''{2}=(DESCRIPTION = (ADDRESS = (PROTOCOL = TCP)(HOST = {0})(PORT = {1})) (CONNECT_DATA = (SERVER = DEDICATED) (SERVICE_NAME = {2})))'''.format(scan_name,port,dbuname) + + + if status: + fdata=self.ocommon.read_file(tnsfile) + match=re.search(key,fdata,re.MULTILINE) + if not match: + msg='''tnsnames.ora : {1} exist. Populating tnsentry: {0}'''.format(tnsentry,tnsfile) + self.ocommon.log_info_message(msg,self.file_name) + self.ocommon.append_file(tnsfile,tnsentry) + else: + msg='''tnsnames.ora : {1} doesn't exist, creating the file. 
Populating tnsentry: {0}'''.format(tnsentry,tnsfile) + self.ocommon.log_info_message(msg,self.file_name) + self.ocommon.write_file(tnsfile,tnsentry) + + cmd='''chown {1}:{2} {0}'''.format(tnsfile,osuser,osgroup) + output,error,retcode=self.ocommon.execute_cmd(cmd,None,None) + + def create_remote_tns_enteries(self,dbhome,dbuname,connect_str,scan_name,scan_port): + """ + Add enteries in remote tnsnames.ora + """ + sqlcmd=""" + begin + dbms_scheduler.create_job (job_name => 'OS_JOB', + job_type => 'executable', + job_action => '/opt/scripts/startup/scripts/cmdExec', + number_of_arguments => 4, + auto_drop => TRUE); + dbms_scheduler.set_job_argument_value ('OS_JOB', 1,'sudo'); + dbms_scheduler.set_job_argument_value ('OS_JOB', 2,'/usr/bin/python3'); + dbms_scheduler.set_job_argument_value ('OS_JOB', 3,'/opt/scripts/startup/scripts/main.py'); + dbms_scheduler.set_job_argument_value ('OS_JOB', 4,'--addtns=\"scan_name={0};scan_port={1};db_unique_name={2}\"'); + DBMS_SCHEDULER.RUN_JOB(JOB_NAME => 'OS_JOB',USE_CURRENT_SESSION => TRUE); + end; + / + exit; + """.format(scan_name,scan_port,dbuname) + + output=self.ocommon.run_sql_cmd(sqlcmd,connect_str) + + def copy_tnsfile(self,dbhome,osuser): + """ + Copy TNSfile to remote machine + """ + tnsfile='''{0}/network/admin/tnsnames.ora'''.format(dbhome) + self.ocommon.copy_file_cluster(tnsfile,tnsfile,osuser) + + def create_db(self): + """ + Perform the DB Creation + """ + cmd="" + dbuser,dbhome,dbase,oinv=self.ocommon.get_db_params() + cmd=self.prepare_db_cmd() + + dbpasswd=self.ocommon.get_db_passwd() + self.ocommon.set_mask_str(dbpasswd) + output,error,retcode=self.ocommon.execute_cmd(cmd,None,None) + self.ocommon.check_os_err(output,error,retcode,None) + ### Unsetting the encrypt value to None + self.ocommon.unset_mask_str() + + def prepare_db_cmd(self): + """ + Perform the asm disk group creation + """ + stdbydbuname,prmydbuname,prmydbport,prmydbname,prmyscanname=self.get_stdby_variables() + 
connect_str,osuser,dbhome,dbbase,oinv,osid=self.get_primary_connect_str() + dbuser,dbhome,dbase,oinv=self.ocommon.get_db_params() + pub_nodes,vip_nodes,priv_nodes=self.ocommon.process_cluster_vars("CRS_NODES") + crs_nodes=pub_nodes.replace(" ",",") + + dgname=self.ora_env_dict["CRS_ASM_DISKGROUP"] if self.ocommon.check_key("CRS_ASM_DISKGROUP",self.ora_env_dict) else "+DATA" + dbfiledest=self.ora_env_dict["DB_DATA_FILE_DEST"] if self.ocommon.check_key("DB_DATA_FILE_DEST",self.ora_env_dict) else dgname + stype=self.ora_env_dict["DB_STORAGE_TYPE"] if self.ocommon.check_key("DB_STORAGE_TYPE",self.ora_env_dict) else "ASM" + dbctype=self.ora_env_dict["DB_CONFIG_TYPE"] if self.ocommon.check_key("DB_CONFIG_TYPE",self.ora_env_dict) else "RAC" + prmydbstr='''{0}:{1}/{2}'''.format(prmyscanname,prmydbport,prmydbuname) + initparams=self.get_init_params() + #memorypct=self.get_memorypct() + + rspdata='''su - {0} -c "echo HIDDEN_STRING | {1}/bin/dbca -silent -ignorePrereqFailure -createDuplicateDB \ + -gdbname {2} \ + -sid {3} \ + -createAsStandby \ + -adminManaged \ + -sysPassword HIDDEN_STRING \ + -datafileDestination {4} \ + -storageType {5} \ + -nodelist {6} \ + -useOMF true \ + -remoteDBConnString {7} \ + -initparams {8} \ + -dbUniqueName {3} \ + -databaseConfigType {9}"'''.format(dbuser,dbhome,prmydbname,stdbydbuname,self.ocommon.setdgprefix(dbfiledest),stype,crs_nodes,prmydbstr,initparams,dbctype) + cmd='\n'.join(line.lstrip() for line in rspdata.splitlines()) + + return cmd + + def get_init_params(self): + """ + Perform the asm disk group creation + """ + stdbydbuname,prmydbuname,prmydbport,prmydbname,prmyscanname=self.get_stdby_variables() + connect_str,osuser,dbhome,dbbase,oinv,osid=self.get_primary_connect_str() + + prmydbdg=self.ocommon.get_init_params("db_create_file_dest",connect_str) + prmylsdg=self.ocommon.get_init_params("DB_RECOVERY_FILE_DEST",connect_str) + stdbylgdg,dbrdestsize,stdbydbdg,dgname=self.get_stdby_dg_name() + dbrdest=stdbylgdg + + 
dbconfig="""DG_CONFIG=({0},{1})""".format(prmydbuname,stdbydbuname) + lgdest1="""LOCATION=USE_DB_RECOVERY_FILE_DEST VALID_FOR=(ALL_LOGFILE,ALL_ROLE) DB_UNIQUE_NAME={0}""".format(stdbydbuname) + lgdest2="""SERVICE={0} ASYNC VALID_FOR=(ONLINE_LOGFILE,PRIMARY_ROLE) DB_UNIQUE_NAME={0}""".format(prmydbuname) + + sgasize=self.ora_env_dict["INIT_SGA_SIZE"] if self.ocommon.check_key("INIT_SGA_SIZE",self.ora_env_dict) else None + pgasize=self.ora_env_dict["INIT_PGA_SIZE"] if self.ocommon.check_key("INIT_PGA_SIZE",self.ora_env_dict) else None + processes=self.ora_env_dict["INIT_PROCESSES"] if self.ocommon.check_key("INIT_PROCESSES",self.ora_env_dict) else None + dbuname=self.ora_env_dict["DB_UNIQUE_NAME"] if self.ocommon.check_key("DB_UNIQUE_NAME",self.ora_env_dict) else "SORCLCDB" + dgname=self.ora_env_dict["CRS_ASM_DISKGROUP"] if self.ocommon.check_key("CRS_ASM_DISKGROUP",self.ora_env_dict) else "+DATA" + dbconv="""'{0}','{1}'""".format(prmydbdg,stdbydbdg) + lgconv="""'{0}','{1}'""".format(prmylsdg,stdbylgdg) + + + cpucount=self.ora_env_dict["CPU_COUNT"] if self.ocommon.check_key("CPU_COUNT",self.ora_env_dict) else None + remotepasswdfile="REMOTE_LOGIN_PASSWORDFILE=EXCLUSIVE" + lgformat="LOG_ARCHIVE_FORMAT=%t_%s_%r.arc" + + initprm="""db_recovery_file_dest={0},db_recovery_file_dest_size={1},db_create_file_dest={2}""".format(dbrdest,dbrdestsize,stdbydbdg,remotepasswdfile,lgformat,stdbydbuname,dbconv,lgconv,prmydbname,dbconfig,lgdest1,lgdest2,prmydbuname) + + #initprm="""db_recovery_file_dest={0},db_recovery_file_dest_size={1},db_create_file_dest={2},{3},{4},db_unique_name={5},db_file_name_convert={6},log_file_name_convert={7},db_name={8},LOG_ARCHIVE_CONFIG='{9}',LOG_ARCHIVE_DEST_1='{10}',LOG_ARCHIVE_DEST_2='{11}',STANDBY_FILE_MANAGEMENT='AUTO',FAL_SERVER={12}""".format(dbrdest,dbrdestsize,stdbydbdg,remotepasswdfile,lgformat,stdbydbuname,dbconv,lgconv,prmydbname,dbconfig,lgdest1,lgdest2,prmydbuname) + + if sgasize: + initprm= initprm + 
''',sga_target={0},sga_max_size={0}'''.format(sgasize) + + if pgasize: + initprm= initprm + ''',pga_aggregate_size={0}'''.format(pgasize) + + if processes: + initprm= initprm + ''',processes={0}'''.format(processes) + + if cpucount: + initprm= initprm + ''',cpu_count={0}'''.format(cpucount) + + initparams='''{0}'''.format(initprm) + + return initparams + + def configure_standby_db(self): + """ + Setup standby after creation using DBCA + """ + stdbydbuname,prmydbuname,prmydbport,prmydbname,prmyscanname=self.get_stdby_variables() + connect_str,osuser,dbhome,dbbase,oinv,osid=self.get_standby_connect_str() + stdbyscanname=self.ora_env_dict["SCAN_NAME"] if self.ocommon.check_key("SCAN_NAME",self.ora_env_dict) else self.prog_exit("127") + stdbyscanport=self.ora_env_dict["SCAN_PORT"] if self.ocommon.check_key("SCAN_PORT",self.ora_env_dict) else "1521" + prmytnssvc=self.ocommon.get_tnssvc_str(prmydbuname,prmydbport,prmyscanname) + stdbytnssvc=self.ocommon.get_tnssvc_str(stdbydbuname,stdbyscanport,stdbyscanname) + + msg='''Setting parameters in standby DB''' + self.ocommon.log_info_message(msg,self.file_name) + stdbylgdg,dbrdestsize,stdbydbdg,dgname=self.get_stdby_dg_name() + lgdest1="""LOCATION=USE_DB_RECOVERY_FILE_DEST VALID_FOR=(ALL_LOGFILES,ALL_ROLES) DB_UNIQUE_NAME={0}""".format(stdbydbuname) + lgdest2='''SERVICE="{0}" ASYNC VALID_FOR=(ONLINE_LOGFILE,PRIMARY_ROLE) DB_UNIQUE_NAME={1}'''.format(prmytnssvc,prmydbuname) + + + sqlcmd=""" + alter system set LOG_ARCHIVE_CONFIG='DG_CONFIG=({2},{3})' scope=both sid='*'; + alter system set dg_broker_config_file1='{4}' scope=spfile sid='*'; + alter system set dg_broker_config_file2='{4}' scope=spfile sid='*'; + alter system set FAL_SERVER='{5}' scope=both sid='*'; + alter system set dg_broker_start=true scope=both sid='*'; + alter system set LOG_ARCHIVE_DEST_1='{0}' scope=both sid='*'; + alter system set LOG_ARCHIVE_DEST_2='{1}' scope=both sid='*'; + alter system set LOG_ARCHIVE_DEST_STATE_1=ENABLE scope=both sid='*'; + alter 
system set LOG_ARCHIVE_DEST_STATE_2=ENABLE scope=both sid='*'; + alter system set DB_FILES=1024 scope=spfile sid='*'; + alter system set LOG_BUFFER=256M scope=spfile sid='*'; + alter system set DB_BLOCK_CHECKSUM='TYPICAL' scope=spfile sid='*'; + alter system set DB_LOST_WRITE_PROTECT='TYPICAL' scope=spfile sid='*'; + alter system set DB_FLASHBACK_RETENTION_TARGET=120 scope=spfile sid='*'; + alter system set PARALLEL_THREADS_PER_CPU=1 scope=spfile sid='*'; + alter database recover managed standby database cancel; + alter database flashback on; + alter database recover managed standby database disconnect; + """.format(lgdest1,lgdest2,prmydbuname,stdbydbuname,stdbydbdg,prmytnssvc) + + output=self.ocommon.run_sql_cmd(sqlcmd,connect_str) + hostname = self.ocommon.get_public_hostname() + self.ocommon.stop_rac_db(osuser,dbhome,stdbydbuname,hostname) + self.ocommon.start_rac_db(osuser,dbhome,stdbydbuname,hostname,None) + + def configure_dgsetup(self): + """ + Setup Data Guard + """ + stdbydbuname,prmydbuname,prmydbport,prmydbname,prmyscanname=self.get_stdby_variables() + osuser,dbhome,dbbase,oinv=self.ocommon.get_db_params() + hostname = self.ocommon.get_public_hostname() + inst_sid=self.ocommon.get_inst_sid(osuser,dbhome,stdbydbuname,hostname) + connect_str=self.ocommon.get_dgmgr_str(dbhome,inst_sid,osuser,"sys","HIDDEN_STRING",prmyscanname,prmydbport,prmydbuname,None,"sysdba",None) + stdbyscanname=self.ora_env_dict["SCAN_NAME"] if self.ocommon.check_key("SCAN_NAME",self.ora_env_dict) else self.prog_exit("127") + stdbyscanport=self.ora_env_dict["SCAN_PORT"] if self.ocommon.check_key("SCAN_PORT",self.ora_env_dict) else "1521" + prmytnssvc=self.ocommon.get_tnssvc_str(prmydbuname,prmydbport,prmyscanname) + stdbytnssvc=self.ocommon.get_tnssvc_str(stdbydbuname,stdbyscanport,stdbyscanname) + + dgcmd=''' + create configuration '{0}' as primary database is {0} connect identifier is "{2}"; + ADD DATABASE {1} AS CONNECT IDENTIFIER IS "{3}"; + enable configuration; + exit; + 
'''.format(prmydbuname,stdbydbuname,prmytnssvc,stdbytnssvc) + dbpasswd=self.ocommon.get_db_passwd() + self.ocommon.set_mask_str(dbpasswd) + output,error,retcode=self.ocommon.run_sqlplus(connect_str,dgcmd,None) + self.ocommon.log_info_message("Calling check_sql_err() to validate the sql command return status",self.file_name) + self.ocommon.check_dgmgrl_err(output,error,retcode,None) + self.ocommon.unset_mask_str() + + + def restart_db(self): + """ + restart DB + """ + stdbydbuname,prmydbuname,prmydbport,prmydbname,prmyscanname=self.get_stdby_variables() + connect_str,osuser,dbhome,dbbase,oinv,osid=self.get_standby_connect_str() + hostname = self.ocommon.get_public_hostname() + self.ocommon.stop_rac_db(osuser,dbhome,stdbydbuname,hostname) + self.ocommon.start_rac_db(osuser,dbhome,stdbydbuname,hostname,None) + + diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/orasetupenv.py b/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/orasetupenv.py new file mode 100644 index 0000000000..7909102fb3 --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/orasetupenv.py @@ -0,0 +1,694 @@ +#!/usr/bin/python3 + +############################# +# Copyright (c) 2024, Oracle and/or its affiliates. 
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl
# Author: paramdeep.saini@oracle.com
############################

"""
 Pre-setup environment preparation: dispatches host/OS configuration steps
 (networking, systemd, NTP, /etc/hosts, user profiles, GI/DB software) that
 must run before the RAC setup classes, based on env-var driven setup type.
"""

from oralogger import *
from oraenv import *
from oracommon import *
from oramachine import *
from orasshsetup import *
from oracvu import *

import os
import re
import sys
import itertools
from time import sleep, perf_counter
#from threading import Thread
from multiprocessing import Process

class OraSetupEnv:
    """
    This class setup the env before setting up the rac env
    """
    def __init__(self,oralogger,orahandler,oraenv,oracommon,oracvu,orasetupssh):
        # Wire up the shared logger/env/common/CVU/SSH helpers; mirrors the
        # other ora*.py orchestrator classes added in this patch.
        try:
            self.ologger = oralogger
            self.ohandler = orahandler
            self.oenv = oraenv.get_instance()
            self.ocommon = oracommon
            self.ocvu = oracvu
            self.osetupssh = orasetupssh
            self.ora_env_dict = oraenv.get_env_vars()
            self.file_name = os.path.basename(__file__)
        except BaseException as ex:
            ex_type, ex_value, ex_traceback = sys.exc_info()
            # NOTE(review): sys.tracebacklimit has no extract_tb(); this was
            # almost certainly meant to be traceback.extract_tb().  As
            # written, the handler itself raises AttributeError.
            trace_back = sys.tracebacklimit.extract_tb(ex_traceback)
            stack_trace = list()
            for trace in trace_back:
                stack_trace.append("File : %s , Line : %d, Func.Name : %s, Message : %s" % (trace[0], trace[1], trace[2], trace[3]))
            self.ocommon.log_info_message(ex_type.__name__,self.file_name)
            self.ocommon.log_info_message(ex_value,self.file_name)
            self.ocommon.log_info_message(stack_trace,self.file_name)

    def setup(self):
        """
        Entry point: either reset OS passwords (RESET_PASSWORD), only
        populate env vars (CUSTOM_RUN_FLAG), or run the full host
        preparation sequence ending with GI/DB software setup.
        """
        self.ocommon.log_info_message("Start setup()",self.file_name)
        ct = datetime.datetime.now()
        bts = ct.timestamp()
        if self.ocommon.check_key("RESET_PASSWORD",self.ora_env_dict):
            self.ocommon.log_info_message("RESET_PASSWORD variable is set. Resetting the OS password for users: " + self.ora_env_dict["RESET_PASSWORD"],self.file_name)
            for user in self.ora_env_dict["RESET_PASSWORD"].split(','):
                self.ocommon.reset_os_password(user)
        elif self.ocommon.check_key("CUSTOM_RUN_FLAG",self.ora_env_dict):
            self.populate_env_vars()
        else:
            if self.ocommon.check_key("DBCA_RESPONSE_FILE",self.ora_env_dict):
                self.ocommon.update_rac_env_vars_from_rspfile(self.ora_env_dict["DBCA_RESPONSE_FILE"])
            # Private-network validation only applies on the install node of
            # a multi-network cluster without a GRID response file.
            if not self.ocommon.check_key("SINGLE_NETWORK",self.ora_env_dict):
                install_node,pubhost=self.ocommon.get_installnode()
                if install_node.lower() == pubhost.lower():
                    if not self.ocommon.check_key("GRID_RESPONSE_FILE",self.ora_env_dict):
                        self.validate_private_nodes()
            self.ocommon.update_domainfrom_resolvconf_file()
            self.populate_env_vars()
            self.env_var_checks()
            self.stdby_env_var_checks()
            self.set_gateway()
            self.add_ntp_conf()
            self.touch_fstab()
            self.reset_systemd()
            self.check_systemd()
            self.set_ping_permission()
            self.set_common_script()
            self.set_asmdev_perm()
            self.add_domain_search()
            self.add_dns_servers()
            self.populate_etchosts("localhost")
            self.populate_user_profiles()
            #self.setup_ssh_for_k8s()
            self.setup_gi_sw()
            self.reset_grid_user_passwd()
            self.setup_db_sw()
            self.reset_db_user_passwd()
            # self.ocommon.log_info_message("Start crs_sw_install()",self.file_name)
            # self.crs_sw_install()
            # self.ocommon.log_info_message("End crs_sw_install()",self.file_name)
            self.setup_ssh_for_k8s()
            self.set_banner()

        ct = datetime.datetime.now()
        ets = ct.timestamp()
        totaltime=ets - bts
        self.ocommon.log_info_message("Total time for setup() = [ " + str(round(totaltime,3)) + " ] seconds",self.file_name)

    ########### SETUP_MACHINE ENDS here ####################

    ## Function to perfom DB checks ######
    def populate_env_vars(self):
        """
        Populate derived RAC env vars; synthesize CRS_NODES from the
        PUBLIC/VIRTUAL/PRIVATE host lists when it was not supplied.
        """
        self.ocommon.populate_rac_env_vars()
        if not self.ocommon.check_key("CRS_NODES",self.ora_env_dict):
            # NOTE(review): logged via log_error_message but execution
            # continues into populate_crs_nodes() — reads as a warning in
            # practice; confirm the severity is intended.
            msg="CRS_NODES is not passed as an env variable. If CRS_NODES is not passed as env variable then user must pass PUBLIC_HOSTS,VIRTUAL_HOSTS and PRIVATE_HOST as en env variable so that CRS_NODES can be populated."
            self.ocommon.log_error_message(msg,self.file_name)
            self.populate_crs_nodes()

    def populate_crs_nodes(self):
        """
        Build CRS_NODES ("pubhost=..,viphost=..[,privhost=..];...") from
        PUBLIC_HOSTS/VIRTUAL_HOSTS/PRIVATE_HOSTS; exits on missing or
        mismatched lists.
        """
        pub_node_list=[]
        virt_node_list=[]
        priv_node_list=[]

        crs_nodes=""
        if not self.ocommon.check_key("PUBLIC_HOSTS",self.ora_env_dict):
            self.ocommon.log_error_message("PUBLIC_HOSTS list is not found in env variable list.Exiting...",self.file_name)
            self.ocommon.prog_exit("127")
        else:
            pub_node_list=self.ora_env_dict["PUBLIC_HOSTS"].split(",")

        if not self.ocommon.check_key("VIRTUAL_HOSTS",self.ora_env_dict):
            self.ocommon.log_error_message("VIRTUAL_HOSTS list is not found in env variable list.Exiting...",self.file_name)
            self.ocommon.prog_exit("127")
        else:
            virt_node_list=self.ora_env_dict["VIRTUAL_HOSTS"].split(",")

        # PRIVATE_HOSTS only matters when no PRIVATE_IP*_LIST is given.
        if not self.ocommon.check_key("PRIVATE_IP1_LIST",self.ora_env_dict) and not self.ocommon.check_key("PRIVATE_IP2_LIST",self.ora_env_dict):
            if self.ocommon.check_key("PRIVATE_HOSTS",self.ora_env_dict):
                priv_node_list=self.ora_env_dict["PRIVATE_HOSTS"].split(",")

        if not self.ocommon.check_key("SINGLE_NETWORK",self.ora_env_dict):
            if len(pub_node_list) == len(virt_node_list) and len(pub_node_list) == len(priv_node_list):
                for (pubnode,vipnode,privnode) in zip(pub_node_list,virt_node_list,priv_node_list):
                    crs_nodes= crs_nodes + "pubhost=" + pubnode + "," + "viphost=" + vipnode + "," + "privhost=" + privnode + ";"
            else:
                if len(pub_node_list) == len(virt_node_list):
                    # NOTE(review): zip() of TWO lists cannot unpack into
                    # three names — this raises ValueError when reached;
                    # privnode should be dropped from the target tuple.
                    for (pubnode,vipnode,privnode) in zip(pub_node_list,virt_node_list):
                        crs_nodes= crs_nodes + "pubhost=" + pubnode + "," + "viphost=" + vipnode + ";"
                else:
                    self.ocommon.log_error_message("public node and virtual host node count is not equal",self.file_name)
                    self.ocommon.prog_exit("127")
        else:
            if len(pub_node_list) == len(virt_node_list):
                # NOTE(review): same three-name unpack of a two-list zip as
                # above — ValueError when reached.
                for (pubnode,vipnode,privnode) in zip(pub_node_list,virt_node_list):
                    crs_nodes= crs_nodes + "pubhost=" + pubnode + "," + "viphost=" + vipnode + ";"

        crs_nodes=crs_nodes.strip(";")
        self.ora_env_dict=self.ocommon.add_key("CRS_NODES",crs_nodes,self.ora_env_dict)
        self.ocommon.log_info_message("CRS_NODES is populated: " + self.ora_env_dict["CRS_NODES"] ,self.file_name)

    def validate_private_nodes(self):
        """
        Require at least one of PRIVATE_HOSTS / PRIVATE_IP1_LIST /
        PRIVATE_IP2_LIST; ping every listed private IP (fatal on failure,
        per ping_ip's second arg).
        """
        priv_node_status=False

        if self.ocommon.check_key("PRIVATE_HOSTS",self.ora_env_dict):
            priv_node_status=True
            priv_node_list=self.ora_env_dict["PRIVATE_HOSTS"].split(",")
        else:
            self.ocommon.log_info_message("PRIVATE_HOSTS is not set.",self.file_name)

        if self.ocommon.check_key("PRIVATE_IP1_LIST",self.ora_env_dict):
            priv_node_status=True
            priv_ip1_list=self.ora_env_dict["PRIVATE_IP1_LIST"].split(",")
            for ip in priv_ip1_list:
                self.ocommon.ping_ip(ip,True)
        else:
            self.ocommon.log_info_message("PRIVATE_IP1_LIST is not set.",self.file_name)

        if self.ocommon.check_key("PRIVATE_IP2_LIST",self.ora_env_dict):
            priv_node_status=True
            priv_ip2_list=self.ora_env_dict["PRIVATE_IP2_LIST"].split(",")
            for ip in priv_ip2_list:
                self.ocommon.ping_ip(ip,True)
        else:
            self.ocommon.log_info_message("PRIVATE_IP2_LIST is not set.",self.file_name)

        if not priv_node_status:
            self.ocommon.log_error_message("PRIVATE_HOSTS or PRIVATE_IP1_LIST or PRIVATE_IP2_LIST list is not found in env variable list.Exiting...",self.file_name)
            self.ocommon.prog_exit("127")

    def env_var_checks(self):
        """
        Verify required env vars; second arg to check_env_variable is
        presumably 'required' (True = mandatory) — per its usage pattern.
        """
        self.ocommon.check_env_variable("GRID_HOME",True)
        self.ocommon.check_env_variable("GRID_BASE",True)
        self.ocommon.check_env_variable("INVENTORY",True)
        self.ocommon.check_env_variable("DB_HOME",False)
        self.ocommon.check_env_variable("DB_BASE",False)

    def stdby_env_var_checks(self):
        """
        Extra env-var checks that apply only to OP_TYPE=setupracstandby.
        """
        if self.ocommon.check_key("OP_TYPE",self.ora_env_dict):
            if self.ora_env_dict["OP_TYPE"] == 'setupracstandby':
                self.ocommon.check_env_variable("DB_UNIQUE_NAME",False)
                self.ocommon.check_env_variable("PRIMARY_DB_SCAN_PORT",False)
                self.ocommon.check_env_variable("PRIMARY_DB_NAME",True)
                self.ocommon.check_env_variable("PRIMARY_DB_SERVICE_NAME",False)
                self.ocommon.check_env_variable("PRIMARY_DB_UNIQUE_NAME",True)
                self.ocommon.check_env_variable("PRIMARY_DB_SCAN_NAME",True)

    def set_gateway(self):
        """
        Replace the container's default route with DEFAULT_GATEWAY after
        validating it is a well-formed IP; fatal on a malformed value.
        """
        if self.ocommon.check_key("DEFAULT_GATEWAY",self.ora_env_dict):
            self.ocommon.log_info_message("DEFAULT_GATEWAY variable is set. Validating the gateway gw",self.file_name)
            if self.ocommon.validate_ip(self.ora_env_dict["DEFAULT_GATEWAY"]):
                cmd='''ip route; ip route del default'''
                output,error,retcode=self.ocommon.execute_cmd(cmd,None,None)
                self.ocommon.check_os_err(output,error,retcode,None)
                ### Set the Default gw
                self.ocommon.log_info_message("Setting default gateway based on new gateway setting",self.file_name)
                cmd='''route add default gw {0}'''.format(self.ora_env_dict["DEFAULT_GATEWAY"])
                output,error,retcode=self.ocommon.execute_cmd(cmd,None,None)
                self.ocommon.check_os_err(output,error,retcode,None)
            else:
                self.ocommon.log_error_message("DEFAULT_GATEWAY IP is not correct. Exiting..",self.file_name)
                self.ocommon.prog_exit("NONE")
        else:
            self.ocommon.log_info_message("DEFAULT_GATEWAY variable is not set.",self.file_name)

    def add_ntp_conf(self):
        """
        When NTP_START is set: touch /etc/ntp.conf and start ntpd
        (last arg True to check_os_err => failures are fatal).
        """
        if self.ocommon.check_key("NTP_START",self.ora_env_dict):
            self.ocommon.log_info_message("NTP_START variable is set. Touching /etc/ntpd.conf",self.file_name)
            cmd='''touch /etc/ntp.conf'''
            output,error,retcode=self.ocommon.execute_cmd(cmd,None,None)
            self.ocommon.check_os_err(output,error,retcode,True)
            ### Start NTP
            self.ocommon.log_info_message("NTP_START variable is set. Starting NTPD",self.file_name)
            cmd='''systemctl start ntpd'''
            output,error,retcode=self.ocommon.execute_cmd(cmd,None,None)
            self.ocommon.check_os_err(output,error,retcode,True)

    def populate_etchosts(self,entry):
        """
        Populate /etc/hosts outside k8s: copy HOSTFILE verbatim when
        provided, else write localhost lines and append one line per
        public/VIP node from CRS_NODES.

        NOTE(review): the `entry` parameter is never used.
        """
        cmd=None
        etchostfile="/etc/hosts"
        if not self.ocommon.detect_k8s_env():
            if self.ocommon.check_key("HOSTFILE",self.ora_env_dict):
                if os.path.exists(self.ora_env_dict["HOSTFILE"]):
                    cmd='''cat {0} > /etc/hosts'''.format(self.ora_env_dict["HOSTFILE"])
                    output,error,retcode=self.ocommon.execute_cmd(cmd,None,None)
            else:
                lentry='''127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4 \n::1 localhost localhost.localdomain localhost6 localhost6.localdomain6'''

                self.write_etchost("localhost.localdomain",etchostfile,"write",lentry)
                if self.ocommon.check_key("CRS_NODES",self.ora_env_dict):
                    pub_nodes,vip_nodes,priv_nodes=self.ocommon.process_cluster_vars("CRS_NODES")
                    pub_nodes1=pub_nodes.replace(" ",",")
                    vip_nodes1=vip_nodes.replace(" ",",")
                    for node in pub_nodes1.split(","):
                        self.ocommon.log_info_message("The node set to :" + node + "-" + pub_nodes1,self.file_name)
                        self.write_etchost(node,etchostfile,"append",None)
                    for node in vip_nodes1.split(","):
                        self.write_etchost(node,etchostfile,"append",None)

    def write_etchost(self,node,file,mode,lentry):
        """
        Write ('write' mode: overwrite with lentry) or append ('append'
        mode: "<ip> <fqdn> <node>", IP resolved via PUBLIC_HOSTS_DOMAIN)
        to the hosts file; any other mode is a no-op.
        """
        if mode == 'append':
            #fdata=self.ocommon.read_file(file)
            #match=re.search(node,fdata,re.MULTILINE)
            #if not match:
            # NOTE(review): direct dict access — KeyError if
            # PUBLIC_HOSTS_DOMAIN is unset; no duplicate check (the guard
            # above is commented out), so re-runs append duplicate lines.
            domain=self.ora_env_dict["PUBLIC_HOSTS_DOMAIN"]
            self.ocommon.log_info_message("Domain is :" + self.ora_env_dict["PUBLIC_HOSTS_DOMAIN"],self.file_name)
            self.ocommon.log_info_message("The hostname :" + node + "." + domain,self.file_name)
            ip=self.ocommon.get_ip(node,domain)
            # self.ocommon.log_info_message(" The Ip set to :", ip)
            entry='''{0} {1} {2}'''.format(ip,node + "." + domain,node)
            # self.ocommon.log_info_message(" The entry set to :", entry)
            cmd='''echo {0} >> {1}'''.format(entry,file)
            output,error,retcode=self.ocommon.execute_cmd(cmd,None,None)
        elif mode == 'write':
            #fdata=self.ocommon.read_file(file)
            #match=re.search(node,fdata,re.MULTILINE)
            #if not match:
            #self.ocommon.log_info_message(" The lentry set to :", lentry)
            cmd='''echo "{0}" > "{1}"'''.format(lentry,file)
            output,error,retcode=self.ocommon.execute_cmd(cmd,None,None)
        else:
            pass

    def touch_fstab(self):
        """
        Ensure /etc/fstab exists (some installer prechecks read it).
        """
        cmd='''touch /etc/fstab'''
        output,error,retcode=self.ocommon.execute_cmd(cmd,None,None)
        self.ocommon.check_os_err(output,error,retcode,True)

    def reset_systemd(self):
        """
        Disable every failed systemd unit, then reset-failed, so CRS
        prechecks see a clean systemd state.
        """
        self.ocommon.log_info_message("Checking systemd failed units.",self.file_name)
        cmd="""systemctl | grep failed | awk '{ print $2 }'"""
        output,error,retcode=self.ocommon.execute_cmd(cmd,None,None)
        self.ocommon.check_os_err(output,error,retcode,True)
        self.ocommon.log_info_message("Disabling failed units.",self.file_name)
        if output:
            for svc in output.split('\n'):
                if svc:
                    cmd='''systemctl disable {0}'''.format(svc)
                    output,error,retcode=self.ocommon.execute_cmd(cmd,None,None)
                    self.ocommon.check_os_err(output,error,retcode,True)
        self.ocommon.log_info_message("Resetting systemd.",self.file_name)
        cmd='''systemctl reset-failed'''
        output,error,retcode=self.ocommon.execute_cmd(cmd,None,None)
        self.ocommon.check_os_err(output,error,retcode,None)

    def check_systemd(self):
        """
        Log whether systemd reports 'running'; the hard exit on failure is
        currently commented out, so this is informational only.
        """
        self.ocommon.log_info_message("Checking systemd. It must be in running state to setup clusterware inside containers for clusterware.",self.file_name)
        cmd="""systemctl status | awk '/State:/{ print $0 }' | grep -v 'awk /State:/' | awk '{ print $2 }'"""
        output,error,retcode=self.ocommon.execute_cmd(cmd,None,None)
        self.ocommon.check_os_err(output,error,retcode,True)
        if 'running' in output:
            self.ocommon.log_info_message("Systemctl status check passed!",self.file_name)
        else:
            self.ocommon.log_error_message("Systemctl is not in running state.",self.file_name)
            #self.ocommon.prog_exit("None")

    def set_ping_permission(self):
        """
        Intentionally a no-op: the setuid-chmod on ping is retained below
        as commented-out history.
        """
        pass
        #self.ocommon.log_info_message("Setting ping utility permissions so that it works correctly inside container",self.file_name)
        #cmd='''chmod 6755 /usr/bin/ping;chmod 6755 /bin/ping'''
        #output,error,retcode=self.ocommon.execute_cmd(cmd,None,None)
        #self.ocommon.check_os_err(output,error,retcode,None)

    def set_common_script(self):
        """
        Set permissions on the COMMON_SCRIPTS directory when configured.
        NOTE(review): this method is truncated at the end of the visible
        patch chunk; the remainder lies outside this view.
        """
        if self.ocommon.check_key("COMMON_SCRIPTS",self.ora_env_dict):
            self.ocommon.log_info_message("COMMON_SCRIPTS variable is set.",self.file_name)
            if os.path.isdir(self.ora_env_dict["COMMON_SCRIPTS"]):
                self.ocommon.log_info_message("COMMON_SCRIPT variable is set.
Changing permissions and ownership",self.file_name) + cmd='''chown -R grid:oinstall {0}; chmod 775 {0}'''.format(self.ora_env_dict["COMMON_SCRIPTS"]) + output,error,retcode=self.ocommon.execute_cmd(cmd,None,None) + self.ocommon.check_os_err(output,error,retcode,True) + else: + self.ocommon.log_info_message("COMMON_SCRIPT variable is set but directory doesn't exist!",self.file_name) + + def set_asmdev_perm(self): + """ + This function set the correct permissions for ASM Disks + """ + self.ocommon.set_asmdisk_perm("CRS_ASM_DEVICE_LIST",True) + self.ocommon.set_asmdisk_perm("REDO_ASM_DEVICE_LIST",None) + self.ocommon.set_asmdisk_perm("RECO_ASM_DEVICE_LIST",None) + self.ocommon.set_asmdisk_perm("DB_ASM_DEVICE_LIST",None) + if self.ocommon.check_key("CLUSTER_TYPE",self.ora_env_dict): + if self.ora_env_dict["CLUSTER_TYPE"] == 'DOMAIN': + if self.ocommon.check_key("GIMR_ASM_DEVICE_LIST",self.ora_env_dict): + self.ocommon.set_asmdisk_perm("GIMR_ASM_DEVICE_LIST",True) + + ## Function add DOMAIN Server + def add_domain_search(self): + """ + This function update search in /etc/resolv.conf + """ + dns_search_flag=None + search_domain='search' + if self.ocommon.check_key("PUBLIC_HOSTS_DOMAIN",self.ora_env_dict): + self.ocommon.log_info_message("PUBLIC_HOSTS_DOMAIN variable is set. Populating /etc/resolv.conf.",self.file_name) + dns_search_flag=True + for domain in self.ora_env_dict["PUBLIC_HOSTS_DOMAIN"].split(','): + search_domain = search_domain + ' ' + domain + + if self.ocommon.check_key("PRIVATE_HOSTS_DOMAIN",self.ora_env_dict): + self.ocommon.log_info_message("PRIVATE_HOSTS_DOMAIN variable is set. Populating /etc/resolv.conf.",self.file_name) + dns_search_flag=True + for domain in self.ora_env_dict["PRIVATE_HOSTS_DOMAIN"].split(','): + search_domain = search_domain + ' ' + domain + + if self.ocommon.check_key("CUSTOM_DOMAIN",self.ora_env_dict): + self.ocommon.log_info_message("CUSTOM_DOMAIN variable is set. 
Populating /etc/resolv.conf.",self.file_name) + dns_search_flag=True + for domain in self.ora_env_dict["CUSTOM_DOMAIN"].split(','): + search_domain = search_domain + ' ' + domain + + if dns_search_flag: + self.ocommon.log_info_message("Search Domain {0} is ready. Adding enteries in /etc/resolv.conf".format(search_domain),self.file_name) + cmd='''echo "{0}" > /etc/resolv.conf'''.format(search_domain) + output,error,retcode=self.ocommon.execute_cmd(cmd,None,None) + self.ocommon.check_os_err(output,error,retcode,True) + + ## Function to perfom grid sw installation ###### + def add_dns_servers(self): + """ + This function add the dns servers + """ + if self.ocommon.check_key("DNS_SERVERS",self.ora_env_dict): + self.ocommon.log_info_message("DNS_SERVERS variable is set. Populating /etc/resolv.conf with DNS servers.",self.file_name) + for server in self.ora_env_dict["DNS_SERVERS"].split(','): + if server not in open('/etc/resolv.conf').read(): + cmd='''echo "nameserver {0}" >> /etc/resolv.conf'''.format(server) + output,error,retcode=self.ocommon.execute_cmd(cmd,None,None) + self.ocommon.check_os_err(output,error,retcode,True) + else: + self.ocommon.log_info_message("DNS_SERVERS variable is not set.",self.file_name) + + ## Function to perfom oracle sw installation ###### + def setup_gi_sw(self): + """ + This function unzip the Grid and Oracle Software + """ + gihome="" + oinv="" + gibase="" + giuser="" + gigrp="" + giswfie="" + + if self.ocommon.check_key("COPY_GRID_SOFTWARE",self.ora_env_dict): + giuser,gihome,gibase,oinv=self.ocommon.get_gi_params() + gigrp=self.ora_env_dict["OINSTALL"] + + self.ocommon.log_info_message("Copy Software flag is set",self.file_name) + self.ocommon.log_info_message("Setting up a oracle invetnory directory!",self.file_name) + self.setup_sw_dirs(oinv,giuser,gigrp) + self.ocommon.log_info_message("Setting up Grid_BASE directory!",self.file_name) + self.setup_sw_dirs(gibase,giuser,gigrp) + self.ocommon.log_info_message("Setting up Grid_HOME 
directory!",self.file_name) + self.setup_sw_dirs(gihome,giuser,gigrp) + ### Unzipping Gi Software + if self.ocommon.check_key("OP_TYPE",self.ora_env_dict): + #if self.ocommon.check_key("OP_TYPE",self.ora_env_dict) and any(optype == self.ora_env_dict["OP_TYPE"] for optype not in ("racaddnode")): + if self.ocommon.check_env_variable("STAGING_SOFTWARE_LOC",True) and self.ocommon.check_env_variable("GRID_SW_ZIP_FILE",True): + giswfile=self.ora_env_dict["STAGING_SOFTWARE_LOC"] + "/" + self.ora_env_dict["GRID_SW_ZIP_FILE"] + if os.path.isfile(giswfile): + dir = os.listdir(gihome) + if len(dir) == 0: + self.ocommon.log_info_message("Grid software file is set : " + giswfile ,self.file_name) + self.ocommon.log_info_message("Starting grid software unzipping file",self.file_name) + cmd='''su - {0} -c \" unzip -q {1} -d {2}\"'''.format(giuser,giswfile,gihome) + output,error,retcode=self.ocommon.execute_cmd(cmd,None,None) + self.ocommon.check_os_err(output,error,retcode,True) + self.ora_env_dict=self.ocommon.add_key("GI_SW_UNZIPPED_FLAG","true",self.ora_env_dict) + else: + self.ocommon.log_error_message("Oracle GI home directory is not empty. Skipping software Unzipping...",self.file_name) + else: + install_node,pubhost=self.ocommon.get_installnode() + if install_node.lower() == pubhost.lower(): + self.ocommon.log_error_message("Grid software file " + giswfile + " doesn't exist. Exiting...",self.file_name) + self.ocommon.prog_exit("127") + else: + self.ocommon.log_info_message("DB software file " + giswfile + " doesn't exist. Software will be copied from install node..." 
+ install_node.lower(),self.file_name) + + ## Function to unzip the software + def setup_db_sw(self): + """ + unzip the software + """ + dbhome="" + dbbase="" + dbuser="" + gigrp="" + dbswfile="" + + if self.ocommon.check_key("COPY_DB_SOFTWARE",self.ora_env_dict): + dbuser,dbhome,dbbase,oinv=self.ocommon.get_db_params() + gigrp=self.ora_env_dict["OINSTALL"] + + self.ocommon.log_info_message("Copy Software flag is set",self.file_name) + self.ocommon.log_info_message("Setting up ORACLE_BASE directory!",self.file_name) + self.setup_sw_dirs(dbbase,dbuser,gigrp) + self.ocommon.log_info_message("Setting up DB_HOME directory!",self.file_name) + self.setup_sw_dirs(dbhome,dbuser,gigrp) + ### Unzipping Gi Software + if self.ocommon.check_key("OP_TYPE",self.ora_env_dict): + #if self.ocommon.check_key("OP_TYPE",self.ora_env_dict) and any(optype == self.ora_env_dict["OP_TYPE"] for optype not in ("racaddnode")): + if self.ocommon.check_env_variable("STAGING_SOFTWARE_LOC",True) and self.ocommon.check_env_variable("DB_SW_ZIP_FILE",True): + dbswfile=self.ora_env_dict["STAGING_SOFTWARE_LOC"] + "/" + self.ora_env_dict["DB_SW_ZIP_FILE"] + if os.path.isfile(dbswfile): + dir = os.listdir(dbhome) + if len(dir) == 0: + self.ocommon.log_info_message("DB software file is set : " + dbswfile , self.file_name) + self.ocommon.log_info_message("Starting db software unzipping file",self.file_name) + cmd='''su - {0} -c \" unzip -q {1} -d {2}\"'''.format(dbuser,dbswfile,dbhome) + output,error,retcode=self.ocommon.execute_cmd(cmd,None,None) + self.ocommon.check_os_err(output,error,retcode,True) + self.ora_env_dict=self.ocommon.add_key("RAC_SW_UNZIPPED_FLAG","true",self.ora_env_dict) + else: + self.ocommon.log_error_message("Oracle DB home directory is not empty. Skipping software Unzipping...",self.file_name) + else: + install_node,pubhost=self.ocommon.get_installnode() + if install_node.lower() == pubhost.lower(): + self.ocommon.log_error_message("DB software file " + dbswfile + " doesn't exist. 
Exiting...",self.file_name) + self.ocommon.prog_exit("127") + else: + self.ocommon.log_info_message("DB software file " + dbswfile + " doesn't exist. Software will be copied from install node..." + install_node.lower(),self.file_name) + + def setup_sw_dirs(self,dir,user,group): + """ + This function setup the Oracle Software directories if not already created + """ + if os.path.isdir(dir): + self.ocommon.log_info_message("Directory " + dir + " already exist!",self.file_name) + else: + self.ocommon.log_info_message("Creating dir " + dir,self.file_name) + cmd='''mkdir -p {0}'''.format(dir) + output,error,retcode=self.ocommon.execute_cmd(cmd,None,None) + self.ocommon.check_os_err(output,error,retcode,True) + #### + self.ocommon.log_info_message("Changing the permissions of directory",self.file_name) + cmd='''chown -R {0}:{1} {2}'''.format(user,group,dir) + output,error,retcode=self.ocommon.execute_cmd(cmd,None,None) + self.ocommon.check_os_err(output,error,retcode,True) + +###### Checking GI Home ####### + def reset_grid_user_passwd(self): + """ + This function check the Gi home and if it is not setup the it will reset the GI user password + """ + if self.ocommon.check_key("OP_TYPE",self.ora_env_dict): + if self.ora_env_dict["OP_TYPE"] == 'nosetup': + if not self.ocommon.check_key("SSH_PRIVATE_KEY",self.ora_env_dict) and not self.ocommon.check_key("SSH_PUBLIC_KEY",self.ora_env_dict): + user=self.ora_env_dict["GRID_USER"] + self.ocommon.log_info_message("Resetting OS Password for OS user : " + user,self.file_name) + self.ocommon.reset_os_password(user) + +###### Checking RAC Home ####### + def reset_db_user_passwd(self): + """ + This function check the RAC home and if it is not setup the it will reset the DB user password + """ + if self.ocommon.check_key("OP_TYPE",self.ora_env_dict): + if self.ora_env_dict["OP_TYPE"] == 'nosetup': + if not self.ocommon.check_key("SSH_PRIVATE_KEY",self.ora_env_dict) and not self.ocommon.check_key("SSH_PUBLIC_KEY",self.ora_env_dict): + 
user=self.ora_env_dict["DB_USER"] + self.ocommon.log_info_message("Resetting OS Password for OS user : " + user,self.file_name) + self.ocommon.reset_os_password(user) + +###### Setting up parallel Oracle and Grid User setup using Keys #### + def setup_ssh_using_keys(self,sshi): + """ + Setting up ssh using keys + """ + self.ocommon.log_info_message("I am in setup_ssh_using_keys",self.file_name) + uohome=sshi.split(":") + self.ocommon.log_info_message("I am in setup_ssh_using_keys + uhome[0] and uhome[1]",self.file_name) + self.osetupssh.setupsshdirs(uohome[0],uohome[1],None) + self.osetupssh.setupsshusekey(uohome[0],uohome[1],None) + #self.osetupssh.verifyssh(uohome[0],None) + +###### Setting up ssh for K8s ####### + def setup_ssh_for_k8s(self): + """ + This function setup ssh using private and public key in K8s env + """ + if self.ocommon.check_key("SSH_PRIVATE_KEY",self.ora_env_dict) and self.ocommon.check_key("SSH_PUBLIC_KEY",self.ora_env_dict): + if self.ocommon.check_file(self.ora_env_dict["SSH_PRIVATE_KEY"],True,None,None) and self.ocommon.check_file(self.ora_env_dict["SSH_PUBLIC_KEY"],True,None,None): + self.ocommon.log_info_message("Begin SSH Setup using SSH_PRIVATE_KEY and SSH_PUBLIC_KEY",self.file_name) + giuser,gihome,obase,invloc=self.ocommon.get_gi_params() + dbuser,dbhome,dbase,oinv=self.ocommon.get_db_params() + SSH_USERS=giuser + ":" + gihome,dbuser + ":" + dbhome + for sshi in SSH_USERS: + self.setup_ssh_using_keys(sshi) + + self.ocommon.log_info_message("End SSH Setup using SSH_PRIVATE_KEY and SSH_PUBLIC_KEY",self.file_name) + else: + if self.ocommon.detect_k8s_env(): + self.ocommon.log_error_message("SSH_PRIVATE_KEY and SSH_PUBLIC_KEY is ot set in K8s env. 
Exiting..",self.file_name) + self.ocommon.prog_exit("127") +###### Install CRS Software on node ###### + def crs_sw_install(self): + """ + This function performs the crs software install on all the nodes + """ + giuser,gihome,gibase,oinv=self.ocommon.get_gi_params() + status=True + if not self.ocommon.check_key("GI_HOME_INSTALLED_FLAG",self.ora_env_dict): + status=self.ocommon.check_home_inv(None,gihome,giuser) + if not status and self.ocommon.check_key("COPY_GRID_SOFTWARE",self.ora_env_dict): + pub_nodes,vip_nodes,priv_nodes=self.ocommon.process_cluster_vars("CRS_NODES") + crs_nodes=pub_nodes.replace(" ",",") + osdba=self.ora_env_dict["OSDBA_GROUP"] if self.ocommon.check_key("OSDBA",self.ora_env_dict) else "asmdba" + osoper=self.ora_env_dict["OSPER_GROUP"] if self.ocommon.check_key("OSPER_GROUP",self.ora_env_dict) else "asmoper" + osasm=self.ora_env_dict["OSASM_GROUP"] if self.ocommon.check_key("OSASM_GROUP",self.ora_env_dict) else "asmadmin" + unixgrp="oinstall" + hostname=self.ocommon.get_public_hostname() + lang=self.ora_env_dict["LANGUAGE"] if self.ocommon.check_key("LANGUAGE",self.ora_env_dict) else "en" + node=hostname + copyflag=" -noCopy " + if not self.ocommon.check_key("COPY_GRID_SOFTWARE",self.ora_env_dict): + copyflag=" -noCopy " + oraversion=self.ocommon.get_rsp_version("INSTALL",None) + version=oraversion.split(".",1)[0].strip() + + #self.crs_sw_install_on_node(giuser,copyflag,crs_nodes,oinv,gihome,gibase,osdba,osoper,osasm,version,node) + self.ocommon.log_info_message("Running CRS Sw install on node " + node,self.file_name) + self.ocommon.crs_sw_install_on_node(giuser,copyflag,crs_nodes,oinv,gihome,gibase,osdba,osoper,osasm,version,node) + self.ocommon.run_orainstsh_local(giuser,node,oinv) + self.ocommon.run_rootsh_local(gihome,giuser,node) + +###### Setting up ssh for K8s ####### + def populate_user_profiles(self): + """ + This function setup the user profiles if the env is k8s + """ + giuser,gihome,obase,invloc=self.ocommon.get_gi_params() + 
dbuser,dbhome,dbase,oinv=self.ocommon.get_db_params() + gipath='''{0}/bin:/bin:/usr/bin:/sbin:/usr/local/bin'''.format(gihome) + dbpath='''{0}/bin:/bin:/usr/bin:/sbin:/usr/local/bin'''.format(dbhome) + gildpath='''{0}/lib:/lib/:/usr/lib'''.format(gihome) + dbldpath='''{0}/lib:/lib/:/usr/lib'''.format(dbhome) + cdgihome='''cd {0}'''.format(gihome) + cddbhome='''cd {0}'''.format(dbhome) + cdgilogs='''cd {0}/diag/crs/*/crs/trace'''.format(obase) + cddblogs='''cd {0}/diag/rdbms/'''.format(dbase) + cdinvlogs='''cd {0}/logs'''.format(invloc) + + if not self.ocommon.check_key("PROFILE_FLAG",self.ora_env_dict): + self.ora_env_dict=self.ocommon.add_key("PROFILE_FLAG","TRUE",self.ora_env_dict) + + tmpdir=self.ocommon.get_tmpdir() + self.ocommon.set_user_profile(giuser,"TMPDIR",tmpdir,"export") + self.ocommon.set_user_profile(giuser,"TEMP",tmpdir,"export") + self.ocommon.set_user_profile(dbuser,"TMPDIR",tmpdir,"export") + self.ocommon.set_user_profile(dbuser,"TEMP",tmpdir,"export") + if self.ocommon.check_key("PROFILE_FLAG",self.ora_env_dict): + self.ocommon.set_user_profile(giuser,"ORACLE_HOME",gihome,"export") + self.ocommon.set_user_profile(giuser,"GRID_HOME",gihome,"export") + self.ocommon.set_user_profile(giuser,"PATH",gipath,"export") + self.ocommon.set_user_profile(giuser,"LD_LIBRARY_PATH",gildpath,"export") + self.ocommon.set_user_profile(dbuser,"ORACLE_HOME",dbhome,"export") + self.ocommon.set_user_profile(dbuser,"DB_HOME",dbhome,"export") + self.ocommon.set_user_profile(dbuser,"PATH",dbpath,"export") + self.ocommon.set_user_profile(dbuser,"LD_LIBRARY_PATH",dbldpath,"export") + #### Setting alias + self.ocommon.set_user_profile(giuser,"cdgihome",cdgihome,"alias") + self.ocommon.set_user_profile(giuser,"cddbhome",cddbhome,"alias") + self.ocommon.set_user_profile(dbuser,"cddbhome",cddbhome,"alias") + self.ocommon.set_user_profile(giuser,"cdgilogs",cdgilogs,"alias") + self.ocommon.set_user_profile(dbuser,"cddblogs",cddblogs,"alias") + 
self.ocommon.set_user_profile(dbuser,"cdinvlogs",cdinvlogs,"alias") + self.ocommon.set_user_profile(giuser,"cdinvlogs",cdinvlogs,"alias") + + +##### Set the banner ### + def set_banner(self): + """ + This function set the banner + """ + if self.ocommon.check_key("OP_TYPE",self.ora_env_dict): + if self.ocommon.check_key("GI_SW_UNZIPPED_FLAG",self.ora_env_dict) and self.ora_env_dict["OP_TYPE"] == 'nosetup': + msg="Since OP_TYPE is setup to default value(nosetup),setup will be initated by other nodes based on the value OP_TYPES" + self.ocommon.log_info_message(self.ocommon.print_banner(msg),self.file_name) + elif self.ocommon.check_key("GI_SW_UNZIPPED_FLAG",self.ora_env_dict) and self.ora_env_dict["OP_TYPE"] != 'nosetup': + msg="Since OP_TYPE is set to " + self.ora_env_dict["OP_TYPE"] + " ,setup will be initated on this node" + self.ocommon.log_info_message(self.ocommon.print_banner(msg),self.file_name) + else: + giuser,gihome,obase,invloc=self.ocommon.get_gi_params() + pubhostname = self.ocommon.get_public_hostname() + retcode1=self.ocvu.check_home(pubhostname,gihome,giuser) + if retcode1 == 0: + self.ora_env_dict=self.ocommon.add_key("GI_HOME_INSTALLED_FLAG","true",self.ora_env_dict) + status=self.ocommon.check_gi_installed(retcode1,gihome,giuser) + if status: + msg="Grid is already installed on this machine" + self.ocommon.log_info_message(self.ocommon.print_banner(msg),self.file_name) + self.ora_env_dict=self.ocommon.add_key("GI_HOME_CONFIGURED_FLAG","true",self.ora_env_dict) + else: + msg="Grid is not installed on this machine" + self.ocommon.log_info_message(self.ocommon.print_banner(msg),self.file_name) diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/orasshsetup.py b/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/orasshsetup.py new file mode 100644 index 0000000000..487bc0ecfc --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/orasshsetup.py @@ -0,0 +1,213 @@ +#!/usr/bin/python3 + 
#!/usr/bin/python3

#############################
# Copyright (c) 2024, Oracle and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl
# Author: paramdeep.saini@oracle.com
############################

"""
This file contains the code to set up passwordless SSH between the RAC
cluster nodes for the grid and database OS users.
"""

from oralogger import *
from oraenv import *
from oracommon import *
from oramachine import *
from orasetupenv import *
from oracvu import *

import datetime
import os
import sys
import time
import traceback


class OraSetupSSH:
    """
    This class sets up the SSH environment between cluster nodes before
    setting up the RAC env.
    """
    def __init__(self,oralogger,orahandler,oraenv,oracommon):
        try:
            self.ologger = oralogger
            self.ohandler = orahandler
            self.oenv = oraenv.get_instance()
            self.ocommon = oracommon
            self.ora_env_dict = oraenv.get_env_vars()
            self.file_name = os.path.basename(__file__)
        except BaseException as ex:
            # Bug fix: extract_tb() lives in the traceback module; the
            # original called sys.tracebacklimit.extract_tb(), which would
            # itself raise since sys.tracebacklimit is an int.
            ex_type, ex_value, ex_traceback = sys.exc_info()
            trace_back = traceback.extract_tb(ex_traceback)
            stack_trace = list()
            for trace in trace_back:
                stack_trace.append("File : %s , Line : %d, Func.Name : %s, Message : %s" % (trace[0], trace[1], trace[2], trace[3]))
            self.ocommon.log_info_message(ex_type.__name__,self.file_name)
            self.ocommon.log_info_message(ex_value,self.file_name)
            self.ocommon.log_info_message(stack_trace,self.file_name)

    def setup(self):
        """
        Set up SSH between compute nodes for both the grid and DB users.

        Uses pre-provided keys when SSH_PRIVATE_KEY/SSH_PUBLIC_KEY point to
        existing files; otherwise falls back to password-based setup in
        INSTALL or ADDNODE mode.  Honours SKIP_SSH_SETUP.
        """
        self.ocommon.log_info_message("Start setup()",self.file_name)
        ct = datetime.datetime.now()
        bts = ct.timestamp()
        if self.ocommon.check_key("SKIP_SSH_SETUP",self.ora_env_dict):
            self.ocommon.log_info_message("Skipping SSH setup as SKIP_SSH_SETUP flag is set",self.file_name)
        else:
            SSH_USERS=[self.ora_env_dict["GRID_USER"] + ":" + self.ora_env_dict["GRID_HOME"],self.ora_env_dict["DB_USER"] + ":" + self.ora_env_dict["DB_HOME"]]
            if (self.ocommon.check_key("SSH_PRIVATE_KEY",self.ora_env_dict)) and (self.ocommon.check_key("SSH_PUBLIC_KEY",self.ora_env_dict)):
                if self.ocommon.check_file(self.ora_env_dict["SSH_PRIVATE_KEY"],True,None,None) and self.ocommon.check_file(self.ora_env_dict["SSH_PUBLIC_KEY"],True,None,None):
                    for sshi in SSH_USERS:
                        uohome=sshi.split(":")
                        self.setupsshusekey(uohome[0],uohome[1],None)
            else:
                for sshi in SSH_USERS:
                    uohome=sshi.split(":")
                    exiting_cls_node=self.ocommon.get_existing_clu_nodes(False)
                    if exiting_cls_node:
                        self.setupssh(uohome[0],uohome[1],"ADDNODE")
                    else:
                        self.setupssh(uohome[0],uohome[1],"INSTALL")

        ct = datetime.datetime.now()
        ets = ct.timestamp()
        totaltime=ets - bts
        self.ocommon.log_info_message("Total time for setup() = [ " + str(round(totaltime,3)) + " ] seconds",self.file_name)

    def setupssh(self,user,ohome,ctype):
        """
        Password-based SSH setup via cluvfy's user-equivalence fixup.

        ctype 'INSTALL' targets only the new cluster nodes; 'ADDNODE' also
        includes the existing cluster nodes.  Retries up to 5 times until
        verifyssh() succeeds.
        """
        self.ocommon.reset_os_password(user)
        password=self.ocommon.get_os_password()
        giuser,gihome,gibase,oinv=self.ocommon.get_gi_params()
        # grid runs runcluvfy.sh from the staged home; other users use the
        # installed bin/cluvfy.
        if user == 'grid':
            sshscr="runcluvfy.sh"
        else:
            sshscr="bin/cluvfy"

        cluster_nodes=""
        if ctype == 'INSTALL':
            cluster_nodes=self.ocommon.get_cluster_nodes()
            cluster_nodes = cluster_nodes.replace(" ",",")
            i=0
            while i < 5:
                self.ocommon.set_mask_str(password.strip())
                self.ocommon.log_info_message('''SSH setup in progress. Count set to {0}'''.format(i),self.file_name)
                cmd='''su - {0} -c "echo \"{4}\" | {1}/{2} comp admprv -n {3} -o user_equiv -fixup"'''.format(user,gihome,sshscr,cluster_nodes,'HIDDEN_STRING')
                output,error,retcode=self.ocommon.execute_cmd(cmd,None,None)
                self.ocommon.check_os_err(output,error,retcode,None)
                self.ocommon.unset_mask_str()
                retcode=self.verifyssh(user,gihome,sshscr,cluster_nodes)
                if retcode == 0:
                    break
                else:
                    i = i + 1
                    self.ocommon.log_info_message('''SSH setup verification failed. Trying again..''',self.file_name)

        elif ctype == 'ADDNODE':
            cluster_nodes=self.ocommon.get_cluster_nodes()
            cluster_nodes = cluster_nodes.replace(" ",",")
            exiting_cls_node=self.ocommon.get_existing_clu_nodes(True)
            new_nodes=cluster_nodes + "," + exiting_cls_node

            cmd='''su - {0} -c "rm -rf ~/.ssh ; mkdir -p ~/.ssh ; chmod 700 ~/.ssh"'''.format(user)
            output,error,retcode=self.ocommon.execute_cmd(cmd,None,None)
            self.ocommon.check_os_err(output,error,retcode,False)

            self.ocommon.set_mask_str(password.strip())
            i=0
            while i < 5:
                self.ocommon.log_info_message('''SSH setup in progress. Count set to {0}'''.format(i),self.file_name)
                cmd='''su - {0} -c "echo \"{4}\" | {1}/{2} comp admprv -n {3} -o user_equiv -fixup"'''.format(user,gihome,sshscr,new_nodes,'HIDDEN_STRING')
                output,error,retcode=self.ocommon.execute_cmd(cmd,None,None)
                self.ocommon.check_os_err(output,error,retcode,None)
                self.ocommon.unset_mask_str()
                retcode=self.verifyssh(user,gihome,sshscr,new_nodes)
                if retcode == 0:
                    break
                else:
                    i = i + 1
                    self.ocommon.log_info_message('''SSH setup verification failed. Trying again..''',self.file_name)
        else:
            cluster_nodes=self.ocommon.get_cluster_nodes()

    def verifyssh(self,user,gihome,sshscr,cls_nodes):
        """
        Verify passwordless SSH between cls_nodes with cluvfy; returns the
        cluvfy exit code (0 on success).
        """
        self.ocommon.log_info_message("Verifying SSH between nodes " + cls_nodes, self.file_name)
        cls_nodes = cls_nodes.replace(" ",",")
        cmd='''su - {0} -c "{1}/{2} comp admprv -n {3} -o user_equiv -sshonly -verbose"'''.format(user,gihome,sshscr,cls_nodes)
        output,error,retcode=self.ocommon.execute_cmd(cmd,None,None)
        self.ocommon.check_os_err(output,error,retcode,None)
        return retcode

    def setupsshusekey(self,user,ohome,ctype):
        """
        Set up SSH between nodes using the pre-provided key pair: every node
        primes its known_hosts entry for every other node, with up to 5
        retries per pair, then verifies with cluvfy.
        """
        i=1

        cluster_nodes=""
        new_nodes=self.ocommon.get_cluster_nodes()
        existing_cls_node=self.ocommon.get_existing_clu_nodes(None)
        giuser,gihome,gibase,oinv=self.ocommon.get_gi_params()
        if user == 'grid':
            sshscr="runcluvfy.sh"
        else:
            sshscr="bin/cluvfy"
        if existing_cls_node is not None:
            cluster_nodes= existing_cls_node.replace(","," ") + " " + new_nodes
        else:
            cluster_nodes=new_nodes

        for node1 in cluster_nodes.split(" "):
            for node in cluster_nodes.split(" "):
                i=1
                cmd='''su - {0} -c "ssh -o StrictHostKeyChecking=no -x -l {0} {3} \\"ssh-keygen -R {1};ssh -o StrictHostKeyChecking=no -x -l {0} {1} \\\"/bin/sh -c true\\\"\\""''' .format(user,node,ohome,node1)
                output,error,retcode=self.ocommon.execute_cmd(cmd,None,None)
                self.ocommon.check_os_err(output,error,retcode,None)
                if int(retcode) != 0:
                    while (i < 5):
                        self.ocommon.log_info_message('''SSH setup failed for the cmd {0}. Trying again and count is {1}'''.format(cmd,i),self.file_name)
                        output,error,retcode=self.ocommon.execute_cmd(cmd,None,None)
                        self.ocommon.check_os_err(output,error,retcode,None)
                        if (retcode == 0):
                            break
                        else:
                            time.sleep(5)
                            i=i+1

        retcode=self.verifyssh(user,gihome,sshscr,new_nodes)

    def setupsshdirs(self,user,ohome,ctype):
        """
        Create ~/.ssh for the user and install the provided key pair and
        authorized_keys with the correct modes and ownership.
        """
        sshdir='''/home/{0}/.ssh'''.format(user)
        privkey=self.ora_env_dict["SSH_PRIVATE_KEY"]
        pubkey=self.ora_env_dict["SSH_PUBLIC_KEY"]
        group="oinstall"
        cmd1='''mkdir -p {0}'''.format(sshdir)
        cmd2='''chmod 700 {0}'''.format(sshdir)
        cmd3='''cat {0} > {1}/id_rsa'''.format(privkey,sshdir)
        cmd4='''cat {0} > {1}/id_rsa.pub'''.format(pubkey,sshdir)
        cmd5='''chmod 400 {0}/id_rsa'''.format(sshdir)
        cmd6='''chmod 644 {0}/id_rsa.pub'''.format(sshdir)
        cmd7='''chown -R {0}:{1} {2}'''.format(user,group,sshdir)
        cmd8='''cat {0} > {1}/authorized_keys'''.format(pubkey,sshdir)
        cmd9='''chmod 600 {0}/authorized_keys'''.format(sshdir)
        cmd10='''chown -R {0}:{1} {2}/authorized_keys'''.format(user,group,sshdir)
        for cmd in cmd1,cmd2,cmd3,cmd4,cmd5,cmd6,cmd7,cmd8,cmd9,cmd10:
            output,error,retcode=self.ocommon.execute_cmd(cmd,None,None)
            self.ocommon.check_os_err(output,error,retcode,False)
+# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl +# Author: paramdeep.saini@oracle.com +############################ +# Description: Setup SSH between nodes +# +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER. +# + +set username [lindex $argv 0]; +set script_loc [lindex $argv 1]; +set cluster_nodes [lindex $argv 2]; +set ssh_pass [lindex $argv 3]; + +set timeout 120 + +# Procedure to setup ssh from server +proc sshproc { ssh_pass } { + expect { + # Send password at 'Password' prompt and tell expect to continue(i.e. exp_continue) + -re "\[P|p]assword:" { exp_send "$ssh_pass\r" + exp_continue } + # Tell expect stay in this 'expect' block and for each character that SCP prints while doing the copy + # reset the timeout counter back to 0. + -re . { exp_continue } + timeout { return 1 } + eof { return 0 } + } +} + +# Execute sshUserSetup.sh Script +set ssh_cmd "$script_loc/sshUserSetup.sh -user $username -hosts \"${cluster_nodes}\" -logfile /tmp/${username}_SetupSSH.log -advanced -exverify -noPromptPassphrase -confirm" + +#set ssh_cmd "$script_loc/sshUserSetup.sh -user $username -hosts \"${cluster_nodes}\" -logfile /tmp/${username}_SetupSSH.log -advanced -noPromptPassphrase -confirm" + +eval spawn $ssh_cmd +set ssh_results [sshproc $ssh_pass] + +if { $ssh_results == 0 } { + exit 0 +} + +# Error attempting SSH, so exit with non-zero status +exit 1 diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/dockerfiles/19.3.0/scripts b/OracleDatabase/RAC/OracleRealApplicationClusters/dockerfiles/19.3.0/scripts new file mode 120000 index 0000000000..3c4aafe2bb --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/dockerfiles/19.3.0/scripts @@ -0,0 +1 @@ +../../common/scripts \ No newline at end of file diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/dockerfiles/21.3.0/scripts b/OracleDatabase/RAC/OracleRealApplicationClusters/dockerfiles/21.3.0/scripts new file mode 120000 
index 0000000000..3c4aafe2bb --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/dockerfiles/21.3.0/scripts @@ -0,0 +1 @@ +../../common/scripts \ No newline at end of file