From 956d65e05a66e4c37b98bf81d434e9a6b3bc19a5 Mon Sep 17 00:00:00 2001
From: jan-song <60806666+jan-song@users.noreply.github.com>
Date: Mon, 6 Apr 2020 16:22:04 +0800
Subject: [PATCH 01/16] Update oracle.py
---
sql/engines/oracle.py | 168 ++++++++++++++++++++++++++++++++----------
1 file changed, 128 insertions(+), 40 deletions(-)
diff --git a/sql/engines/oracle.py b/sql/engines/oracle.py
index f1eb47c0aa..03291fb55a 100644
--- a/sql/engines/oracle.py
+++ b/sql/engines/oracle.py
@@ -4,6 +4,7 @@
import traceback
import re
import sqlparse
+import MySQLdb
import simplejson as json
from common.config import SysConfig
@@ -44,6 +45,26 @@ def name(self):
@property
def info(self):
return 'Oracle engine'
+
+ @property
+ def auto_backup(self):
+        """Whether backup is supported"""
+ return True
+
+ @staticmethod
+ def get_backup_connection():
+ archer_config = SysConfig()
+ backup_host = archer_config.get('inception_remote_backup_host')
+ backup_port = int(archer_config.get('inception_remote_backup_port', 3306))
+ backup_user = archer_config.get('inception_remote_backup_user')
+ backup_password = archer_config.get('inception_remote_backup_password')
+ return MySQLdb.connect(host=backup_host,
+ port=backup_port,
+ user=backup_user,
+ passwd=backup_password,
+ charset='utf8mb4',
+ autocommit=True
+ )
@property
def server_version(self):
@@ -258,64 +279,42 @@ def execute_check(self, db_name=None, sql=''):
return check_result
def execute_workflow(self, workflow, close_conn=True):
- """执行上线单,返回Review set
- 原来的逻辑是根据 sql_content简单来分割SQL,进而再执行这些SQL
- 新的逻辑变更为根据审核结果中记录的sql来执行,
- 如果是PLSQL存储过程等对象定义操作,还需检查确认新建对象是否编译通过!
- """
- review_content = workflow.sqlworkflowcontent.review_content
- review_result = json.loads(review_content)
- sqlitemList = get_exec_sqlitem_list(review_result, workflow.db_name)
-
+        """Execute the workflow and return a ReviewSet"""
sql = workflow.sqlworkflowcontent.sql_content
execute_result = ReviewSet(full_sql=sql)
-
+        # Strip comments, split the SQL, and prepend the CURRENT_SCHEMA switch statement to the split result
+ sql = sqlparse.format(sql, strip_comments=True)
+ split_sql = [f"ALTER SESSION SET CURRENT_SCHEMA = {workflow.db_name};"] + sqlparse.split(sql)
line = 1
statement = None
try:
conn = self.get_connection()
cursor = conn.cursor()
# 逐条执行切分语句,追加到执行结果中
- for sqlitem in sqlitemList:
- statement = sqlitem.statement
- if sqlitem.stmt_type == "SQL":
- statement = statement.rstrip(';')
+ cursor.execute(f"alter session set nls_date_format='yyyy-mm-dd hh24:mi:ss'")
+ cursor.execute(f"select sysdate from dual")
+ rows = cursor.fetchone()
+ begin_time = rows[0]
+ for statement in split_sql:
+ statement = statement.rstrip(';')
with FuncTimer() as t:
- cursor.execute(statement)
- conn.commit()
-
- rowcount = cursor.rowcount
- stagestatus = "Execute Successfully"
- if sqlitem.stmt_type == "PLSQL" and sqlitem.object_name and sqlitem.object_name != 'ANONYMOUS' and sqlitem.object_name != '':
- query_obj_sql = f"""SELECT OBJECT_NAME, STATUS, TO_CHAR(LAST_DDL_TIME, 'YYYY-MM-DD HH24:MI:SS') FROM ALL_OBJECTS
- WHERE OWNER = '{sqlitem.object_owner}'
- AND OBJECT_NAME = '{sqlitem.object_name}'
- """
- cursor.execute(query_obj_sql)
- row = cursor.fetchone()
- if row:
- status = row[1]
- if status and status == "INVALID":
- stagestatus = "Compile Failed. Object " + sqlitem.object_owner + "." + sqlitem.object_name + " is invalid."
- else:
- stagestatus = "Compile Failed. Object " + sqlitem.object_owner + "." + sqlitem.object_name + " doesn't exist."
-
- if stagestatus != "Execute Successfully":
- raise Exception(stagestatus)
-
+ if statement !='':
+ cursor.execute(statement)
+ conn.commit()
execute_result.rows.append(ReviewResult(
id=line,
errlevel=0,
- stagestatus=stagestatus,
+ stagestatus='Execute Successfully',
errormessage='None',
sql=statement,
- affected_rows=rowcount,
+ affected_rows=cursor.rowcount,
execute_time=t.cost,
))
line += 1
except Exception as e:
logger.warning(f"Oracle命令执行报错,语句:{statement or sql}, 错误信息:{traceback.format_exc()}")
execute_result.error = str(e)
+ #conn.rollback()
# 追加当前报错语句信息到执行结果中
execute_result.rows.append(ReviewResult(
id=line,
@@ -328,22 +327,111 @@ def execute_workflow(self, workflow, close_conn=True):
))
line += 1
# 报错语句后面的语句标记为审核通过、未执行,追加到执行结果中
- for sqlitem in sqlitemList[line - 1:]:
+ for statement in split_sql[line - 1:]:
execute_result.rows.append(ReviewResult(
id=line,
errlevel=0,
stagestatus='Audit completed',
errormessage=f'前序语句失败, 未执行',
- sql=sqlitem.statement,
+ sql=statement,
affected_rows=0,
execute_time=0,
))
line += 1
finally:
+            # Generate rollback SQL
+ cursor.execute(f"select sysdate from dual")
+ rows = cursor.fetchone()
+ end_time = rows[0]
+ logmnr_start_sql = f'''begin
+ dbms_logmnr.start_logmnr(
+ starttime=>to_date('{begin_time}','yyyy-mm-dd hh24:mi:ss'),
+ endtime=>to_date('{end_time}','yyyy/mm/dd hh24:mi:ss'),
+ options=>dbms_logmnr.dict_from_online_catalog + dbms_logmnr.continuous_mine);
+ end;'''
+ undo_sql = f'''select sql_redo,sql_undo from v$logmnr_contents where
+ SEG_OWNER <> 'SYS'
+ and session# = (select s.sid from v$session s where s.sid = (select sid from v$mystat where rownum = 1 ))
+ and serial# = (select serial# from v$session s where s.sid = (select sid from v$mystat where rownum = 1 )) order by scn desc'''
+ logmnr_end_sql = f'''begin
+ dbms_logmnr.end_logmnr;
+ end;'''
+ workflow_id = f"{workflow.sqlworkflowcontent.workflow_id}"
+ cursor.execute(logmnr_start_sql)
+ cursor.execute(undo_sql)
+ rows = cursor.fetchall()
+ cursor.execute(logmnr_end_sql)
+ self.ora_backup_insert(rows=rows, id=workflow_id)
if close_conn:
self.close()
return execute_result
+ def ora_backup_insert(self, rows = [], id = 0):
+        # Persist the rollback SQL to the backup database
+        # Create the connection
+ try:
+ conn = self.get_backup_connection()
+ cur = conn.cursor()
+ cur.execute(f"""create database if not exists ora_backup;""")
+ cur.execute(f"use ora_backup;")
+ cur.execute(f"""CREATE TABLE if not exists `sql_rollback` (
+ `id` bigint(20) NOT NULL AUTO_INCREMENT,
+ `redo_sql` mediumtext,
+ `undo_sql` mediumtext,
+ `workflow_id` bigint(20) NOT NULL,
+ PRIMARY KEY (`id`),
+ key `idx_sql_rollback_01` (`workflow_id`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;""")
+ if len(rows) > 0:
+ for row in rows:
+ redo_sql=f"{row[0]}"
+ redo_sql=redo_sql.replace("'","\\'")
+ undo_sql=f"{row[1]}"
+ undo_sql=undo_sql.replace("'","\\'")
+ sql = f"""insert into sql_rollback(redo_sql,undo_sql,workflow_id) values('{redo_sql}','{undo_sql}',{id});"""
+ cur.execute(sql)
+ except Exception as e:
+ logger.warning(f"备份失败,错误信息{traceback.format_exc()}")
+ return False
+ finally:
+            # Close the connection
+ if conn:
+ conn.close()
+ return True
+
+ def get_rollback(self, workflow):
+ """
+        Fetch the rollback statements, displayed in reverse execution order; returns ['original statement', 'rollback statement']
+ """
+ list_execute_result = json.loads(workflow.sqlworkflowcontent.execute_result)
+ workflow_id = workflow.sqlworkflowcontent.workflow_id
+        # Show the rollback statements in reverse order
+ list_execute_result.reverse()
+ list_backup_sql = []
+ try:
+            # Create the connection
+ conn = self.get_backup_connection()
+ cur = conn.cursor()
+ sql = f"""select redo_sql,undo_sql from sql_rollback where workflow_id = {workflow_id} order by id;"""
+ cur.execute(f"use ora_backup;")
+ cur.execute(sql)
+ list_tables = cur.fetchall()
+ for row in list_tables:
+ redo_sql = row[0]
+ if row[1] is None:
+ undo_sql = ' '
+ else:
+ undo_sql = row[1]
+                # Build the rollback list entries as ['original statement', 'rollback statement']
+ list_backup_sql.append([redo_sql,undo_sql])
+ except Exception as e:
+ logger.error(f"获取回滚语句报错,异常信息{traceback.format_exc()}")
+ raise Exception(e)
+        # Close the connection
+ if conn:
+ conn.close()
+ return list_backup_sql
+
def close(self):
if self.conn:
self.conn.close()
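
This first patch drops the SqlItem-based execution path: execute_workflow now strips comments, splits sql_content with sqlparse, prepends an ALTER SESSION SET CURRENT_SCHEMA statement, executes each piece, and in the finally block mines the session's redo/undo SQL with DBMS_LOGMNR between the recorded begin and end times, handing the rows to ora_backup_insert, which stores them in a MySQL ora_backup.sql_rollback table keyed by workflow_id. A minimal sketch of that LogMiner round trip, assuming an open cx_Oracle cursor with the required LogMiner privileges (the helper name and bind variables are illustrative, not part of the patch):

    def mine_undo_sql(cursor, begin_time, end_time):
        """Return (sql_redo, sql_undo) rows for the current session between two timestamps.

        begin_time / end_time are 'yyyy-mm-dd hh24:mi:ss' strings, as produced by the patch.
        """
        # Start LogMiner over the online catalog for the given window, as the patch does
        cursor.execute("""begin
            dbms_logmnr.start_logmnr(
                starttime => to_date(:b, 'yyyy-mm-dd hh24:mi:ss'),
                endtime   => to_date(:e, 'yyyy-mm-dd hh24:mi:ss'),
                options   => dbms_logmnr.dict_from_online_catalog + dbms_logmnr.continuous_mine);
            end;""", b=begin_time, e=end_time)
        # Restrict the mined contents to the current session and read redo/undo pairs
        cursor.execute("""select sql_redo, sql_undo from v$logmnr_contents
            where seg_owner <> 'SYS'
              and session# = (select sid from v$mystat where rownum = 1)
              and serial#  = (select serial# from v$session
                              where sid = (select sid from v$mystat where rownum = 1))
            order by scn desc""")
        rows = cursor.fetchall()
        cursor.execute("begin dbms_logmnr.end_logmnr; end;")
        return rows
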
From 6e15bff89b21edd7712d11e37252e954e5c2ba3d Mon Sep 17 00:00:00 2001
From: jan-song <60806666+jan-song@users.noreply.github.com>
Date: Mon, 6 Apr 2020 16:44:11 +0800
Subject: [PATCH 02/16] Update oracle.py
---
sql/engines/oracle.py | 411 +++++++++++++++++++++++++++++++++++-------
1 file changed, 350 insertions(+), 61 deletions(-)
diff --git a/sql/engines/oracle.py b/sql/engines/oracle.py
index 03291fb55a..fa7f9706af 100644
--- a/sql/engines/oracle.py
+++ b/sql/engines/oracle.py
@@ -9,7 +9,7 @@
from common.config import SysConfig
from common.utils.timer import FuncTimer
-from sql.utils.sql_utils import get_syntax_type, get_full_sqlitem_list, get_exec_sqlitem_list
+from sql.utils.sql_utils import get_syntax_type
from . import EngineBase
import cx_Oracle
from .models import ResultSet, ReviewSet, ReviewResult
@@ -17,7 +17,6 @@
logger = logging.getLogger('default')
-
class OracleEngine(EngineBase):
def __init__(self, instance=None):
@@ -45,7 +44,7 @@ def name(self):
@property
def info(self):
return 'Oracle engine'
-
+
@property
def auto_backup(self):
"""是否支持备份"""
@@ -103,7 +102,7 @@ def _get_all_schemas(self):
'DIP USERS', 'EXFSYS', 'FLOWS_FILES', 'HR USERS', 'IX USERS', 'MDDATA', 'MDSYS', 'MGMT_VIEW', 'OE USERS',
'OLAPSYS', 'ORACLE_OCM', 'ORDDATA', 'ORDPLUGINS', 'ORDSYS', 'OUTLN', 'OWBSYS', 'OWBSYS_AUDIT', 'PM USERS',
'SCOTT', 'SH USERS', 'SI_INFORMTN_SCHEMA', 'SPATIAL_CSW_ADMIN_USR', 'SPATIAL_WFS_ADMIN_USR', 'SYS',
- 'SYSMAN', 'SYSTEM', 'WMSYS', 'XDB', 'XS$NULL')
+ 'SYSMAN', 'SYSTEM', 'WMSYS', 'XDB', 'XS$NULL', 'DIP', 'OJVMSYS', 'LBACSYS')
schema_list = [row[0] for row in result.rows if row[0] not in sysschema]
result.rows = schema_list
return result
@@ -112,7 +111,15 @@ def get_all_tables(self, db_name, **kwargs):
"""获取table 列表, 返回一个ResultSet"""
sql = f"""SELECT table_name FROM all_tables WHERE nvl(tablespace_name, 'no tablespace') NOT IN ('SYSTEM', 'SYSAUX') AND OWNER = '{db_name}' AND IOT_NAME IS NULL AND DURATION IS NULL
"""
- result = self.query(sql=sql)
+ result = self.query(db_name=db_name, sql=sql)
+ tb_list = [row[0] for row in result.rows if row[0] not in ['test']]
+ result.rows = tb_list
+ return result
+
+ def get_all_objects(self, db_name, **kwargs):
+        """Get the object list, return a ResultSet"""
+ sql = f"""SELECT object_name FROM all_objects WHERE OWNER = '{db_name}' """
+ result = self.query(db_name=db_name, sql=sql)
tb_list = [row[0] for row in result.rows if row[0] not in ['test']]
result.rows = tb_list
return result
@@ -136,9 +143,143 @@ def describe_table(self, db_name, tb_name, **kwargs):
FROM all_tab_cols
WHERE table_name = '{tb_name}' and owner = '{db_name}'
"""
- result = self.query(sql=sql)
+ result = self.query(db_name=db_name, sql=sql)
return result
+ def object_name_check(self, db_name=None, object_name=''):
+        """Check whether the object exists, return True/False"""
+ if '.' in object_name:
+ schema_name = object_name.split('.')[0]
+ object_name = object_name.split('.')[1]
+ sql = f"""SELECT object_name FROM all_objects WHERE OWNER = upper('{schema_name}') and OBJECT_NAME = upper('{object_name}')"""
+ else:
+ sql = f"""SELECT object_name FROM all_objects WHERE OWNER = upper('{db_name}') and OBJECT_NAME = upper('{object_name}')"""
+ result = self.query(db_name=db_name, sql=sql,close_conn=False)
+ if result.affected_rows > 0:
+ return True
+ else:
+ return False
+
+ def get_sql_first_object_name(self, sql=''):
+        """Extract the object_name from the SQL text"""
+ object_name = ''
+ if re.match(r"^create\s+table\s", sql):
+ object_name = re.match(r"^create\s+table\s(.+?)(\s|\()",sql,re.M).group(1)
+ elif re.match(r"^create\s+index\s", sql):
+ object_name = re.match(r"^create\s+index\s(.+?)\s",sql,re.M).group(1)
+ elif re.match(r"^create\s+unique\s+index\s", sql):
+ object_name = re.match(r"^create\s+unique\s+index\s(.+?)\s", sql, re.M).group(1)
+ elif re.match(r"^create\s+sequence\s", sql):
+ object_name = re.match(r"^create\s+sequence\s(.+?)(\s|$)",sql,re.M).group(1)
+ elif re.match(r"^alter\s+table\s", sql):
+ object_name = re.match(r"^alter\s+table\s(.+?)\s",sql,re.M).group(1)
+ elif re.match(r"^create\s+function\s", sql):
+ object_name = re.match(r"^create\s+function\s(.+?)(\s|\()",sql,re.M).group(1)
+ elif re.match(r"^create\s+view\s", sql):
+ object_name = re.match(r"^create\s+view\s(.+?)\s",sql,re.M).group(1)
+ elif re.match(r"^create\s+procedure\s", sql):
+ object_name = re.match(r"^create\s+procedure\s(.+?)\s",sql,re.M).group(1)
+ elif re.match(r"^create\s+package\s+body", sql):
+ object_name = re.match(r"^create\s+package\s+body\s(.+?)\s",sql,re.M).group(1)
+ elif re.match(r"^create\s+package\s", sql):
+ object_name = re.match(r"^create\s+package\s(.+?)\s",sql,re.M).group(1)
+ else:
+ return object_name.strip()
+ return object_name.strip()
+
+ def check_create_index_table(self,sql='',object_name_list=set(),db_name=''):
+ result = {'msg': '', 'bad_query': False}
+ table_name = ''
+ if re.match(r"^create\s+index\s",sql):
+ table_name = re.match(r"^create\s+index\s+.+\s+on\s(.+?)(\(|\s\()",sql,re.M).group(1)
+ if '.' not in table_name:
+ table_name = f"{db_name}.{table_name}"
+ if table_name in object_name_list:
+ return True
+ else:
+ return False
+ elif re.match(r"^create\s+unique\s+index\s", sql):
+ table_name = re.match(r"^create\s+unique\s+index\s+.+\s+on\s(.+?)(\(|\s\()", sql, re.M).group(1)
+ if '.' not in table_name:
+ table_name = f"{db_name}.{table_name}"
+ if table_name in object_name_list:
+ return True
+ else:
+ return False
+ else:
+ return False
+
+ def get_dml_table(self,sql='',object_name_list=set(),db_name=''):
+ if re.match(r"^update",sql):
+ table_name = re.match(r"^update\s(.+?)\s",sql,re.M).group(1)
+ if '.' not in table_name:
+ table_name = f"{db_name}.{table_name}"
+ if table_name in object_name_list:
+ return True
+ else:
+ return False
+ elif re.match(r"^delete", sql):
+ table_name = re.match(r"^delete\s+from\s(.+?)\s", sql, re.M).group(1)
+ if '.' not in table_name:
+ table_name = f"{db_name}.{table_name}"
+ if table_name in object_name_list:
+ return True
+ else:
+ return False
+ elif re.match(r"^insert", sql):
+ table_name = re.match(r"^insert\s+into\s(.+?)(\(|\s)", sql, re.M).group(1)
+ if '.' not in table_name:
+ table_name = f"{db_name}.{table_name}"
+ if table_name in object_name_list:
+ return True
+ else:
+ return False
+ else:
+ return False
+
+ def where_check(self,sql=''):
+ if re.match(r"^update((?!where).)*$|^delete((?!where).)*$",sql):
+ return True
+ else:
+ parsed = sqlparse.parse(sql)[0]
+ flattened = list(parsed.flatten())
+ n_skip = 0
+ flattened = flattened[:len(flattened) - n_skip]
+ logical_operators = ('AND', 'OR', 'NOT', 'BETWEEN', 'ORDER BY', 'GROUP BY', 'HAVING')
+ for t in reversed(flattened):
+ if t.is_keyword:
+ return True
+ return False
+
+ def explain_check(self, db_name=None, sql='', close_conn=False):
+ result = {'msg': '', 'rows': 0}
+ try:
+ conn = self.get_connection()
+ cursor = conn.cursor()
+ if db_name:
+ cursor.execute(f"ALTER SESSION SET CURRENT_SCHEMA = {db_name}")
+ if re.match(r"^explain", sql, re.I):
+ sql = sql
+ else:
+ sql = f"explain plan for {sql}"
+ sql = sql.rstrip(';')
+ cursor.execute(sql)
+            # Get the estimated number of affected rows
+ cursor.execute(f"select CARDINALITY from SYS.PLAN_TABLE$ where id = 0")
+ rows = cursor.fetchone()
+ conn.rollback()
+ if rows[0] is None:
+ result['rows'] = 0
+ else:
+ result['rows'] = rows[0]
+ except Exception as e:
+ logger.warning(f"Oracle 语句执行报错,语句:{sql},错误信息{traceback.format_exc()}")
+ result['msg'] = str(e)
+ finally:
+ if close_conn:
+ self.close()
+ return result
+
def query_check(self, db_name=None, sql=''):
# 查询语句的检查、注释去除、切分
result = {'msg': '', 'bad_query': False, 'filtered_sql': sql, 'has_star': False}
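
The explain_check helper added above estimates how many rows a DML statement touches: it runs EXPLAIN PLAN FOR on the statement, reads CARDINALITY of the plan root (id = 0) directly from SYS.PLAN_TABLE$, and rolls back so the plan rows are discarded. A condensed sketch of that round trip, assuming an open cx_Oracle connection and the same direct access to SYS.PLAN_TABLE$ the patch relies on (names are illustrative):

    def estimated_rows(conn, statement, schema=None):
        cursor = conn.cursor()
        if schema:
            cursor.execute(f"ALTER SESSION SET CURRENT_SCHEMA = {schema}")
        # EXPLAIN PLAN writes the plan into the plan table without executing the statement
        cursor.execute(f"explain plan for {statement.rstrip(';')}")
        cursor.execute("select CARDINALITY from SYS.PLAN_TABLE$ where id = 0")
        row = cursor.fetchone()
        conn.rollback()  # throw the plan rows away, as the patch does
        return (row[0] or 0) if row else 0
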
@@ -146,17 +287,17 @@ def query_check(self, db_name=None, sql=''):
star_patter = r"(^|,|\s)\*(\s|\(|$)"
# 删除注释语句,进行语法判断,执行第一条有效sql
try:
+ sql = sqlparse.format(sql, strip_comments=True)
sql = sqlparse.split(sql)[0]
result['filtered_sql'] = re.sub(r';$', '', sql.strip())
- sql = sqlparse.format(sql, strip_comments=True)
sql_lower = sql.lower()
except IndexError:
result['bad_query'] = True
result['msg'] = '没有有效的SQL语句'
return result
- if re.match(r"^select", sql_lower) is None:
+ if re.match(r"^select|^with|^explain", sql_lower) is None:
result['bad_query'] = True
- result['msg'] = '仅支持^select语法!'
+ result['msg'] = '不支持语法!'
return result
if re.search(star_patter, sql_lower) is not None:
keyword_warning += '禁止使用 * 关键词\n'
@@ -166,20 +307,20 @@ def query_check(self, db_name=None, sql=''):
result['bad_query'] = True
if result.get('bad_query') or result.get('has_star'):
result['msg'] = keyword_warning
+        # Run EXPLAIN on select statements first to verify the syntax
+ if re.match(r"^select|^with", sql, re.I):
+ explain_result = self.explain_check(db_name=db_name, sql=f"explain plan for {sql}")
+ if explain_result['msg']:
+ result['bad_query'] = True
+ result['msg'] = explain_result['msg']
return result
def filter_sql(self, sql='', limit_num=0):
sql_lower = sql.lower()
# 对查询sql增加limit限制
- if re.match(r"^\s*select", sql_lower):
- # 针对select count(*) from之类的SQL,不做limit限制
- if re.match(r"^\s*select\s+count\s*\(\s*[\*|\d]\s*\)\s+from", sql_lower, re.I):
- return sql.rstrip(';')
+ if re.match(r"^select|^with", sql_lower):
if sql_lower.find(' rownum ') == -1:
- if sql_lower.find('where') == -1:
- return f"{sql.rstrip(';')} WHERE ROWNUM <= {limit_num}"
- else:
- return f"{sql.rstrip(';')} AND ROWNUM <= {limit_num}"
+ f"select a.* from ({sql.rstrip(';')}) a WHERE ROWNUM <= {limit_num}"
return sql.strip()
def query(self, db_name=None, sql='', limit_num=0, close_conn=True, **kwargs):
@@ -190,6 +331,18 @@ def query(self, db_name=None, sql='', limit_num=0, close_conn=True, **kwargs):
cursor = conn.cursor()
if db_name:
cursor.execute(f"ALTER SESSION SET CURRENT_SCHEMA = {db_name}")
+ if re.match(r"^explain", sql, re.I):
+ try:
+ sql = sql.rstrip(';')
+ cursor.execute(sql)
+                # Replace the SQL text to fetch the execution plan
+ sql = f"select PLAN_TABLE_OUTPUT from table(dbms_xplan.display)"
+ except Exception as e:
+ logger.warning(f"Oracle命令执行报错,语句:{sql}, 错误信息:{traceback.format_exc()}")
+ result_set.error = str(e)
+ if close_conn:
+ self.close()
+ return result_set
cursor.execute(sql)
fields = cursor.description
if any(x[1] == cx_Oracle.CLOB for x in fields):
@@ -217,65 +370,201 @@ def query_masking(self, schema_name=None, sql='', resultset=None):
"""传入 sql语句, db名, 结果集,
返回一个脱敏后的结果集"""
# 仅对select语句脱敏
- if re.match(r"^select", sql, re.I):
+ if re.match(r"^select|^with", sql, re.I):
filtered_result = brute_mask(self.instance, resultset)
filtered_result.is_masked = True
else:
filtered_result = resultset
return filtered_result
- def execute_check(self, db_name=None, sql=''):
+ def execute_check(self, db_name=None, sql='', close_conn=True):
"""上线单执行前的检查, 返回Review set"""
config = SysConfig()
check_result = ReviewSet(full_sql=sql)
+ explain_re = r"^merge|^update|^delete|^insert|^create\s+table|^create\s+index|^create\s+unique\s+index"
# 禁用/高危语句检查
line = 1
+ #
+ object_name_list = set()
+ cache_object_name_list = set()
critical_ddl_regex = config.get('critical_ddl_regex', '')
p = re.compile(critical_ddl_regex)
check_result.syntax_type = 2 # TODO 工单类型 0、其他 1、DDL,2、DML
+ try:
+ for statement in sqlparse.split(sql):
+ statement = sqlparse.format(statement, strip_comments=True, reindent=True, keyword_case='lower')
+ sql_lower = statement.lower().rstrip(';')
+                # Forbidden statements
+ if re.match(r"^select|^with|^explain", sql_lower):
+ check_result.is_critical = True
+ result = ReviewResult(id=line, errlevel=2,
+ stagestatus='驳回不支持语句',
+ errormessage='仅支持DML和DDL语句,查询语句请使用SQL查询功能!',
+ sql=statement)
+                # High-risk statements
+ elif critical_ddl_regex and p.match(sql_lower.strip()):
+ check_result.is_critical = True
+ result = ReviewResult(id=line, errlevel=2,
+ stagestatus='驳回高危SQL',
+ errormessage='禁止提交匹配' + critical_ddl_regex + '条件的语句!',
+ sql=statement)
+                # Data modification without a WHERE clause
+ elif re.match(r"^update((?!where).)*$|^delete((?!where).)*$",sql_lower):
+ check_result.is_critical = True
+ result = ReviewResult(id=line, errlevel=2,
+ stagestatus='驳回未带where数据修改',
+ errormessage='数据修改需带where条件!',
+ sql=statement)
+                # Reject transaction-control and session-control SQL
+ elif re.match(r"^set|^rollback|^exit", sql_lower):
+ check_result.is_critical = True
+ result = ReviewResult(id=line, errlevel=2,
+ stagestatus='SQL中不能包含^set|^rollback|^exit',
+ errormessage='SQL中不能包含^set|^rollback|^exit',
+ sql=statement)
- # 把所有SQL转换成SqlItem List。 如有多行(内部有多个;)执行块,约定以delimiter $$作为开始, 以$$结束
- # 需要在函数里实现单条SQL做sqlparse.format(sql, strip_comments=True)
- sqlitemList = get_full_sqlitem_list(sql, db_name)
-
- for sqlitem in sqlitemList:
- # 禁用语句
- if re.match(r"^\s*select", sqlitem.statement.lower(), re.I):
- check_result.is_critical = True
- result = ReviewResult(id=line, errlevel=2,
- stagestatus='驳回不支持语句',
- errormessage='仅支持DML和DDL语句,查询语句请使用SQL查询功能!',
- sql=sqlitem.statement)
- # 高危语句
- elif critical_ddl_regex and p.match(sqlitem.statement.strip().lower()):
- check_result.is_critical = True
- result = ReviewResult(id=line, errlevel=2,
- stagestatus='驳回高危SQL',
- errormessage='禁止提交匹配' + critical_ddl_regex + '条件的语句!',
- sql=sqlitem.statement)
-
- # 正常语句
- else:
- result = ReviewResult(id=line, errlevel=0,
- stagestatus='Audit completed',
- errormessage='None',
- sql=sqlitem.statement,
- stmt_type=sqlitem.stmt_type,
- object_owner=sqlitem.object_owner,
- object_type=sqlitem.object_type,
- object_name=sqlitem.object_name,
- affected_rows=0,
- execute_time=0, )
- # 判断工单类型
- if get_syntax_type(sqlitem.statement) == 'DDL':
- check_result.syntax_type = 1
- check_result.rows += [result]
-
- # 遇到禁用和高危语句直接返回,提高效率
- if check_result.is_critical:
- check_result.error_count += 1
- return check_result
- line += 1
+                # Use EXPLAIN for syntax and semantic checks
+ elif re.match(explain_re, sql_lower):
+ if self.check_create_index_table(db_name=db_name,sql=sql_lower,object_name_list=object_name_list):
+ object_name = self.get_sql_first_object_name(sql=sql_lower)
+ if '.' in object_name:
+ object_name = object_name
+ else:
+ object_name = f"""{db_name}.{object_name}"""
+ object_name_list.add(object_name)
+ result = ReviewResult(id=line, errlevel=1,
+ stagestatus='WARNING:新建表的新建索引语句暂无法检测!',
+ errormessage='WARNING:新建表的新建索引语句暂无法检测!',
+ sql=statement)
+ elif len(object_name_list) > 0 and self.get_dml_table(db_name=db_name,sql=sql_lower,object_name_list=object_name_list):
+ result = ReviewResult(id=line, errlevel=1,
+ stagestatus='WARNING:新建表的数据修改暂无法检测!',
+ errormessage='WARNING:新建表的数据修改暂无法检测!',
+ sql=statement)
+ else:
+ result_set = self.explain_check(db_name=db_name, sql=statement, close_conn=False)
+ if result_set['msg']:
+ check_result.is_critical = True
+ result = ReviewResult(id=line, errlevel=2,
+ stagestatus='explain语法检查未通过!',
+ errormessage=result_set['msg'],
+ sql=statement)
+ else:
+                        # Check object existence for create table / create index / create unique index
+ if re.match(r"^create\s+table|^create\s+index|^create\s+unique\s+index", sql_lower):
+ object_name = self.get_sql_first_object_name(sql=sql_lower)
+                            # Remember created objects so later SQL can be checked for existence
+ if '.' in object_name:
+ object_name = object_name
+ else:
+ object_name = f"""{db_name}.{object_name}"""
+ if self.object_name_check(db_name=db_name,
+ object_name=object_name) or object_name in object_name_list:
+ check_result.is_critical = True
+ result = ReviewResult(id=line, errlevel=2,
+ stagestatus=f"""{object_name}对象已经存在!""",
+ errormessage=f"""{object_name}对象已经存在!""",
+ sql=statement)
+ else:
+ object_name_list.add(object_name)
+ if result_set['rows'] > 1000:
+ result = ReviewResult(id=line, errlevel=1,
+ stagestatus='影响行数大于1000,请关注',
+ errormessage='影响行数大于1000,请关注',
+ sql=statement,
+ affected_rows=result_set['rows'],
+ execute_time=0, )
+ else:
+ result = ReviewResult(id=line, errlevel=0,
+ stagestatus='Audit completed',
+ errormessage='None',
+ sql=statement,
+ affected_rows=result_set['rows'],
+ execute_time=0, )
+ else:
+ if result_set['rows'] > 1000:
+ result = ReviewResult(id=line, errlevel=1,
+ stagestatus='影响行数大于1000,请关注',
+ errormessage='影响行数大于1000,请关注',
+ sql=statement,
+ affected_rows=result_set['rows'],
+ execute_time=0, )
+ else:
+ result = ReviewResult(id=line, errlevel=0,
+ stagestatus='Audit completed',
+ errormessage='None',
+ sql=statement,
+ affected_rows=result_set['rows'],
+ execute_time=0, )
+                # Other statements that cannot be validated with EXPLAIN
+ else:
+                    # Check object existence for alter table
+ if re.match(r"^alter\s+table\s", sql_lower):
+ object_name = self.get_sql_first_object_name(sql=sql_lower)
+ if '.' in object_name:
+ object_name = object_name
+ else:
+ object_name = f"""{db_name}.{object_name}"""
+ if not self.object_name_check(db_name=db_name, object_name=object_name) and object_name not in object_name_list:
+ check_result.is_critical = True
+ result = ReviewResult(id=line, errlevel=2,
+ stagestatus=f"""{object_name}对象不存在!""",
+ errormessage=f"""{object_name}对象不存在!""",
+ sql=statement)
+ else:
+ result = ReviewResult(id=line, errlevel=1,
+ stagestatus='当前平台,此语法不支持审核!',
+ errormessage='当前平台,此语法不支持审核!',
+ sql=statement,
+ affected_rows=0,
+ execute_time=0, )
+                    # Check object existence for other create statements
+ elif re.match(r"^create", sql_lower):
+ object_name = self.get_sql_first_object_name(sql=sql_lower)
+ if '.' in object_name:
+ object_name = object_name
+ else:
+ object_name = f"""{db_name}.{object_name}"""
+ if self.object_name_check(db_name=db_name,
+ object_name=object_name) or object_name in object_name_list:
+ check_result.is_critical = True
+ result = ReviewResult(id=line, errlevel=2,
+ stagestatus=f"""{object_name}对象已经存在!""",
+ errormessage=f"""{object_name}对象已经存在!""",
+ sql=statement)
+ else:
+ object_name_list.add(object_name)
+ result = ReviewResult(id=line, errlevel=1,
+ stagestatus='当前平台,此语法不支持审核!',
+ errormessage='当前平台,此语法不支持审核!',
+ sql=statement,
+ affected_rows=0,
+ execute_time=0, )
+ else:
+ result = ReviewResult(id=line, errlevel=1,
+ stagestatus='当前平台,此语法不支持审核!',
+ errormessage='当前平台,此语法不支持审核!',
+ sql=statement,
+ affected_rows=0,
+ execute_time=0, )
+
+                # Review of non-high-risk SQL
+
+                # Determine the workflow type
+ if get_syntax_type(sql=statement, db_type='oracle') == 'DDL':
+ check_result.syntax_type = 1
+ check_result.rows += [result]
+                # Return immediately on forbidden or high-risk statements to save time
+ if check_result.is_critical:
+ check_result.error_count += 1
+ return check_result
+ line += 1
+ except Exception as e:
+ logger.warning(f"Oracle 语句执行报错,第{line}个SQL:{statement},错误信息{traceback.format_exc()}")
+ check_result.error = str(e)
+ finally:
+ if close_conn:
+ self.close()
return check_result
def execute_workflow(self, workflow, close_conn=True):
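
The bulk of this patch is the new execute_check: each statement is stripped of comments, reindented and lower-cased with sqlparse, then routed by anchored regexes: queries, UPDATE/DELETE without WHERE, and session/transaction control are rejected; DML and simple CREATE statements are validated through explain_check plus object-existence checks; everything else is flagged as not reviewable. A self-contained illustration of that routing (the classify helper and sample SQL are hypothetical, only the regexes come from the patch):

    import re
    import sqlparse

    EXPLAINABLE = r"^merge|^update|^delete|^insert|^create\s+table|^create\s+index|^create\s+unique\s+index"

    def classify(statement):
        statement = sqlparse.format(statement, strip_comments=True, reindent=True, keyword_case='lower')
        sql_lower = statement.lower().rstrip(';')
        if re.match(r"^select|^with|^explain", sql_lower):
            return 'reject: query statements belong in SQL query, not in a workflow'
        if re.match(r"^update((?!where).)*$|^delete((?!where).)*$", sql_lower):
            return 'reject: UPDATE/DELETE without a WHERE clause'
        if re.match(r"^set|^rollback|^exit", sql_lower):
            return 'reject: transaction/session control statement'
        if re.match(EXPLAINABLE, sql_lower):
            return 'validate with EXPLAIN PLAN and object-existence checks'
        return 'cannot EXPLAIN; object-existence check or "not reviewable" warning'

    for stmt in sqlparse.split("update t_order set status = 1 where id = 100; drop user scott;"):
        print(classify(stmt))
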
From 55ed081e0cdd4d5acb477bcadc034c76e58790c1 Mon Sep 17 00:00:00 2001
From: jan-song <60806666+jan-song@users.noreply.github.com>
Date: Mon, 6 Apr 2020 19:53:32 +0800
Subject: [PATCH 03/16] Update oracle.py
---
sql/engines/oracle.py | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/sql/engines/oracle.py b/sql/engines/oracle.py
index fa7f9706af..bcede437d5 100644
--- a/sql/engines/oracle.py
+++ b/sql/engines/oracle.py
@@ -319,8 +319,7 @@ def filter_sql(self, sql='', limit_num=0):
sql_lower = sql.lower()
# 对查询sql增加limit限制
if re.match(r"^select|^with", sql_lower):
- if sql_lower.find(' rownum ') == -1:
- f"select a.* from ({sql.rstrip(';')}) a WHERE ROWNUM <= {limit_num}"
+ reture f"select a.* from ({sql.rstrip(';')}) a WHERE ROWNUM <= {limit_num}"
return sql.strip()
def query(self, db_name=None, sql='', limit_num=0, close_conn=True, **kwargs):
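
Patch 03 switches filter_sql from appending a WHERE/AND ROWNUM condition to wrapping the whole query in a subquery, which keeps the limit valid even when the query ends with ORDER BY or GROUP BY. The intended transformation (the `reture` typo on the new line is corrected in the next patch) looks like this for a sample query:

    sql = "select id, name from t_user order by id;"
    limit_num = 100
    wrapped = f"select a.* from ({sql.rstrip(';')}) a WHERE ROWNUM <= {limit_num}"
    print(wrapped)
    # select a.* from (select id, name from t_user order by id) a WHERE ROWNUM <= 100
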
From e52dd0b0c4f6d790de336692377c4f5fca1b8362 Mon Sep 17 00:00:00 2001
From: jan-song <60806666+jan-song@users.noreply.github.com>
Date: Mon, 6 Apr 2020 20:12:05 +0800
Subject: [PATCH 04/16] Update oracle.py
---
sql/engines/oracle.py | 571 +++++++-----------------------------------
1 file changed, 97 insertions(+), 474 deletions(-)
diff --git a/sql/engines/oracle.py b/sql/engines/oracle.py
index bcede437d5..27a6afd589 100644
--- a/sql/engines/oracle.py
+++ b/sql/engines/oracle.py
@@ -4,12 +4,11 @@
import traceback
import re
import sqlparse
-import MySQLdb
import simplejson as json
from common.config import SysConfig
from common.utils.timer import FuncTimer
-from sql.utils.sql_utils import get_syntax_type
+from sql.utils.sql_utils import get_syntax_type, get_full_sqlitem_list, get_exec_sqlitem_list
from . import EngineBase
import cx_Oracle
from .models import ResultSet, ReviewSet, ReviewResult
@@ -17,6 +16,7 @@
logger = logging.getLogger('default')
+
class OracleEngine(EngineBase):
def __init__(self, instance=None):
@@ -45,26 +45,6 @@ def name(self):
def info(self):
return 'Oracle engine'
- @property
- def auto_backup(self):
- """是否支持备份"""
- return True
-
- @staticmethod
- def get_backup_connection():
- archer_config = SysConfig()
- backup_host = archer_config.get('inception_remote_backup_host')
- backup_port = int(archer_config.get('inception_remote_backup_port', 3306))
- backup_user = archer_config.get('inception_remote_backup_user')
- backup_password = archer_config.get('inception_remote_backup_password')
- return MySQLdb.connect(host=backup_host,
- port=backup_port,
- user=backup_user,
- passwd=backup_password,
- charset='utf8mb4',
- autocommit=True
- )
-
@property
def server_version(self):
conn = self.get_connection()
@@ -111,15 +91,7 @@ def get_all_tables(self, db_name, **kwargs):
"""获取table 列表, 返回一个ResultSet"""
sql = f"""SELECT table_name FROM all_tables WHERE nvl(tablespace_name, 'no tablespace') NOT IN ('SYSTEM', 'SYSAUX') AND OWNER = '{db_name}' AND IOT_NAME IS NULL AND DURATION IS NULL
"""
- result = self.query(db_name=db_name, sql=sql)
- tb_list = [row[0] for row in result.rows if row[0] not in ['test']]
- result.rows = tb_list
- return result
-
- def get_all_objects(self, db_name, **kwargs):
- """获取table 列表, 返回一个ResultSet"""
- sql = f"""SELECT object_name FROM all_objects WHERE OWNER = '{db_name}' """
- result = self.query(db_name=db_name, sql=sql)
+ result = self.query(sql=sql)
tb_list = [row[0] for row in result.rows if row[0] not in ['test']]
result.rows = tb_list
return result
@@ -143,143 +115,9 @@ def describe_table(self, db_name, tb_name, **kwargs):
FROM all_tab_cols
WHERE table_name = '{tb_name}' and owner = '{db_name}'
"""
- result = self.query(db_name=db_name, sql=sql)
+ result = self.query(sql=sql)
return result
- def object_name_check(self, db_name=None, object_name=''):
- """获取table 列表, 返回一个ResultSet"""
- if '.' in object_name:
- schema_name = object_name.split('.')[0]
- object_name = object_name.split('.')[1]
- sql = f"""SELECT object_name FROM all_objects WHERE OWNER = upper('{schema_name}') and OBJECT_NAME = upper('{object_name}')"""
- else:
- sql = f"""SELECT object_name FROM all_objects WHERE OWNER = upper('{db_name}') and OBJECT_NAME = upper('{object_name}')"""
- result = self.query(db_name=db_name, sql=sql,close_conn=False)
- if result.affected_rows > 0:
- return True
- else:
- return False
-
- def get_sql_first_object_name(self, sql=''):
- """获取sql文本中的object_name"""
- object_name = ''
- if re.match(r"^create\s+table\s", sql):
- object_name = re.match(r"^create\s+table\s(.+?)(\s|\()",sql,re.M).group(1)
- elif re.match(r"^create\s+index\s", sql):
- object_name = re.match(r"^create\s+index\s(.+?)\s",sql,re.M).group(1)
- elif re.match(r"^create\s+unique\s+index\s", sql):
- object_name = re.match(r"^create\s+unique\s+index\s(.+?)\s", sql, re.M).group(1)
- elif re.match(r"^create\s+sequence\s", sql):
- object_name = re.match(r"^create\s+sequence\s(.+?)(\s|$)",sql,re.M).group(1)
- elif re.match(r"^alter\s+table\s", sql):
- object_name = re.match(r"^alter\s+table\s(.+?)\s",sql,re.M).group(1)
- elif re.match(r"^create\s+function\s", sql):
- object_name = re.match(r"^create\s+function\s(.+?)(\s|\()",sql,re.M).group(1)
- elif re.match(r"^create\s+view\s", sql):
- object_name = re.match(r"^create\s+view\s(.+?)\s",sql,re.M).group(1)
- elif re.match(r"^create\s+procedure\s", sql):
- object_name = re.match(r"^create\s+procedure\s(.+?)\s",sql,re.M).group(1)
- elif re.match(r"^create\s+package\s+body", sql):
- object_name = re.match(r"^create\s+package\s+body\s(.+?)\s",sql,re.M).group(1)
- elif re.match(r"^create\s+package\s", sql):
- object_name = re.match(r"^create\s+package\s(.+?)\s",sql,re.M).group(1)
- else:
- return object_name.strip()
- return object_name.strip()
-
- def check_create_index_table(self,sql='',object_name_list=set(),db_name=''):
- result = {'msg': '', 'bad_query': False}
- table_name = ''
- if re.match(r"^create\s+index\s",sql):
- table_name = re.match(r"^create\s+index\s+.+\s+on\s(.+?)(\(|\s\()",sql,re.M).group(1)
- if '.' not in table_name:
- table_name = f"{db_name}.{table_name}"
- if table_name in object_name_list:
- return True
- else:
- return False
- elif re.match(r"^create\s+unique\s+index\s", sql):
- table_name = re.match(r"^create\s+unique\s+index\s+.+\s+on\s(.+?)(\(|\s\()", sql, re.M).group(1)
- if '.' not in table_name:
- table_name = f"{db_name}.{table_name}"
- if table_name in object_name_list:
- return True
- else:
- return False
- else:
- return False
-
- def get_dml_table(self,sql='',object_name_list=set(),db_name=''):
- if re.match(r"^update",sql):
- table_name = re.match(r"^update\s(.+?)\s",sql,re.M).group(1)
- if '.' not in table_name:
- table_name = f"{db_name}.{table_name}"
- if table_name in object_name_list:
- return True
- else:
- return False
- elif re.match(r"^delete", sql):
- table_name = re.match(r"^delete\s+from\s(.+?)\s", sql, re.M).group(1)
- if '.' not in table_name:
- table_name = f"{db_name}.{table_name}"
- if table_name in object_name_list:
- return True
- else:
- return False
- elif re.match(r"^insert", sql):
- table_name = re.match(r"^insert\s+into\s(.+?)(\(|\s)", sql, re.M).group(1)
- if '.' not in table_name:
- table_name = f"{db_name}.{table_name}"
- if table_name in object_name_list:
- return True
- else:
- return False
- else:
- return False
-
- def where_check(self,sql=''):
- if re.match(r"^update((?!where).)*$|^delete((?!where).)*$",sql):
- return True
- else:
- parsed = sqlparse.parse(sql)[0]
- flattened = list(parsed.flatten())
- n_skip = 0
- flattened = flattened[:len(flattened) - n_skip]
- logical_operators = ('AND', 'OR', 'NOT', 'BETWEEN', 'ORDER BY', 'GROUP BY', 'HAVING')
- for t in reversed(flattened):
- if t.is_keyword:
- return True
- return False
-
- def explain_check(self, db_name=None, sql='', close_conn=False):
- result = {'msg': '', 'rows': 0}
- try:
- conn = self.get_connection()
- cursor = conn.cursor()
- if db_name:
- cursor.execute(f"ALTER SESSION SET CURRENT_SCHEMA = {db_name}")
- if re.match(r"^explain", sql, re.I):
- sql = sql
- else:
- sql = f"explain plan for {sql}"
- sql = sql.rstrip(';')
- cursor.execute(sql)
- # 获取影响行数
- cursor.execute(f"select CARDINALITY from SYS.PLAN_TABLE$ where id = 0")
- rows = cursor.fetchone()
- conn.rollback()
- if rows[0] is None:
- result['rows'] = 0
- else:
- result['rows'] = rows[0]
- except Exception as e:
- logger.warning(f"Oracle 语句执行报错,语句:{sql},错误信息{traceback.format_exc()}")
- result['msg'] = str(e)
- finally:
- if close_conn:
- self.close()
- return result
-
def query_check(self, db_name=None, sql=''):
# 查询语句的检查、注释去除、切分
result = {'msg': '', 'bad_query': False, 'filtered_sql': sql, 'has_star': False}
@@ -287,9 +125,9 @@ def query_check(self, db_name=None, sql=''):
star_patter = r"(^|,|\s)\*(\s|\(|$)"
# 删除注释语句,进行语法判断,执行第一条有效sql
try:
- sql = sqlparse.format(sql, strip_comments=True)
sql = sqlparse.split(sql)[0]
result['filtered_sql'] = re.sub(r';$', '', sql.strip())
+ sql = sqlparse.format(sql, strip_comments=True)
sql_lower = sql.lower()
except IndexError:
result['bad_query'] = True
@@ -297,7 +135,7 @@ def query_check(self, db_name=None, sql=''):
return result
if re.match(r"^select|^with|^explain", sql_lower) is None:
result['bad_query'] = True
- result['msg'] = '不支持语法!'
+ result['msg'] = '仅支持^select|^with|^explain查询语法!'
return result
if re.search(star_patter, sql_lower) is not None:
keyword_warning += '禁止使用 * 关键词\n'
@@ -307,19 +145,14 @@ def query_check(self, db_name=None, sql=''):
result['bad_query'] = True
if result.get('bad_query') or result.get('has_star'):
result['msg'] = keyword_warning
- #select语句先使用Explain判断语法是否正确
- if re.match(r"^select|^with", sql, re.I):
- explain_result = self.explain_check(db_name=db_name, sql=f"explain plan for {sql}")
- if explain_result['msg']:
- result['bad_query'] = True
- result['msg'] = explain_result['msg']
return result
def filter_sql(self, sql='', limit_num=0):
sql_lower = sql.lower()
# 对查询sql增加limit限制
if re.match(r"^select|^with", sql_lower):
- reture f"select a.* from ({sql.rstrip(';')}) a WHERE ROWNUM <= {limit_num}"
+ if re.match(r"^select|^with", sql_lower):
+ return f"select a.* from ({sql.rstrip(';')}) a WHERE ROWNUM <= {limit_num}"
return sql.strip()
def query(self, db_name=None, sql='', limit_num=0, close_conn=True, **kwargs):
@@ -330,18 +163,11 @@ def query(self, db_name=None, sql='', limit_num=0, close_conn=True, **kwargs):
cursor = conn.cursor()
if db_name:
cursor.execute(f"ALTER SESSION SET CURRENT_SCHEMA = {db_name}")
+            # Support Oracle execution-plan (EXPLAIN) query statements
if re.match(r"^explain", sql, re.I):
- try:
- sql = sql.rstrip(';')
- cursor.execute(sql)
- # 重置SQL文本,获取SQL执行计划
- sql = f"select PLAN_TABLE_OUTPUT from table(dbms_xplan.display)"
- except Exception as e:
- logger.warning(f"Oracle命令执行报错,语句:{sql}, 错误信息:{traceback.format_exc()}")
- result_set.error = str(e)
- if close_conn:
- self.close()
- return result_set
+ cursor.execute(sql)
+            # Replace the SQL text to fetch the execution plan
+ sql = f"select PLAN_TABLE_OUTPUT from table(dbms_xplan.display)"
cursor.execute(sql)
fields = cursor.description
if any(x[1] == cx_Oracle.CLOB for x in fields):
@@ -369,240 +195,126 @@ def query_masking(self, schema_name=None, sql='', resultset=None):
"""传入 sql语句, db名, 结果集,
返回一个脱敏后的结果集"""
# 仅对select语句脱敏
- if re.match(r"^select|^with", sql, re.I):
+ if re.match(r"^select", sql, re.I):
filtered_result = brute_mask(self.instance, resultset)
filtered_result.is_masked = True
else:
filtered_result = resultset
return filtered_result
- def execute_check(self, db_name=None, sql='', close_conn=True):
+ def execute_check(self, db_name=None, sql=''):
"""上线单执行前的检查, 返回Review set"""
config = SysConfig()
check_result = ReviewSet(full_sql=sql)
- explain_re = r"^merge|^update|^delete|^insert|^create\s+table|^create\s+index|^create\s+unique\s+index"
# 禁用/高危语句检查
line = 1
- #
- object_name_list = set()
- cache_object_name_list = set()
critical_ddl_regex = config.get('critical_ddl_regex', '')
p = re.compile(critical_ddl_regex)
check_result.syntax_type = 2 # TODO 工单类型 0、其他 1、DDL,2、DML
- try:
- for statement in sqlparse.split(sql):
- statement = sqlparse.format(statement, strip_comments=True, reindent=True, keyword_case='lower')
- sql_lower = statement.lower().rstrip(';')
- # 禁用语句
- if re.match(r"^select|^with|^explain", sql_lower):
- check_result.is_critical = True
- result = ReviewResult(id=line, errlevel=2,
- stagestatus='驳回不支持语句',
- errormessage='仅支持DML和DDL语句,查询语句请使用SQL查询功能!',
- sql=statement)
- # 高危语句
- elif critical_ddl_regex and p.match(sql_lower.strip()):
- check_result.is_critical = True
- result = ReviewResult(id=line, errlevel=2,
- stagestatus='驳回高危SQL',
- errormessage='禁止提交匹配' + critical_ddl_regex + '条件的语句!',
- sql=statement)
- # 未带where数据修改语句
- elif re.match(r"^update((?!where).)*$|^delete((?!where).)*$",sql_lower):
- check_result.is_critical = True
- result = ReviewResult(id=line, errlevel=2,
- stagestatus='驳回未带where数据修改',
- errormessage='数据修改需带where条件!',
- sql=statement)
- # 驳回事务控制,会话控制SQL
- elif re.match(r"^set|^rollback|^exit", sql_lower):
- check_result.is_critical = True
- result = ReviewResult(id=line, errlevel=2,
- stagestatus='SQL中不能包含^set|^rollback|^exit',
- errormessage='SQL中不能包含^set|^rollback|^exit',
- sql=statement)
-
- #通过explain做语法语义检查
- elif re.match(explain_re, sql_lower):
- if self.check_create_index_table(db_name=db_name,sql=sql_lower,object_name_list=object_name_list):
- object_name = self.get_sql_first_object_name(sql=sql_lower)
- if '.' in object_name:
- object_name = object_name
- else:
- object_name = f"""{db_name}.{object_name}"""
- object_name_list.add(object_name)
- result = ReviewResult(id=line, errlevel=1,
- stagestatus='WARNING:新建表的新建索引语句暂无法检测!',
- errormessage='WARNING:新建表的新建索引语句暂无法检测!',
- sql=statement)
- elif len(object_name_list) > 0 and self.get_dml_table(db_name=db_name,sql=sql_lower,object_name_list=object_name_list):
- result = ReviewResult(id=line, errlevel=1,
- stagestatus='WARNING:新建表的数据修改暂无法检测!',
- errormessage='WARNING:新建表的数据修改暂无法检测!',
- sql=statement)
- else:
- result_set = self.explain_check(db_name=db_name, sql=statement, close_conn=False)
- if result_set['msg']:
- check_result.is_critical = True
- result = ReviewResult(id=line, errlevel=2,
- stagestatus='explain语法检查未通过!',
- errormessage=result_set['msg'],
- sql=statement)
- else:
- # 对create table\create index\create unique index语法做对象存在性检测
- if re.match(r"^create\s+table|^create\s+index|^create\s+unique\s+index", sql_lower):
- object_name = self.get_sql_first_object_name(sql=sql_lower)
- # 保存create对象对后续SQL做存在性判断
- if '.' in object_name:
- object_name = object_name
- else:
- object_name = f"""{db_name}.{object_name}"""
- if self.object_name_check(db_name=db_name,
- object_name=object_name) or object_name in object_name_list:
- check_result.is_critical = True
- result = ReviewResult(id=line, errlevel=2,
- stagestatus=f"""{object_name}对象已经存在!""",
- errormessage=f"""{object_name}对象已经存在!""",
- sql=statement)
- else:
- object_name_list.add(object_name)
- if result_set['rows'] > 1000:
- result = ReviewResult(id=line, errlevel=1,
- stagestatus='影响行数大于1000,请关注',
- errormessage='影响行数大于1000,请关注',
- sql=statement,
- affected_rows=result_set['rows'],
- execute_time=0, )
- else:
- result = ReviewResult(id=line, errlevel=0,
- stagestatus='Audit completed',
- errormessage='None',
- sql=statement,
- affected_rows=result_set['rows'],
- execute_time=0, )
- else:
- if result_set['rows'] > 1000:
- result = ReviewResult(id=line, errlevel=1,
- stagestatus='影响行数大于1000,请关注',
- errormessage='影响行数大于1000,请关注',
- sql=statement,
- affected_rows=result_set['rows'],
- execute_time=0, )
- else:
- result = ReviewResult(id=line, errlevel=0,
- stagestatus='Audit completed',
- errormessage='None',
- sql=statement,
- affected_rows=result_set['rows'],
- execute_time=0, )
- # 其它无法用explain判断的语句
- else:
- # 对alter table做对象存在性检查
- if re.match(r"^alter\s+table\s", sql_lower):
- object_name = self.get_sql_first_object_name(sql=sql_lower)
- if '.' in object_name:
- object_name = object_name
- else:
- object_name = f"""{db_name}.{object_name}"""
- if not self.object_name_check(db_name=db_name, object_name=object_name) and object_name not in object_name_list:
- check_result.is_critical = True
- result = ReviewResult(id=line, errlevel=2,
- stagestatus=f"""{object_name}对象不存在!""",
- errormessage=f"""{object_name}对象不存在!""",
- sql=statement)
- else:
- result = ReviewResult(id=line, errlevel=1,
- stagestatus='当前平台,此语法不支持审核!',
- errormessage='当前平台,此语法不支持审核!',
- sql=statement,
- affected_rows=0,
- execute_time=0, )
- # 对create做对象存在性检查
- elif re.match(r"^create", sql_lower):
- object_name = self.get_sql_first_object_name(sql=sql_lower)
- if '.' in object_name:
- object_name = object_name
- else:
- object_name = f"""{db_name}.{object_name}"""
- if self.object_name_check(db_name=db_name,
- object_name=object_name) or object_name in object_name_list:
- check_result.is_critical = True
- result = ReviewResult(id=line, errlevel=2,
- stagestatus=f"""{object_name}对象已经存在!""",
- errormessage=f"""{object_name}对象已经存在!""",
- sql=statement)
- else:
- object_name_list.add(object_name)
- result = ReviewResult(id=line, errlevel=1,
- stagestatus='当前平台,此语法不支持审核!',
- errormessage='当前平台,此语法不支持审核!',
- sql=statement,
- affected_rows=0,
- execute_time=0, )
- else:
- result = ReviewResult(id=line, errlevel=1,
- stagestatus='当前平台,此语法不支持审核!',
- errormessage='当前平台,此语法不支持审核!',
- sql=statement,
- affected_rows=0,
- execute_time=0, )
-
- # 非高危SQL审核
-
- # 判断工单类型
- if get_syntax_type(sql=statement, db_type='oracle') == 'DDL':
- check_result.syntax_type = 1
- check_result.rows += [result]
- # 遇到禁用和高危语句直接返回,提高效率
- if check_result.is_critical:
- check_result.error_count += 1
- return check_result
- line += 1
- except Exception as e:
- logger.warning(f"Oracle 语句执行报错,第{line}个SQL:{statement},错误信息{traceback.format_exc()}")
- check_result.error = str(e)
- finally:
- if close_conn:
- self.close()
+
+        # Convert all SQL into a SqlItem list. Multi-statement execution blocks (containing several ';' internally) must start with "delimiter $$" and end with "$$"
+        # Each single SQL statement is expected to get sqlparse.format(sql, strip_comments=True) inside that helper
+ sqlitemList = get_full_sqlitem_list(sql, db_name)
+
+ for sqlitem in sqlitemList:
+            # Forbidden statements
+ if re.match(r"^\s*select", sqlitem.statement.lower(), re.I):
+ check_result.is_critical = True
+ result = ReviewResult(id=line, errlevel=2,
+ stagestatus='驳回不支持语句',
+ errormessage='仅支持DML和DDL语句,查询语句请使用SQL查询功能!',
+ sql=sqlitem.statement)
+            # High-risk statements
+ elif critical_ddl_regex and p.match(sqlitem.statement.strip().lower()):
+ check_result.is_critical = True
+ result = ReviewResult(id=line, errlevel=2,
+ stagestatus='驳回高危SQL',
+ errormessage='禁止提交匹配' + critical_ddl_regex + '条件的语句!',
+ sql=sqlitem.statement)
+
+            # Normal statements
+ else:
+ result = ReviewResult(id=line, errlevel=0,
+ stagestatus='Audit completed',
+ errormessage='None',
+ sql=sqlitem.statement,
+ stmt_type=sqlitem.stmt_type,
+ object_owner=sqlitem.object_owner,
+ object_type=sqlitem.object_type,
+ object_name=sqlitem.object_name,
+ affected_rows=0,
+ execute_time=0, )
+            # Determine the workflow type
+ if get_syntax_type(sqlitem.statement) == 'DDL':
+ check_result.syntax_type = 1
+ check_result.rows += [result]
+
+            # Return immediately on forbidden or high-risk statements to save time
+ if check_result.is_critical:
+ check_result.error_count += 1
+ return check_result
+ line += 1
return check_result
def execute_workflow(self, workflow, close_conn=True):
- """执行上线单,返回Review set"""
+        """Execute the workflow and return a ReviewSet.
+        The old logic simply split sql_content and executed the resulting statements;
+        the new logic executes the SQL recorded in the review result,
+        and for PL/SQL object definitions (stored procedures, etc.) it also verifies that the new object compiles!
+ """
+ review_content = workflow.sqlworkflowcontent.review_content
+ review_result = json.loads(review_content)
+ sqlitemList = get_exec_sqlitem_list(review_result, workflow.db_name)
+
sql = workflow.sqlworkflowcontent.sql_content
execute_result = ReviewSet(full_sql=sql)
- # 删除注释语句,切分语句,将切换CURRENT_SCHEMA语句增加到切分结果中
- sql = sqlparse.format(sql, strip_comments=True)
- split_sql = [f"ALTER SESSION SET CURRENT_SCHEMA = {workflow.db_name};"] + sqlparse.split(sql)
+
line = 1
statement = None
try:
conn = self.get_connection()
cursor = conn.cursor()
# 逐条执行切分语句,追加到执行结果中
- cursor.execute(f"alter session set nls_date_format='yyyy-mm-dd hh24:mi:ss'")
- cursor.execute(f"select sysdate from dual")
- rows = cursor.fetchone()
- begin_time = rows[0]
- for statement in split_sql:
- statement = statement.rstrip(';')
+ for sqlitem in sqlitemList:
+ statement = sqlitem.statement
+ if sqlitem.stmt_type == "SQL":
+ statement = statement.rstrip(';')
with FuncTimer() as t:
- if statement !='':
- cursor.execute(statement)
- conn.commit()
+ cursor.execute(statement)
+ conn.commit()
+
+ rowcount = cursor.rowcount
+ stagestatus = "Execute Successfully"
+ if sqlitem.stmt_type == "PLSQL" and sqlitem.object_name and sqlitem.object_name != 'ANONYMOUS' and sqlitem.object_name != '':
+ query_obj_sql = f"""SELECT OBJECT_NAME, STATUS, TO_CHAR(LAST_DDL_TIME, 'YYYY-MM-DD HH24:MI:SS') FROM ALL_OBJECTS
+ WHERE OWNER = '{sqlitem.object_owner}'
+ AND OBJECT_NAME = '{sqlitem.object_name}'
+ """
+ cursor.execute(query_obj_sql)
+ row = cursor.fetchone()
+ if row:
+ status = row[1]
+ if status and status == "INVALID":
+ stagestatus = "Compile Failed. Object " + sqlitem.object_owner + "." + sqlitem.object_name + " is invalid."
+ else:
+ stagestatus = "Compile Failed. Object " + sqlitem.object_owner + "." + sqlitem.object_name + " doesn't exist."
+
+ if stagestatus != "Execute Successfully":
+ raise Exception(stagestatus)
+
execute_result.rows.append(ReviewResult(
id=line,
errlevel=0,
- stagestatus='Execute Successfully',
+ stagestatus=stagestatus,
errormessage='None',
sql=statement,
- affected_rows=cursor.rowcount,
+ affected_rows=rowcount,
execute_time=t.cost,
))
line += 1
except Exception as e:
logger.warning(f"Oracle命令执行报错,语句:{statement or sql}, 错误信息:{traceback.format_exc()}")
execute_result.error = str(e)
- #conn.rollback()
# 追加当前报错语句信息到执行结果中
execute_result.rows.append(ReviewResult(
id=line,
@@ -615,111 +327,22 @@ def execute_workflow(self, workflow, close_conn=True):
))
line += 1
# 报错语句后面的语句标记为审核通过、未执行,追加到执行结果中
- for statement in split_sql[line - 1:]:
+ for sqlitem in sqlitemList[line - 1:]:
execute_result.rows.append(ReviewResult(
id=line,
errlevel=0,
stagestatus='Audit completed',
errormessage=f'前序语句失败, 未执行',
- sql=statement,
+ sql=sqlitem.statement,
affected_rows=0,
execute_time=0,
))
line += 1
finally:
- # 生成回滚SQL
- cursor.execute(f"select sysdate from dual")
- rows = cursor.fetchone()
- end_time = rows[0]
- logmnr_start_sql = f'''begin
- dbms_logmnr.start_logmnr(
- starttime=>to_date('{begin_time}','yyyy-mm-dd hh24:mi:ss'),
- endtime=>to_date('{end_time}','yyyy/mm/dd hh24:mi:ss'),
- options=>dbms_logmnr.dict_from_online_catalog + dbms_logmnr.continuous_mine);
- end;'''
- undo_sql = f'''select sql_redo,sql_undo from v$logmnr_contents where
- SEG_OWNER <> 'SYS'
- and session# = (select s.sid from v$session s where s.sid = (select sid from v$mystat where rownum = 1 ))
- and serial# = (select serial# from v$session s where s.sid = (select sid from v$mystat where rownum = 1 )) order by scn desc'''
- logmnr_end_sql = f'''begin
- dbms_logmnr.end_logmnr;
- end;'''
- workflow_id = f"{workflow.sqlworkflowcontent.workflow_id}"
- cursor.execute(logmnr_start_sql)
- cursor.execute(undo_sql)
- rows = cursor.fetchall()
- cursor.execute(logmnr_end_sql)
- self.ora_backup_insert(rows=rows, id=workflow_id)
if close_conn:
self.close()
return execute_result
- def ora_backup_insert(self, rows = [], id = 0):
- # 回滚SQL入库
- # 创建连接
- try:
- conn = self.get_backup_connection()
- cur = conn.cursor()
- cur.execute(f"""create database if not exists ora_backup;""")
- cur.execute(f"use ora_backup;")
- cur.execute(f"""CREATE TABLE if not exists `sql_rollback` (
- `id` bigint(20) NOT NULL AUTO_INCREMENT,
- `redo_sql` mediumtext,
- `undo_sql` mediumtext,
- `workflow_id` bigint(20) NOT NULL,
- PRIMARY KEY (`id`),
- key `idx_sql_rollback_01` (`workflow_id`)
- ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;""")
- if len(rows) > 0:
- for row in rows:
- redo_sql=f"{row[0]}"
- redo_sql=redo_sql.replace("'","\\'")
- undo_sql=f"{row[1]}"
- undo_sql=undo_sql.replace("'","\\'")
- sql = f"""insert into sql_rollback(redo_sql,undo_sql,workflow_id) values('{redo_sql}','{undo_sql}',{id});"""
- cur.execute(sql)
- except Exception as e:
- logger.warning(f"备份失败,错误信息{traceback.format_exc()}")
- return False
- finally:
- # 关闭连接
- if conn:
- conn.close()
- return True
-
- def get_rollback(self, workflow):
- """
- 获取回滚语句,并且按照执行顺序倒序展示,return ['源语句','回滚语句']
- """
- list_execute_result = json.loads(workflow.sqlworkflowcontent.execute_result)
- workflow_id = workflow.sqlworkflowcontent.workflow_id
- # 回滚语句倒序展示
- list_execute_result.reverse()
- list_backup_sql = []
- try:
- # 创建连接
- conn = self.get_backup_connection()
- cur = conn.cursor()
- sql = f"""select redo_sql,undo_sql from sql_rollback where workflow_id = {workflow_id} order by id;"""
- cur.execute(f"use ora_backup;")
- cur.execute(sql)
- list_tables = cur.fetchall()
- for row in list_tables:
- redo_sql = row[0]
- if row[1] is None:
- undo_sql = ' '
- else:
- undo_sql = row[1]
- # 拼接成回滚语句列表,['源语句','回滚语句']
- list_backup_sql.append([redo_sql,undo_sql])
- except Exception as e:
- logger.error(f"获取回滚语句报错,异常信息{traceback.format_exc()}")
- raise Exception(e)
- # 关闭连接
- if conn:
- conn.close()
- return list_backup_sql
-
def close(self):
if self.conn:
self.conn.close()
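
Patch 04 reverts the engine to the SqlItem-based execute_check and execute_workflow: statements come from get_full_sqlitem_list / get_exec_sqlitem_list again, and after a PLSQL item is executed the workflow looks the object up in ALL_OBJECTS and fails the step when it is missing or left INVALID. A short sketch of that compile check, assuming an open cx_Oracle cursor (the helper name and bind variables are illustrative):

    def plsql_compile_status(cursor, owner, object_name):
        cursor.execute(
            """SELECT OBJECT_NAME, STATUS FROM ALL_OBJECTS
               WHERE OWNER = :owner AND OBJECT_NAME = :name""",
            owner=owner, name=object_name)
        row = cursor.fetchone()
        if row is None:
            return f"Compile Failed. Object {owner}.{object_name} doesn't exist."
        if row[1] == "INVALID":
            return f"Compile Failed. Object {owner}.{object_name} is invalid."
        return "Execute Successfully"
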
From 8d3956dd32854ca08c42669ab76c5cd1e89a740d Mon Sep 17 00:00:00 2001
From: jan-song <60806666+jan-song@users.noreply.github.com>
Date: Mon, 6 Apr 2020 20:55:47 +0800
Subject: [PATCH 05/16] Update oracle.py
---
sql/engines/oracle.py | 117 ++++++++++++++++++++++++++++++++++++++++++
1 file changed, 117 insertions(+)
diff --git a/sql/engines/oracle.py b/sql/engines/oracle.py
index 27a6afd589..b75674c56e 100644
--- a/sql/engines/oracle.py
+++ b/sql/engines/oracle.py
@@ -4,6 +4,7 @@
import traceback
import re
import sqlparse
+import MySQLdb
import simplejson as json
from common.config import SysConfig
@@ -44,6 +45,26 @@ def name(self):
@property
def info(self):
return 'Oracle engine'
+
+ @property
+ def auto_backup(self):
+        """Whether backup is supported"""
+ return True
+
+ @staticmethod
+ def get_backup_connection():
+ archer_config = SysConfig()
+ backup_host = archer_config.get('inception_remote_backup_host')
+ backup_port = int(archer_config.get('inception_remote_backup_port', 3306))
+ backup_user = archer_config.get('inception_remote_backup_user')
+ backup_password = archer_config.get('inception_remote_backup_password')
+ return MySQLdb.connect(host=backup_host,
+ port=backup_port,
+ user=backup_user,
+ passwd=backup_password,
+ charset='utf8mb4',
+ autocommit=True
+ )
@property
def server_version(self):
@@ -274,6 +295,11 @@ def execute_workflow(self, workflow, close_conn=True):
try:
conn = self.get_connection()
cursor = conn.cursor()
+            # Record the workflow execution time, used as the LogMiner start time for the rollback backup
+ cursor.execute(f"alter session set nls_date_format='yyyy-mm-dd hh24:mi:ss'")
+ cursor.execute(f"select sysdate from dual")
+ rows = cursor.fetchone()
+ begin_time = rows[0]
# 逐条执行切分语句,追加到执行结果中
for sqlitem in sqlitemList:
statement = sqlitem.statement
@@ -339,10 +365,101 @@ def execute_workflow(self, workflow, close_conn=True):
))
line += 1
finally:
+            # Generate rollback SQL; the executing user needs "grant select any transaction to" and "grant execute on dbms_logmnr to" privileges
+            # The database must have minimal supplemental logging enabled: alter database add supplemental log data;
+            # It must also be in archivelog mode; supplemental logging adds some redo volume, usually minor, but archive disk space and redo I/O capacity should be assessed
+ cursor.execute(f"select sysdate from dual")
+ rows = cursor.fetchone()
+ end_time = rows[0]
+ logmnr_start_sql = f'''begin
+ dbms_logmnr.start_logmnr(
+ starttime=>to_date('{begin_time}','yyyy-mm-dd hh24:mi:ss'),
+ endtime=>to_date('{end_time}','yyyy/mm/dd hh24:mi:ss'),
+ options=>dbms_logmnr.dict_from_online_catalog + dbms_logmnr.continuous_mine);
+ end;'''
+ undo_sql = f'''select sql_redo,sql_undo from v$logmnr_contents where
+ SEG_OWNER not in ('SYS','SYSTEM')
+ and session# = (select s.sid from v$session s where s.sid = (select sid from v$mystat where rownum = 1 ))
+ and serial# = (select serial# from v$session s where s.sid = (select sid from v$mystat where rownum = 1 )) order by scn desc'''
+ logmnr_end_sql = f'''begin
+ dbms_logmnr.end_logmnr;
+ end;'''
+ workflow_id = f"{workflow.sqlworkflowcontent.workflow_id}"
+ cursor.execute(logmnr_start_sql)
+ cursor.execute(undo_sql)
+ rows = cursor.fetchall()
+ cursor.execute(logmnr_end_sql)
+ self.ora_backup_insert(rows=rows, id=workflow_id)
if close_conn:
self.close()
return execute_result
+
+ def ora_backup_insert(self, rows = [], id = 0):
+        # Persist the rollback SQL to the backup database
+        # Create the connection
+ try:
+ conn = self.get_backup_connection()
+ cur = conn.cursor()
+ cur.execute(f"""create database if not exists ora_backup;""")
+ cur.execute(f"use ora_backup;")
+ cur.execute(f"""CREATE TABLE if not exists `sql_rollback` (
+ `id` bigint(20) NOT NULL AUTO_INCREMENT,
+ `redo_sql` mediumtext,
+ `undo_sql` mediumtext,
+ `workflow_id` bigint(20) NOT NULL,
+ PRIMARY KEY (`id`),
+ key `idx_sql_rollback_01` (`workflow_id`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;""")
+ if len(rows) > 0:
+ for row in rows:
+ redo_sql=f"{row[0]}"
+ redo_sql=redo_sql.replace("'","\\'")
+ undo_sql=f"{row[1]}"
+ undo_sql=undo_sql.replace("'","\\'")
+ sql = f"""insert into sql_rollback(redo_sql,undo_sql,workflow_id) values('{redo_sql}','{undo_sql}',{id});"""
+ cur.execute(sql)
+ except Exception as e:
+ logger.warning(f"备份失败,错误信息{traceback.format_exc()}")
+ return False
+ finally:
+            # Close the connection
+ if conn:
+ conn.close()
+ return True
+ def get_rollback(self, workflow):
+ """
+        Fetch the rollback statements, displayed in reverse execution order; returns ['original statement', 'rollback statement']
+ """
+ list_execute_result = json.loads(workflow.sqlworkflowcontent.execute_result)
+ workflow_id = workflow.sqlworkflowcontent.workflow_id
+        # Show the rollback statements in reverse order
+ list_execute_result.reverse()
+ list_backup_sql = []
+ try:
+            # Create the connection
+ conn = self.get_backup_connection()
+ cur = conn.cursor()
+ sql = f"""select redo_sql,undo_sql from sql_rollback where workflow_id = {workflow_id} order by id;"""
+ cur.execute(f"use ora_backup;")
+ cur.execute(sql)
+ list_tables = cur.fetchall()
+ for row in list_tables:
+ redo_sql = row[0]
+ if row[1] is None:
+ undo_sql = ' '
+ else:
+ undo_sql = row[1]
+                # Build the rollback list entries as ['original statement', 'rollback statement']
+ list_backup_sql.append([redo_sql,undo_sql])
+ except Exception as e:
+ logger.error(f"获取回滚语句报错,异常信息{traceback.format_exc()}")
+ raise Exception(e)
+        # Close the connection
+ if conn:
+ conn.close()
+ return list_backup_sql
+
def close(self):
if self.conn:
self.conn.close()
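
Patch 05 restores the MySQL-backed rollback store on top of the reverted execution path. Note that ora_backup_insert escapes single quotes by hand and interpolates the mined redo/undo text straight into the INSERT; a parameterized variant along these lines would let MySQLdb do the quoting instead. This is only a sketch against the same sql_rollback table, not what the patch itself does:

    def save_rollback_rows(conn, rows, workflow_id):
        # conn is the MySQLdb connection returned by get_backup_connection()
        cur = conn.cursor()
        cur.execute("use ora_backup")
        cur.executemany(
            "insert into sql_rollback(redo_sql, undo_sql, workflow_id) values (%s, %s, %s)",
            [(row[0], row[1], workflow_id) for row in rows])
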
From c3ad771318989a7c8da54d5b0c97c4955ef69e80 Mon Sep 17 00:00:00 2001
From: jan-song <60806666+jan-song@users.noreply.github.com>
Date: Mon, 6 Apr 2020 21:01:11 +0800
Subject: [PATCH 06/16] Update oracle.py
---
sql/engines/oracle.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/sql/engines/oracle.py b/sql/engines/oracle.py
index b75674c56e..18b04e8c96 100644
--- a/sql/engines/oracle.py
+++ b/sql/engines/oracle.py
@@ -173,7 +173,7 @@ def filter_sql(self, sql='', limit_num=0):
# 对查询sql增加limit限制
if re.match(r"^select|^with", sql_lower):
if re.match(r"^select|^with", sql_lower):
- return f"select a.* from ({sql.rstrip(';')}) a WHERE ROWNUM <= {limit_num}"
+ sql=f"select a.* from ({sql.rstrip(';')}) a WHERE ROWNUM <= {limit_num}"
return sql.strip()
def query(self, db_name=None, sql='', limit_num=0, close_conn=True, **kwargs):
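
Patch 06 changes filter_sql so the wrapped query is assigned back to sql instead of being returned directly, letting both branches fall through to the shared return sql.strip(). Condensed to a standalone function (the duplicated if from the earlier patches is collapsed here; the inputs are hypothetical), the resulting behaviour is:

    import re

    def filter_sql(sql='', limit_num=0):
        if re.match(r"^select|^with", sql.lower()):
            sql = f"select a.* from ({sql.rstrip(';')}) a WHERE ROWNUM <= {limit_num}"
        return sql.strip()

    print(filter_sql("select * from t_user;", 50))
    # select a.* from (select * from t_user) a WHERE ROWNUM <= 50
    print(filter_sql("delete from t_user where id = 1;", 50))
    # delete from t_user where id = 1;
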
From 0a04afedfe9b644d37dc44db31d1dcd8f5410e48 Mon Sep 17 00:00:00 2001
From: jan-song <60806666+jan-song@users.noreply.github.com>
Date: Tue, 7 Apr 2020 22:36:20 +0800
Subject: [PATCH 07/16] Update oracle.py
---
sql/engines/oracle.py | 115 ++++++++++++++++--------------------------
1 file changed, 44 insertions(+), 71 deletions(-)
diff --git a/sql/engines/oracle.py b/sql/engines/oracle.py
index 18b04e8c96..60cc082b33 100644
--- a/sql/engines/oracle.py
+++ b/sql/engines/oracle.py
@@ -278,18 +278,12 @@ def execute_check(self, db_name=None, sql=''):
return check_result
def execute_workflow(self, workflow, close_conn=True):
- """执行上线单,返回Review set
- 原来的逻辑是根据 sql_content简单来分割SQL,进而再执行这些SQL
- 新的逻辑变更为根据审核结果中记录的sql来执行,
- 如果是PLSQL存储过程等对象定义操作,还需检查确认新建对象是否编译通过!
- """
- review_content = workflow.sqlworkflowcontent.review_content
- review_result = json.loads(review_content)
- sqlitemList = get_exec_sqlitem_list(review_result, workflow.db_name)
-
+ """执行上线单,返回Review set"""
sql = workflow.sqlworkflowcontent.sql_content
execute_result = ReviewSet(full_sql=sql)
-
+ # 删除注释语句,切分语句,将切换CURRENT_SCHEMA语句增加到切分结果中
+ sql = sqlparse.format(sql, strip_comments=True)
+ split_sql = [f"ALTER SESSION SET CURRENT_SCHEMA = {workflow.db_name};"] + sqlparse.split(sql)
line = 1
statement = None
try:
@@ -301,46 +295,26 @@ def execute_workflow(self, workflow, close_conn=True):
rows = cursor.fetchone()
begin_time = rows[0]
# 逐条执行切分语句,追加到执行结果中
- for sqlitem in sqlitemList:
- statement = sqlitem.statement
- if sqlitem.stmt_type == "SQL":
- statement = statement.rstrip(';')
+ for statement in split_sql:
+ statement = statement.rstrip(';')
with FuncTimer() as t:
- cursor.execute(statement)
- conn.commit()
-
- rowcount = cursor.rowcount
- stagestatus = "Execute Successfully"
- if sqlitem.stmt_type == "PLSQL" and sqlitem.object_name and sqlitem.object_name != 'ANONYMOUS' and sqlitem.object_name != '':
- query_obj_sql = f"""SELECT OBJECT_NAME, STATUS, TO_CHAR(LAST_DDL_TIME, 'YYYY-MM-DD HH24:MI:SS') FROM ALL_OBJECTS
- WHERE OWNER = '{sqlitem.object_owner}'
- AND OBJECT_NAME = '{sqlitem.object_name}'
- """
- cursor.execute(query_obj_sql)
- row = cursor.fetchone()
- if row:
- status = row[1]
- if status and status == "INVALID":
- stagestatus = "Compile Failed. Object " + sqlitem.object_owner + "." + sqlitem.object_name + " is invalid."
- else:
- stagestatus = "Compile Failed. Object " + sqlitem.object_owner + "." + sqlitem.object_name + " doesn't exist."
-
- if stagestatus != "Execute Successfully":
- raise Exception(stagestatus)
-
+ if statement !='':
+ cursor.execute(statement)
+ conn.commit()
execute_result.rows.append(ReviewResult(
id=line,
errlevel=0,
- stagestatus=stagestatus,
+ stagestatus='Execute Successfully',
errormessage='None',
sql=statement,
- affected_rows=rowcount,
+ affected_rows=cursor.rowcount,
execute_time=t.cost,
))
line += 1
except Exception as e:
logger.warning(f"Oracle命令执行报错,语句:{statement or sql}, 错误信息:{traceback.format_exc()}")
execute_result.error = str(e)
+ #conn.rollback()
# 追加当前报错语句信息到执行结果中
execute_result.rows.append(ReviewResult(
id=line,
@@ -353,50 +327,32 @@ def execute_workflow(self, workflow, close_conn=True):
))
line += 1
# 报错语句后面的语句标记为审核通过、未执行,追加到执行结果中
- for sqlitem in sqlitemList[line - 1:]:
+ for statement in split_sql[line - 1:]:
execute_result.rows.append(ReviewResult(
id=line,
errlevel=0,
stagestatus='Audit completed',
errormessage=f'前序语句失败, 未执行',
- sql=sqlitem.statement,
+ sql=statement,
affected_rows=0,
execute_time=0,
))
line += 1
finally:
- # 生成回滚SQL,执行用户需要有grant select any transaction to 权限,需要有grant execute on dbms_logmnr to权限
- # 数据库需开启最小化附加日志alter database add supplemental log data;
- # 需为归档模式;开启附件日志会增加redo日志量,一般不会有多大影响,需评估归档磁盘空间,redo磁盘IO性能
cursor.execute(f"select sysdate from dual")
rows = cursor.fetchone()
end_time = rows[0]
- logmnr_start_sql = f'''begin
- dbms_logmnr.start_logmnr(
- starttime=>to_date('{begin_time}','yyyy-mm-dd hh24:mi:ss'),
- endtime=>to_date('{end_time}','yyyy/mm/dd hh24:mi:ss'),
- options=>dbms_logmnr.dict_from_online_catalog + dbms_logmnr.continuous_mine);
- end;'''
- undo_sql = f'''select sql_redo,sql_undo from v$logmnr_contents where
- SEG_OWNER not in ('SYS','SYSTEM')
- and session# = (select s.sid from v$session s where s.sid = (select sid from v$mystat where rownum = 1 ))
- and serial# = (select serial# from v$session s where s.sid = (select sid from v$mystat where rownum = 1 )) order by scn desc'''
- logmnr_end_sql = f'''begin
- dbms_logmnr.end_logmnr;
- end;'''
- workflow_id = f"{workflow.sqlworkflowcontent.workflow_id}"
- cursor.execute(logmnr_start_sql)
- cursor.execute(undo_sql)
- rows = cursor.fetchall()
- cursor.execute(logmnr_end_sql)
- self.ora_backup_insert(rows=rows, id=workflow_id)
+ self.backup(id=workflow.id,cursor=cursor,begin_time=begin_time,end_time=end_time)
if close_conn:
self.close()
return execute_result
-
- def ora_backup_insert(self, rows = [], id = 0):
+
+ def backup(self,id,cursor,begin_time,end_time):
# 回滚SQL入库
- # 创建连接
+ # 生成回滚SQL,执行用户需要有grant select any transaction to 权限,需要有grant execute on dbms_logmnr to权限
+ # 数据库需开启最小化附加日志alter database add supplemental log data;
+ # 需为归档模式;开启附件日志会增加redo日志量,一般不会有多大影响,需评估归档磁盘空间,redo磁盘IO性能
+ # 创建备份库连接
try:
conn = self.get_backup_connection()
cur = conn.cursor()
@@ -410,11 +366,31 @@ def ora_backup_insert(self, rows = [], id = 0):
PRIMARY KEY (`id`),
key `idx_sql_rollback_01` (`workflow_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;""")
+ logmnr_start_sql = f'''begin
+ dbms_logmnr.start_logmnr(
+ starttime=>to_date('{begin_time}','yyyy-mm-dd hh24:mi:ss'),
+ endtime=>to_date('{end_time}','yyyy/mm/dd hh24:mi:ss'),
+ options=>dbms_logmnr.dict_from_online_catalog + dbms_logmnr.continuous_mine);
+ end;'''
+ undo_sql = f'''select sql_redo,sql_undo from v$logmnr_contents where
+ SEG_OWNER not in ('SYS','SYSTEM')
+ and session# = (select s.sid from v$session s where s.sid = (select sid from v$mystat where rownum = 1 ))
+ and serial# = (select serial# from v$session s where s.sid = (select sid from v$mystat where rownum = 1 )) order by scn desc'''
+ logmnr_end_sql = f'''begin
+ dbms_logmnr.end_logmnr;
+ end;'''
+ cursor.execute(logmnr_start_sql)
+ cursor.execute(undo_sql)
+ rows = cursor.fetchall()
+ cursor.execute(logmnr_end_sql)
if len(rows) > 0:
for row in rows:
redo_sql=f"{row[0]}"
redo_sql=redo_sql.replace("'","\\'")
- undo_sql=f"{row[1]}"
+ if row[1] is None:
+ undo_sql = f' '
+ else:
+ undo_sql=f"{row[1]}"
undo_sql=undo_sql.replace("'","\\'")
sql = f"""insert into sql_rollback(redo_sql,undo_sql,workflow_id) values('{redo_sql}','{undo_sql}',{id});"""
cur.execute(sql)
@@ -432,7 +408,7 @@ def get_rollback(self, workflow):
获取回滚语句,并且按照执行顺序倒序展示,return ['源语句','回滚语句']
"""
list_execute_result = json.loads(workflow.sqlworkflowcontent.execute_result)
- workflow_id = workflow.sqlworkflowcontent.workflow_id
+ workflow_id = workflow.id
# 回滚语句倒序展示
list_execute_result.reverse()
list_backup_sql = []
@@ -446,10 +422,7 @@ def get_rollback(self, workflow):
list_tables = cur.fetchall()
for row in list_tables:
redo_sql = row[0]
- if row[1] is None:
- undo_sql = ' '
- else:
- undo_sql = row[1]
+ undo_sql = row[1]
# 拼接成回滚语句列表,['源语句','回滚语句']
list_backup_sql.append([redo_sql,undo_sql])
except Exception as e:
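
For reference, a minimal standalone sketch of the LogMiner capture that backup() performs above, assuming a cx_Oracle connection whose user has the privileges the patch comments list (EXECUTE ON DBMS_LOGMNR, SELECT ANY TRANSACTION, supplemental logging, archivelog mode). It binds the time window as datetime values instead of interpolating strings; the helper name mine_undo_sql is hypothetical:

    # Sketch only: mine redo/undo SQL for the current session between two timestamps.
    import cx_Oracle

    def mine_undo_sql(conn, begin_time, end_time):
        """Return [(sql_redo, sql_undo), ...] mined for the current session."""
        cursor = conn.cursor()
        # starttime/endtime are bound as Python datetime values (Oracle DATE);
        # continuous_mine mirrors the patch and is unavailable on newer Oracle releases.
        cursor.execute("""
            begin
                dbms_logmnr.start_logmnr(
                    starttime => :begin_time,
                    endtime   => :end_time,
                    options   => dbms_logmnr.dict_from_online_catalog + dbms_logmnr.continuous_mine);
            end;""", begin_time=begin_time, end_time=end_time)
        try:
            cursor.execute("""
                select sql_redo, sql_undo
                  from v$logmnr_contents
                 where seg_owner not in ('SYS', 'SYSTEM')
                   and session# = (select sid from v$mystat where rownum = 1)
                   and serial# = (select serial# from v$session s
                                   where s.sid = (select sid from v$mystat where rownum = 1))
                 order by scn desc""")
            return cursor.fetchall()
        finally:
            cursor.execute("begin dbms_logmnr.end_logmnr; end;")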
From d39946d5ed3c94d38eab7d3279c4b13fad06afe1 Mon Sep 17 00:00:00 2001
From: Jan <60806666+jan-song@users.noreply.github.com>
Date: Sun, 12 Apr 2020 22:08:38 +0800
Subject: [PATCH 08/16] Update oracle.py
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Add Oracle rollback support, support querying Oracle execution plans, improve the table-structure and schema display, and fix the bug in adding the LIMIT restriction.
---
sql/engines/oracle.py | 109 +++++++++++++++++++++++++-----------------
1 file changed, 66 insertions(+), 43 deletions(-)
diff --git a/sql/engines/oracle.py b/sql/engines/oracle.py
index 60cc082b33..8fc869c510 100644
--- a/sql/engines/oracle.py
+++ b/sql/engines/oracle.py
@@ -4,7 +4,6 @@
import traceback
import re
import sqlparse
-import MySQLdb
import simplejson as json
from common.config import SysConfig
@@ -45,7 +44,7 @@ def name(self):
@property
def info(self):
return 'Oracle engine'
-
+
@property
def auto_backup(self):
"""是否支持备份"""
@@ -65,7 +64,6 @@ def get_backup_connection():
charset='utf8mb4',
autocommit=True
)
-
@property
def server_version(self):
conn = self.get_connection()
@@ -110,7 +108,7 @@ def _get_all_schemas(self):
def get_all_tables(self, db_name, **kwargs):
"""获取table 列表, 返回一个ResultSet"""
- sql = f"""SELECT table_name FROM all_tables WHERE nvl(tablespace_name, 'no tablespace') NOT IN ('SYSTEM', 'SYSAUX') AND OWNER = '{db_name}' AND IOT_NAME IS NULL AND DURATION IS NULL
+ sql = f"""SELECT table_name FROM all_tables WHERE nvl(tablespace_name, 'no tablespace') NOT IN ('SYSTEM', 'SYSAUX') AND OWNER = '{db_name}' AND IOT_NAME IS NULL AND DURATION IS NULL order by table_name
"""
result = self.query(sql=sql)
tb_list = [row[0] for row in result.rows if row[0] not in ['test']]
@@ -134,7 +132,7 @@ def describe_table(self, db_name, tb_name, **kwargs):
nullable,
data_default
FROM all_tab_cols
- WHERE table_name = '{tb_name}' and owner = '{db_name}'
+ WHERE table_name = '{tb_name}' and owner = '{db_name}' order by column_id
"""
result = self.query(sql=sql)
return result
@@ -156,7 +154,7 @@ def query_check(self, db_name=None, sql=''):
return result
if re.match(r"^select|^with|^explain", sql_lower) is None:
result['bad_query'] = True
- result['msg'] = '仅支持^select|^with|^explain查询语法!'
+ result['msg'] = '仅支持^select语法!'
return result
if re.search(star_patter, sql_lower) is not None:
keyword_warning += '禁止使用 * 关键词\n'
@@ -172,8 +170,7 @@ def filter_sql(self, sql='', limit_num=0):
sql_lower = sql.lower()
# 对查询sql增加limit限制
if re.match(r"^select|^with", sql_lower):
- if re.match(r"^select|^with", sql_lower):
- sql=f"select a.* from ({sql.rstrip(';')}) a WHERE ROWNUM <= {limit_num}"
+ sql = f"select a.* from ({sql.rstrip(';')}) a WHERE ROWNUM <= {limit_num}"
return sql.strip()
def query(self, db_name=None, sql='', limit_num=0, close_conn=True, **kwargs):
@@ -184,11 +181,12 @@ def query(self, db_name=None, sql='', limit_num=0, close_conn=True, **kwargs):
cursor = conn.cursor()
if db_name:
cursor.execute(f"ALTER SESSION SET CURRENT_SCHEMA = {db_name}")
+ sql = sql.rstrip(';')
        # 支持oracle查询SQL执行计划语句
if re.match(r"^explain", sql, re.I):
- cursor.execute(sql)
- # 重置SQL文本,获取SQL执行计划
- sql = f"select PLAN_TABLE_OUTPUT from table(dbms_xplan.display)"
+ cursor.execute(sql)
+ # 重置SQL文本,获取SQL执行计划
+ sql = f"select PLAN_TABLE_OUTPUT from table(dbms_xplan.display)"
cursor.execute(sql)
fields = cursor.description
if any(x[1] == cx_Oracle.CLOB for x in fields):
@@ -278,43 +276,69 @@ def execute_check(self, db_name=None, sql=''):
return check_result
def execute_workflow(self, workflow, close_conn=True):
- """执行上线单,返回Review set"""
+ """执行上线单,返回Review set
+ 原来的逻辑是根据 sql_content简单来分割SQL,进而再执行这些SQL
+ 新的逻辑变更为根据审核结果中记录的sql来执行,
+ 如果是PLSQL存储过程等对象定义操作,还需检查确认新建对象是否编译通过!
+ """
+ review_content = workflow.sqlworkflowcontent.review_content
+ review_result = json.loads(review_content)
+ sqlitemList = get_exec_sqlitem_list(review_result, workflow.db_name)
+
sql = workflow.sqlworkflowcontent.sql_content
execute_result = ReviewSet(full_sql=sql)
- # 删除注释语句,切分语句,将切换CURRENT_SCHEMA语句增加到切分结果中
- sql = sqlparse.format(sql, strip_comments=True)
- split_sql = [f"ALTER SESSION SET CURRENT_SCHEMA = {workflow.db_name};"] + sqlparse.split(sql)
+
line = 1
statement = None
try:
conn = self.get_connection()
cursor = conn.cursor()
- #获取执行工单时间,用于备份SQL的日志挖掘起始时间
+ # 获取执行工单时间,用于备份SQL的日志挖掘起始时间
cursor.execute(f"alter session set nls_date_format='yyyy-mm-dd hh24:mi:ss'")
cursor.execute(f"select sysdate from dual")
rows = cursor.fetchone()
begin_time = rows[0]
# 逐条执行切分语句,追加到执行结果中
- for statement in split_sql:
- statement = statement.rstrip(';')
+ for sqlitem in sqlitemList:
+ statement = sqlitem.statement
+ if sqlitem.stmt_type == "SQL":
+ statement = statement.rstrip(';')
with FuncTimer() as t:
- if statement !='':
- cursor.execute(statement)
- conn.commit()
+ cursor.execute(statement)
+ conn.commit()
+
+ rowcount = cursor.rowcount
+ stagestatus = "Execute Successfully"
+ if sqlitem.stmt_type == "PLSQL" and sqlitem.object_name and sqlitem.object_name != 'ANONYMOUS' and sqlitem.object_name != '':
+ query_obj_sql = f"""SELECT OBJECT_NAME, STATUS, TO_CHAR(LAST_DDL_TIME, 'YYYY-MM-DD HH24:MI:SS') FROM ALL_OBJECTS
+ WHERE OWNER = '{sqlitem.object_owner}'
+ AND OBJECT_NAME = '{sqlitem.object_name}'
+ """
+ cursor.execute(query_obj_sql)
+ row = cursor.fetchone()
+ if row:
+ status = row[1]
+ if status and status == "INVALID":
+ stagestatus = "Compile Failed. Object " + sqlitem.object_owner + "." + sqlitem.object_name + " is invalid."
+ else:
+ stagestatus = "Compile Failed. Object " + sqlitem.object_owner + "." + sqlitem.object_name + " doesn't exist."
+
+ if stagestatus != "Execute Successfully":
+ raise Exception(stagestatus)
+
execute_result.rows.append(ReviewResult(
id=line,
errlevel=0,
- stagestatus='Execute Successfully',
+ stagestatus=stagestatus,
errormessage='None',
sql=statement,
- affected_rows=cursor.rowcount,
+ affected_rows=rowcount,
execute_time=t.cost,
))
line += 1
except Exception as e:
logger.warning(f"Oracle命令执行报错,语句:{statement or sql}, 错误信息:{traceback.format_exc()}")
execute_result.error = str(e)
- #conn.rollback()
# 追加当前报错语句信息到执行结果中
execute_result.rows.append(ReviewResult(
id=line,
@@ -327,13 +351,13 @@ def execute_workflow(self, workflow, close_conn=True):
))
line += 1
# 报错语句后面的语句标记为审核通过、未执行,追加到执行结果中
- for statement in split_sql[line - 1:]:
+ for sqlitem in sqlitemList[line - 1:]:
execute_result.rows.append(ReviewResult(
id=line,
errlevel=0,
stagestatus='Audit completed',
errormessage=f'前序语句失败, 未执行',
- sql=statement,
+ sql=sqlitem.statement,
affected_rows=0,
execute_time=0,
))
@@ -342,12 +366,12 @@ def execute_workflow(self, workflow, close_conn=True):
cursor.execute(f"select sysdate from dual")
rows = cursor.fetchone()
end_time = rows[0]
- self.backup(id=workflow.id,cursor=cursor,begin_time=begin_time,end_time=end_time)
+ self.backup(workflow_id=workflow.id, cursor=cursor, begin_time=begin_time, end_time=end_time)
if close_conn:
self.close()
return execute_result
- def backup(self,id,cursor,begin_time,end_time):
+ def backup(self,workflow_id,cursor,begin_time,end_time):
# 回滚SQL入库
# 生成回滚SQL,执行用户需要有grant select any transaction to 权限,需要有grant execute on dbms_logmnr to权限
# 数据库需开启最小化附加日志alter database add supplemental log data;
@@ -367,18 +391,18 @@ def backup(self,id,cursor,begin_time,end_time):
key `idx_sql_rollback_01` (`workflow_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;""")
logmnr_start_sql = f'''begin
- dbms_logmnr.start_logmnr(
- starttime=>to_date('{begin_time}','yyyy-mm-dd hh24:mi:ss'),
- endtime=>to_date('{end_time}','yyyy/mm/dd hh24:mi:ss'),
- options=>dbms_logmnr.dict_from_online_catalog + dbms_logmnr.continuous_mine);
- end;'''
- undo_sql = f'''select sql_redo,sql_undo from v$logmnr_contents where
- SEG_OWNER not in ('SYS','SYSTEM')
- and session# = (select s.sid from v$session s where s.sid = (select sid from v$mystat where rownum = 1 ))
- and serial# = (select serial# from v$session s where s.sid = (select sid from v$mystat where rownum = 1 )) order by scn desc'''
+ dbms_logmnr.start_logmnr(
+ starttime=>to_date('{begin_time}','yyyy-mm-dd hh24:mi:ss'),
+ endtime=>to_date('{end_time}','yyyy/mm/dd hh24:mi:ss'),
+ options=>dbms_logmnr.dict_from_online_catalog + dbms_logmnr.continuous_mine);
+ end;'''
+ undo_sql = f'''select sql_redo,sql_undo from v$logmnr_contents
+ where SEG_OWNER not in ('SYS','SYSTEM')
+ and session# = (select sid from v$mystat where rownum = 1)
+ and serial# = (select serial# from v$session s where s.sid = (select sid from v$mystat where rownum = 1 )) order by scn desc'''
logmnr_end_sql = f'''begin
- dbms_logmnr.end_logmnr;
- end;'''
+ dbms_logmnr.end_logmnr;
+ end;'''
cursor.execute(logmnr_start_sql)
cursor.execute(undo_sql)
rows = cursor.fetchall()
@@ -392,7 +416,7 @@ def backup(self,id,cursor,begin_time,end_time):
else:
undo_sql=f"{row[1]}"
undo_sql=undo_sql.replace("'","\\'")
- sql = f"""insert into sql_rollback(redo_sql,undo_sql,workflow_id) values('{redo_sql}','{undo_sql}',{id});"""
+ sql = f"""insert into sql_rollback(redo_sql,undo_sql,workflow_id) values('{redo_sql}','{undo_sql}',{workflow_id});"""
cur.execute(sql)
except Exception as e:
logger.warning(f"备份失败,错误信息{traceback.format_exc()}")
@@ -408,7 +432,6 @@ def get_rollback(self, workflow):
获取回滚语句,并且按照执行顺序倒序展示,return ['源语句','回滚语句']
"""
list_execute_result = json.loads(workflow.sqlworkflowcontent.execute_result)
- workflow_id = workflow.id
# 回滚语句倒序展示
list_execute_result.reverse()
list_backup_sql = []
@@ -416,7 +439,7 @@ def get_rollback(self, workflow):
# 创建连接
conn = self.get_backup_connection()
cur = conn.cursor()
- sql = f"""select redo_sql,undo_sql from sql_rollback where workflow_id = {workflow_id} order by id;"""
+ sql = f"""select redo_sql,undo_sql from sql_rollback where workflow_id = {workflow.id} order by id;"""
cur.execute(f"use ora_backup;")
cur.execute(sql)
list_tables = cur.fetchall()
@@ -432,7 +455,7 @@ def get_rollback(self, workflow):
if conn:
conn.close()
return list_backup_sql
-
+
def close(self):
if self.conn:
self.conn.close()
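
Patch 08's query() change above routes an explain-prefixed statement through EXPLAIN PLAN FOR and then reads dbms_xplan.display. A self-contained sketch of that flow outside the engine class, assuming a cx_Oracle connection with access to a PLAN_TABLE; fetch_plan is a hypothetical helper name:

    # Sketch only: populate PLAN_TABLE and read back the formatted plan.
    import cx_Oracle

    def fetch_plan(conn, select_sql):
        """Run EXPLAIN PLAN FOR <select_sql> and return the formatted plan lines."""
        cursor = conn.cursor()
        cursor.execute(f"explain plan for {select_sql.rstrip(';')}")
        # dbms_xplan.display reads the most recently explained statement from PLAN_TABLE
        cursor.execute("select plan_table_output from table(dbms_xplan.display)")
        return [row[0] for row in cursor.fetchall()]

    # Usage: fetch_plan(conn, "select * from scott.emp where deptno = 10")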
From cfa36856df02ec3851779627fa1fe56ffa1026b5 Mon Sep 17 00:00:00 2001
From: Jan <60806666+jan-song@users.noreply.github.com>
Date: Sun, 12 Apr 2020 22:12:08 +0800
Subject: [PATCH 09/16] Update oracle.py
---
sql/engines/oracle.py | 1 +
1 file changed, 1 insertion(+)
diff --git a/sql/engines/oracle.py b/sql/engines/oracle.py
index 8fc869c510..afb5aff7c1 100644
--- a/sql/engines/oracle.py
+++ b/sql/engines/oracle.py
@@ -4,6 +4,7 @@
import traceback
import re
import sqlparse
+import MySQLdb
import simplejson as json
from common.config import SysConfig
From 5d88a69b286c7169b95b97b2e0d4519c9aba80d4 Mon Sep 17 00:00:00 2001
From: Jan <60806666+jan-song@users.noreply.github.com>
Date: Fri, 24 Apr 2020 20:34:38 +0800
Subject: [PATCH 10/16] Add files via upload
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Add support for viewing Oracle execution plans.
---
sql/templates/sqlquery.html | 26 +++++++++++++++++++-------
1 file changed, 19 insertions(+), 7 deletions(-)
diff --git a/sql/templates/sqlquery.html b/sql/templates/sqlquery.html
index 5ca630ec7b..74364fca95 100644
--- a/sql/templates/sqlquery.html
+++ b/sql/templates/sqlquery.html
@@ -777,6 +777,7 @@
收藏语句
//将数据通过ajax提交给后端进行检查
function sqlquery(sql) {
+ var optgroup = $('#instance_name :selected').parent().attr('label');
var select_sqlContent = editor.session.getTextRange(editor.getSelectionRange());
if (select_sqlContent) {
sqlContent = select_sqlContent
@@ -784,13 +785,24 @@ 收藏语句
var sqlContent = editor.getValue();
}
- //查看执行计划
- if (sql === 'explain') {
+ if (optgroup === "Oracle") {
+ //查看执行计划
+ if (sql === 'explain') {
+ sqlContent = 'explain plan for ' + sqlContent
+ }
+ //查看表结构
+ else if (sql === 'show create table') {
+ sqlContent = "desc " + $("#table_name").val() + ";"
+ }
+ } else if (optgroup === "MySQL") {
+ //查看执行计划
+ if (sql === 'explain') {
sqlContent = 'explain ' + sqlContent
- }
- //查看表结构
- else if (sql === 'show create table') {
- sqlContent = "show create table " + $("#table_name").val() + ";"
+ }
+ //查看表结构
+ else if (sql === 'show create table') {
+ sqlContent = "show create table " + $("#table_name").val() + ";"
+ }
}
//提交请求
$.ajax({
@@ -863,7 +875,7 @@ 收藏语句
redis_help_tab_remove();
}
$("#btn-format").attr('disabled', false);
- $("#btn-explain").attr('disabled', true);
+ $("#btn-explain").attr('disabled', false);
} else if (optgroup === "Mongo") {
if (change) {
$("#div-table_name").show();
From dcde64411584b5b5082b05f4e506b0693913614f Mon Sep 17 00:00:00 2001
From: Jan <60806666+jan-song@users.noreply.github.com>
Date: Fri, 24 Apr 2020 20:37:05 +0800
Subject: [PATCH 11/16] Add files via upload
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Add SQL Tuning Advisor support for Oracle.
---
sql/templates/sqladvisor.html | 1960 ++++++++++++++++++---------------
1 file changed, 1047 insertions(+), 913 deletions(-)
diff --git a/sql/templates/sqladvisor.html b/sql/templates/sqladvisor.html
index 4b60545358..e9de1a00fc 100644
--- a/sql/templates/sqladvisor.html
+++ b/sql/templates/sqladvisor.html
@@ -1,913 +1,1047 @@
-{% extends "base.html" %}
-[old sqladvisor.html body: HTML markup lost in extraction; recoverable headings were the 优化建议 (optimization advice) panel with TABLE STRUCTURE, TABLE INFO and INDEX INFO sections, followed by the {% block js %} / {% load static %} script block]
+{% extends "base.html" %}
+[new sqladvisor.html body: HTML markup likewise lost in extraction; the same panel, sections and script block are re-added as a full rewrite of the file (913 lines removed, 1047 added per the hunk header)]
\ No newline at end of file
From 7d7d696f224af2325f4ffee6507472507717a412 Mon Sep 17 00:00:00 2001
From: Jan <60806666+jan-song@users.noreply.github.com>
Date: Fri, 24 Apr 2020 20:41:33 +0800
Subject: [PATCH 12/16] Add files via upload
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Add the Oracle SQL Tuning Advisor view.
---
sql/sql_optimize.py | 497 ++++++++++++++++++++++++--------------------
1 file changed, 273 insertions(+), 224 deletions(-)
diff --git a/sql/sql_optimize.py b/sql/sql_optimize.py
index 161eb6fba7..0d0049fb0c 100644
--- a/sql/sql_optimize.py
+++ b/sql/sql_optimize.py
@@ -1,224 +1,273 @@
-# -*- coding: UTF-8 -*-
-"""
-@author: hhyo
-@license: Apache Licence
-@file: sql_optimize.py
-@time: 2019/03/04
-"""
-import re
-
-import simplejson as json
-import sqlparse
-from django.contrib.auth.decorators import permission_required
-from django.http import HttpResponse
-from common.config import SysConfig
-from common.utils.extend_json_encoder import ExtendJSONEncoder
-from sql.engines import get_engine
-from sql.models import Instance
-from sql.plugins.soar import Soar
-from sql.plugins.sqladvisor import SQLAdvisor
-from sql.sql_tuning import SqlTuning
-from sql.utils.resource_group import user_instances
-
-__author__ = 'hhyo'
-
-
-@permission_required('sql.optimize_sqladvisor', raise_exception=True)
-def optimize_sqladvisor(request):
- sql_content = request.POST.get('sql_content')
- instance_name = request.POST.get('instance_name')
- db_name = request.POST.get('db_name')
- verbose = request.POST.get('verbose', 1)
- result = {'status': 0, 'msg': 'ok', 'data': []}
-
- # 服务器端参数验证
- if sql_content is None or instance_name is None:
- result['status'] = 1
- result['msg'] = '页面提交参数可能为空'
- return HttpResponse(json.dumps(result), content_type='application/json')
-
- try:
- instance_info = user_instances(request.user, db_type=['mysql']).get(instance_name=instance_name)
- except Instance.DoesNotExist:
- result['status'] = 1
- result['msg'] = '你所在组未关联该实例!'
- return HttpResponse(json.dumps(result), content_type='application/json')
-
- # 检查sqladvisor程序路径
- sqladvisor_path = SysConfig().get('sqladvisor')
- if sqladvisor_path is None:
- result['status'] = 1
- result['msg'] = '请配置SQLAdvisor路径!'
- return HttpResponse(json.dumps(result), content_type='application/json')
-
- # 提交给sqladvisor获取分析报告
- sqladvisor = SQLAdvisor()
- # 准备参数
- args = {"h": instance_info.host,
- "P": instance_info.port,
- "u": instance_info.user,
- "p": instance_info.password,
- "d": db_name,
- "v": verbose,
- "q": sql_content.strip().replace('"', '\\"').replace('`', '').replace('\n', ' ')
- }
-
- # 参数检查
- args_check_result = sqladvisor.check_args(args)
- if args_check_result['status'] == 1:
- return HttpResponse(json.dumps(args_check_result), content_type='application/json')
- # 参数转换
- cmd_args = sqladvisor.generate_args2cmd(args, shell=True)
- # 执行命令
- try:
- stdout, stderr = sqladvisor.execute_cmd(cmd_args, shell=True).communicate()
- result['data'] = f'{stdout}{stderr}'
- except RuntimeError as e:
- result['status'] = 1
- result['msg'] = str(e)
- return HttpResponse(json.dumps(result), content_type='application/json')
-
-
-@permission_required('sql.optimize_soar', raise_exception=True)
-def optimize_soar(request):
- instance_name = request.POST.get('instance_name')
- db_name = request.POST.get('db_name')
- sql = request.POST.get('sql')
- result = {'status': 0, 'msg': 'ok', 'data': []}
-
- # 服务器端参数验证
- if not (instance_name and db_name and sql):
- result['status'] = 1
- result['msg'] = '页面提交参数可能为空'
- return HttpResponse(json.dumps(result), content_type='application/json')
- try:
- instance_info = user_instances(request.user, db_type=['mysql']).get(instance_name=instance_name)
- except Exception:
- result['status'] = 1
- result['msg'] = '你所在组未关联该实例'
- return HttpResponse(json.dumps(result), content_type='application/json')
-
- # 检查测试实例的连接信息和soar程序路径
- soar_test_dsn = SysConfig().get('soar_test_dsn')
- soar_path = SysConfig().get('soar')
- if not (soar_path and soar_test_dsn):
- result['status'] = 1
- result['msg'] = '请配置soar_path和test_dsn!'
- return HttpResponse(json.dumps(result), content_type='application/json')
-
- # 目标实例的连接信息
- online_dsn = "{user}:{pwd}@{host}:{port}/{db}".format(user=instance_info.user,
- pwd=instance_info.password,
- host=instance_info.host,
- port=instance_info.port,
- db=db_name)
-
- # 提交给soar获取分析报告
- soar = Soar()
- # 准备参数
- args = {"online-dsn": online_dsn,
- "test-dsn": soar_test_dsn,
- "allow-online-as-test": "false",
- "report-type": "markdown",
- "query": sql.strip().replace('"', '\\"').replace('`', '').replace('\n', ' ')
- }
- # 参数检查
- args_check_result = soar.check_args(args)
- if args_check_result['status'] == 1:
- return HttpResponse(json.dumps(args_check_result), content_type='application/json')
- # 参数转换
- cmd_args = soar.generate_args2cmd(args, shell=True)
- # 执行命令
- try:
- stdout, stderr = soar.execute_cmd(cmd_args, shell=True).communicate()
- result['data'] = stdout if stdout else stderr
- except RuntimeError as e:
- result['status'] = 1
- result['msg'] = str(e)
- return HttpResponse(json.dumps(result), content_type='application/json')
-
-
-@permission_required('sql.optimize_sqltuning', raise_exception=True)
-def optimize_sqltuning(request):
- instance_name = request.POST.get('instance_name')
- db_name = request.POST.get('db_name')
- sqltext = request.POST.get('sql_content')
- option = request.POST.getlist('option[]')
-
- try:
- user_instances(request.user).get(instance_name=instance_name)
- except Instance.DoesNotExist:
- result = {'status': 1, 'msg': '你所在组未关联该实例!', 'data': []}
- return HttpResponse(json.dumps(result), content_type='application/json')
-
- sql_tunning = SqlTuning(instance_name=instance_name, db_name=db_name, sqltext=sqltext)
- result = {'status': 0, 'msg': 'ok', 'data': {}}
- if 'sys_parm' in option:
- basic_information = sql_tunning.basic_information()
- sys_parameter = sql_tunning.sys_parameter()
- optimizer_switch = sql_tunning.optimizer_switch()
- result['data']['basic_information'] = basic_information
- result['data']['sys_parameter'] = sys_parameter
- result['data']['optimizer_switch'] = optimizer_switch
- if 'sql_plan' in option:
- plan, optimizer_rewrite_sql = sql_tunning.sqlplan()
- result['data']['optimizer_rewrite_sql'] = optimizer_rewrite_sql
- result['data']['plan'] = plan
- if 'obj_stat' in option:
- result['data']['object_statistics'] = sql_tunning.object_statistics()
- if 'sql_profile' in option:
- session_status = sql_tunning.exec_sql()
- result['data']['session_status'] = session_status
- # 关闭连接
- sql_tunning.engine.close()
- result['data']['sqltext'] = sqltext
- return HttpResponse(json.dumps(result, cls=ExtendJSONEncoder, bigint_as_string=True),
- content_type='application/json')
-
-
-def explain(request):
- """
- SQL优化界面获取SQL执行计划
- :param request:
- :return:
- """
- sql_content = request.POST.get('sql_content')
- instance_name = request.POST.get('instance_name')
- db_name = request.POST.get('db_name')
- result = {'status': 0, 'msg': 'ok', 'data': []}
-
- # 服务器端参数验证
- if sql_content is None or instance_name is None:
- result['status'] = 1
- result['msg'] = '页面提交参数可能为空'
- return HttpResponse(json.dumps(result), content_type='application/json')
-
- try:
- instance = user_instances(request.user).get(instance_name=instance_name)
- except Instance.DoesNotExist:
- result = {'status': 1, 'msg': '实例不存在', 'data': []}
- return HttpResponse(json.dumps(result), content_type='application/json')
-
- # 删除注释语句,进行语法判断,执行第一条有效sql
- sql_content = sqlparse.format(sql_content.strip(), strip_comments=True)
- try:
- sql_content = sqlparse.split(sql_content)[0]
- except IndexError:
- result['status'] = 1
- result['msg'] = '没有有效的SQL语句'
- return HttpResponse(json.dumps(result), content_type='application/json')
- else:
- # 过滤非explain的语句
- if not re.match(r"^explain", sql_content, re.I):
- result['status'] = 1
- result['msg'] = '仅支持explain开头的语句,请检查'
- return HttpResponse(json.dumps(result), content_type='application/json')
-
- # 执行获取执行计划语句
- query_engine = get_engine(instance=instance)
- sql_result = query_engine.query(str(db_name), sql_content).to_sep_dict()
- result['data'] = sql_result
-
- # 返回查询结果
- return HttpResponse(json.dumps(result, cls=ExtendJSONEncoder, bigint_as_string=True),
- content_type='application/json')
+# -*- coding: UTF-8 -*-
+"""
+@author: hhyo
+@license: Apache Licence
+@file: sql_optimize.py
+@time: 2019/03/04
+"""
+import re
+
+import simplejson as json
+import sqlparse
+from django.contrib.auth.decorators import permission_required
+from django.http import HttpResponse
+from common.config import SysConfig
+from common.utils.extend_json_encoder import ExtendJSONEncoder
+from sql.engines import get_engine
+from sql.models import Instance
+from sql.plugins.soar import Soar
+from sql.plugins.sqladvisor import SQLAdvisor
+from sql.sql_tuning import SqlTuning
+from sql.utils.resource_group import user_instances
+
+__author__ = 'hhyo'
+
+
+@permission_required('sql.optimize_sqladvisor', raise_exception=True)
+def optimize_sqladvisor(request):
+ sql_content = request.POST.get('sql_content')
+ instance_name = request.POST.get('instance_name')
+ db_name = request.POST.get('db_name')
+ verbose = request.POST.get('verbose', 1)
+ result = {'status': 0, 'msg': 'ok', 'data': []}
+
+ # 服务器端参数验证
+ if sql_content is None or instance_name is None:
+ result['status'] = 1
+ result['msg'] = '页面提交参数可能为空'
+ return HttpResponse(json.dumps(result), content_type='application/json')
+
+ try:
+ instance_info = user_instances(request.user, db_type=['mysql']).get(instance_name=instance_name)
+ except Instance.DoesNotExist:
+ result['status'] = 1
+ result['msg'] = '你所在组未关联该实例!'
+ return HttpResponse(json.dumps(result), content_type='application/json')
+
+ # 检查sqladvisor程序路径
+ sqladvisor_path = SysConfig().get('sqladvisor')
+ if sqladvisor_path is None:
+ result['status'] = 1
+ result['msg'] = '请配置SQLAdvisor路径!'
+ return HttpResponse(json.dumps(result), content_type='application/json')
+
+ # 提交给sqladvisor获取分析报告
+ sqladvisor = SQLAdvisor()
+ # 准备参数
+ args = {"h": instance_info.host,
+ "P": instance_info.port,
+ "u": instance_info.user,
+ "p": instance_info.password,
+ "d": db_name,
+ "v": verbose,
+ "q": sql_content.strip().replace('"', '\\"').replace('`', '').replace('\n', ' ')
+ }
+
+ # 参数检查
+ args_check_result = sqladvisor.check_args(args)
+ if args_check_result['status'] == 1:
+ return HttpResponse(json.dumps(args_check_result), content_type='application/json')
+ # 参数转换
+ cmd_args = sqladvisor.generate_args2cmd(args, shell=True)
+ # 执行命令
+ try:
+ stdout, stderr = sqladvisor.execute_cmd(cmd_args, shell=True).communicate()
+ result['data'] = f'{stdout}{stderr}'
+ except RuntimeError as e:
+ result['status'] = 1
+ result['msg'] = str(e)
+ return HttpResponse(json.dumps(result), content_type='application/json')
+
+
+@permission_required('sql.optimize_soar', raise_exception=True)
+def optimize_soar(request):
+ instance_name = request.POST.get('instance_name')
+ db_name = request.POST.get('db_name')
+ sql = request.POST.get('sql')
+ result = {'status': 0, 'msg': 'ok', 'data': []}
+
+ # 服务器端参数验证
+ if not (instance_name and db_name and sql):
+ result['status'] = 1
+ result['msg'] = '页面提交参数可能为空'
+ return HttpResponse(json.dumps(result), content_type='application/json')
+ try:
+ instance_info = user_instances(request.user, db_type=['mysql']).get(instance_name=instance_name)
+ except Exception:
+ result['status'] = 1
+ result['msg'] = '你所在组未关联该实例'
+ return HttpResponse(json.dumps(result), content_type='application/json')
+
+ # 检查测试实例的连接信息和soar程序路径
+ soar_test_dsn = SysConfig().get('soar_test_dsn')
+ soar_path = SysConfig().get('soar')
+ if not (soar_path and soar_test_dsn):
+ result['status'] = 1
+ result['msg'] = '请配置soar_path和test_dsn!'
+ return HttpResponse(json.dumps(result), content_type='application/json')
+
+ # 目标实例的连接信息
+ online_dsn = "{user}:{pwd}@{host}:{port}/{db}".format(user=instance_info.user,
+ pwd=instance_info.password,
+ host=instance_info.host,
+ port=instance_info.port,
+ db=db_name)
+
+ # 提交给soar获取分析报告
+ soar = Soar()
+ # 准备参数
+ args = {"online-dsn": online_dsn,
+ "test-dsn": soar_test_dsn,
+ "allow-online-as-test": "false",
+ "report-type": "markdown",
+ "query": sql.strip().replace('"', '\\"').replace('`', '').replace('\n', ' ')
+ }
+ # 参数检查
+ args_check_result = soar.check_args(args)
+ if args_check_result['status'] == 1:
+ return HttpResponse(json.dumps(args_check_result), content_type='application/json')
+ # 参数转换
+ cmd_args = soar.generate_args2cmd(args, shell=True)
+ # 执行命令
+ try:
+ stdout, stderr = soar.execute_cmd(cmd_args, shell=True).communicate()
+ result['data'] = stdout if stdout else stderr
+ except RuntimeError as e:
+ result['status'] = 1
+ result['msg'] = str(e)
+ return HttpResponse(json.dumps(result), content_type='application/json')
+
+
+@permission_required('sql.optimize_sqltuning', raise_exception=True)
+def optimize_sqltuning(request):
+ instance_name = request.POST.get('instance_name')
+ db_name = request.POST.get('db_name')
+ sqltext = request.POST.get('sql_content')
+ option = request.POST.getlist('option[]')
+
+ try:
+ user_instances(request.user).get(instance_name=instance_name)
+ except Instance.DoesNotExist:
+ result = {'status': 1, 'msg': '你所在组未关联该实例!', 'data': []}
+ return HttpResponse(json.dumps(result), content_type='application/json')
+
+ sql_tunning = SqlTuning(instance_name=instance_name, db_name=db_name, sqltext=sqltext)
+ result = {'status': 0, 'msg': 'ok', 'data': {}}
+ if 'sys_parm' in option:
+ basic_information = sql_tunning.basic_information()
+ sys_parameter = sql_tunning.sys_parameter()
+ optimizer_switch = sql_tunning.optimizer_switch()
+ result['data']['basic_information'] = basic_information
+ result['data']['sys_parameter'] = sys_parameter
+ result['data']['optimizer_switch'] = optimizer_switch
+ if 'sql_plan' in option:
+ plan, optimizer_rewrite_sql = sql_tunning.sqlplan()
+ result['data']['optimizer_rewrite_sql'] = optimizer_rewrite_sql
+ result['data']['plan'] = plan
+ if 'obj_stat' in option:
+ result['data']['object_statistics'] = sql_tunning.object_statistics()
+ if 'sql_profile' in option:
+ session_status = sql_tunning.exec_sql()
+ result['data']['session_status'] = session_status
+ # 关闭连接
+ sql_tunning.engine.close()
+ result['data']['sqltext'] = sqltext
+ return HttpResponse(json.dumps(result, cls=ExtendJSONEncoder, bigint_as_string=True),
+ content_type='application/json')
+
+
+def explain(request):
+ """
+ SQL优化界面获取SQL执行计划
+ :param request:
+ :return:
+ """
+ sql_content = request.POST.get('sql_content')
+ instance_name = request.POST.get('instance_name')
+ db_name = request.POST.get('db_name')
+ result = {'status': 0, 'msg': 'ok', 'data': []}
+
+ # 服务器端参数验证
+ if sql_content is None or instance_name is None:
+ result['status'] = 1
+ result['msg'] = '页面提交参数可能为空'
+ return HttpResponse(json.dumps(result), content_type='application/json')
+
+ try:
+ instance = user_instances(request.user).get(instance_name=instance_name)
+ except Instance.DoesNotExist:
+ result = {'status': 1, 'msg': '实例不存在', 'data': []}
+ return HttpResponse(json.dumps(result), content_type='application/json')
+
+ # 删除注释语句,进行语法判断,执行第一条有效sql
+ sql_content = sqlparse.format(sql_content.strip(), strip_comments=True)
+ try:
+ sql_content = sqlparse.split(sql_content)[0]
+ except IndexError:
+ result['status'] = 1
+ result['msg'] = '没有有效的SQL语句'
+ return HttpResponse(json.dumps(result), content_type='application/json')
+ else:
+ # 过滤非explain的语句
+ if not re.match(r"^explain", sql_content, re.I):
+ result['status'] = 1
+ result['msg'] = '仅支持explain开头的语句,请检查'
+ return HttpResponse(json.dumps(result), content_type='application/json')
+
+ # 执行获取执行计划语句
+ query_engine = get_engine(instance=instance)
+ sql_result = query_engine.query(str(db_name), sql_content).to_sep_dict()
+ result['data'] = sql_result
+
+ # 返回查询结果
+ return HttpResponse(json.dumps(result, cls=ExtendJSONEncoder, bigint_as_string=True),
+ content_type='application/json')
+
+def optimize_sqltuningadvisor(request):
+ """
+ sqltuningadvisor工具获取优化报告
+ :param request:
+ :return:
+ """
+ sql_content = request.POST.get('sql_content')
+ instance_name = request.POST.get('instance_name')
+ db_name = request.POST.get('schema_name')
+ result = {'status': 0, 'msg': 'ok', 'data': []}
+
+ # 服务器端参数验证
+ if sql_content is None or instance_name is None:
+ result['status'] = 1
+ result['msg'] = '页面提交参数可能为空'
+ return HttpResponse(json.dumps(result), content_type='application/json')
+
+ try:
+ instance = user_instances(request.user).get(instance_name=instance_name)
+ except Instance.DoesNotExist:
+ result = {'status': 1, 'msg': '实例不存在', 'data': []}
+ return HttpResponse(json.dumps(result), content_type='application/json')
+
+ # 不删除注释语句,已获取加hints的SQL优化建议,进行语法判断,执行第一条有效sql
+ sql_content = sqlparse.format(sql_content.strip(), strip_comments=False)
+ # 对单引号加转义符,支持plsql语法
+ sql_content = sql_content.replace("'", "''");
+ try:
+ sql_content = sqlparse.split(sql_content)[0]
+ except IndexError:
+ result['status'] = 1
+ result['msg'] = '没有有效的SQL语句'
+ return HttpResponse(json.dumps(result), content_type='application/json')
+ else:
+ # 过滤非Oracle语句
+ if not instance.db_type == 'oracle':
+ result['status'] = 1
+ result['msg'] = 'SQLTuningAdvisor仅支持oracle数据库的检查'
+ return HttpResponse(json.dumps(result), content_type='application/json')
+
+ # 执行获取执行计划语句
+ query_engine = get_engine(instance=instance)
+ sql_result = query_engine.sqltuningadvisor(str(db_name), sql_content).to_sep_dict()
+ result['data'] = sql_result
+
+ # 返回查询结果
+ return HttpResponse(json.dumps(result, cls=ExtendJSONEncoder, bigint_as_string=True),
+ content_type='application/json')
\ No newline at end of file
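
The view above calls query_engine.sqltuningadvisor(), whose engine-side implementation is not shown in this excerpt. As an illustration only, a DBMS_SQLTUNE-based implementation might look like the sketch below; it assumes a cx_Oracle connection whose user holds the ADVISOR privilege, and the function body, task name and parameter choices are assumptions rather than the project's actual code:

    # Sketch only: create, execute and report a SQL Tuning Advisor task for one statement.
    import cx_Oracle

    def sqltuningadvisor(conn, schema_name, sql_text, task_name='archery_tuning_task'):
        cursor = conn.cursor()
        cursor.execute("""
            declare
                l_task varchar2(128);
            begin
                l_task := dbms_sqltune.create_tuning_task(
                    sql_text   => :sql_text,
                    user_name  => :schema_name,
                    scope      => 'COMPREHENSIVE',
                    time_limit => 30,
                    task_name  => :task_name);
                dbms_sqltune.execute_tuning_task(task_name => :task_name);
            end;""",
            sql_text=sql_text, schema_name=schema_name, task_name=task_name)
        cursor.execute(
            "select dbms_sqltune.report_tuning_task(:task_name) from dual",
            task_name=task_name)
        report = cursor.fetchone()[0]
        cursor.execute("begin dbms_sqltune.drop_tuning_task(:task_name); end;",
                       task_name=task_name)
        # report is a CLOB locator; materialise it before the cursor goes away
        return report.read() if hasattr(report, 'read') else report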
From 70ea71303b73a4fea180035a87b15b26f95a80ad Mon Sep 17 00:00:00 2001
From: Jan <60806666+jan-song@users.noreply.github.com>
Date: Fri, 24 Apr 2020 20:43:38 +0800
Subject: [PATCH 13/16] Add files via upload
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Oracle SQL Tuning Advisor route support.
---
sql/urls.py | 303 ++++++++++++++++++++++++++--------------------------
1 file changed, 152 insertions(+), 151 deletions(-)
diff --git a/sql/urls.py b/sql/urls.py
index df24b44a55..7f350fb839 100644
--- a/sql/urls.py
+++ b/sql/urls.py
@@ -1,151 +1,152 @@
-# -*- coding: UTF-8 -*-
-
-from django.urls import path
-from django.views.i18n import JavaScriptCatalog
-
-import sql.instance_database
-import sql.query_privileges
-import sql.sql_optimize
-from common import auth, config, workflow, dashboard, check
-from sql import views, sql_workflow, sql_analyze, query, slowlog, instance, instance_account, db_diagnostic, \
- resource_group, binlog, data_dictionary, archiver
-from sql.utils import tasks
-from common.utils import ding_api
-
-urlpatterns = [
- path('', views.index),
- path('jsi18n/', JavaScriptCatalog.as_view(), name='javascript-catalog'),
- path('index/', views.index),
- path('login/', views.login, name='login'),
- path('logout/', auth.sign_out),
- path('signup/', auth.sign_up),
- path('sqlworkflow/', views.sqlworkflow),
- path('submitsql/', views.submit_sql),
- path('editsql/', views.submit_sql),
- path('submitotherinstance/', views.submit_sql),
- path('detail//', views.detail, name='detail'),
- path('autoreview/', sql_workflow.submit),
- path('passed/', sql_workflow.passed),
- path('execute/', sql_workflow.execute),
- path('timingtask/', sql_workflow.timing_task),
- path('alter_run_date/', sql_workflow.alter_run_date),
- path('cancel/', sql_workflow.cancel),
- path('rollback/', views.rollback),
- path('sqlanalyze/', views.sqlanalyze),
- path('sqlquery/', views.sqlquery),
- path('slowquery/', views.slowquery),
- path('sqladvisor/', views.sqladvisor),
- path('slowquery_advisor/', views.sqladvisor),
- path('queryapplylist/', views.queryapplylist),
- path('queryapplydetail//', views.queryapplydetail, name='queryapplydetail'),
- path('queryuserprivileges/', views.queryuserprivileges),
- path('dbdiagnostic/', views.dbdiagnostic),
- path('workflow/', views.workflows),
- path('workflow//', views.workflowsdetail),
- path('dbaprinciples/', views.dbaprinciples),
- path('dashboard/', dashboard.pyecharts),
- path('group/', views.group),
- path('grouprelations//', views.groupmgmt),
- path('instance/', views.instance),
- path('instanceaccount/', views.instanceaccount),
- path('database/', views.database),
- path('instanceparam/', views.instance_param),
- path('binlog2sql/', views.binlog2sql),
- path('schemasync/', views.schemasync),
- path('archive/', views.archive),
- path('archive//', views.archive_detail, name='archive_detail'),
- path('config/', views.config),
-
- path('authenticate/', auth.authenticate_entry),
- path('sqlworkflow_list/', sql_workflow.sql_workflow_list),
- path('sqlworkflow/detail_content/', sql_workflow.detail_content),
- path('sqlworkflow/backup_sql/', sql_workflow.backup_sql),
- path('simplecheck/', sql_workflow.check),
- path('getWorkflowStatus/', sql_workflow.get_workflow_status),
- path('del_sqlcronjob/', tasks.del_schedule),
- path('inception/osc_control/', sql_workflow.osc_control),
-
- path('sql_analyze/generate/', sql_analyze.generate),
- path('sql_analyze/analyze/', sql_analyze.analyze),
-
- path('workflow/list/', workflow.lists),
- path('workflow/log/', workflow.log),
- path('config/change/', config.change_config),
-
- path('check/inception/', check.inception),
- path('check/go_inception/', check.go_inception),
- path('check/email/', check.email),
- path('check/instance/', check.instance),
-
- path('group/group/', resource_group.group),
- path('group/addrelation/', resource_group.addrelation),
- path('group/relations/', resource_group.associated_objects),
- path('group/instances/', resource_group.instances),
- path('group/unassociated/', resource_group.unassociated_objects),
- path('group/auditors/', resource_group.auditors),
- path('group/changeauditors/', resource_group.changeauditors),
- path('group/user_all_instances/', resource_group.user_all_instances),
-
- path('instance/list/', instance.lists),
-
- path('instance/user/list', instance_account.users),
- path('instance/user/create/', instance_account.create),
- path('instance/user/edit/', instance_account.edit),
- path('instance/user/grant/', instance_account.grant),
- path('instance/user/reset_pwd/', instance_account.reset_pwd),
- path('instance/user/delete/', instance_account.delete),
-
- path('instance/database/list/', sql.instance_database.databases),
- path('instance/database/create/', sql.instance_database.create),
- path('instance/database/edit/', sql.instance_database.edit),
-
- path('instance/schemasync/', instance.schemasync),
- path('instance/instance_resource/', instance.instance_resource),
- path('instance/describetable/', instance.describe),
-
- path('data_dictionary/', views.data_dictionary),
- path('data_dictionary/table_list/', data_dictionary.table_list),
- path('data_dictionary/table_info/', data_dictionary.table_info),
- path('data_dictionary/export/', data_dictionary.export),
-
- path('param/list/', instance.param_list),
- path('param/history/', instance.param_history),
- path('param/edit/', instance.param_edit),
-
- path('query/', query.query),
- path('query/querylog/', query.querylog),
- path('query/favorite/', query.favorite),
- path('query/explain/', sql.sql_optimize.explain),
- path('query/applylist/', sql.query_privileges.query_priv_apply_list),
- path('query/userprivileges/', sql.query_privileges.user_query_priv),
- path('query/applyforprivileges/', sql.query_privileges.query_priv_apply),
- path('query/modifyprivileges/', sql.query_privileges.query_priv_modify),
- path('query/privaudit/', sql.query_privileges.query_priv_audit),
-
- path('binlog/list/', binlog.binlog_list),
- path('binlog/binlog2sql/', binlog.binlog2sql),
- path('binlog/del_log/', binlog.del_binlog),
-
- path('slowquery/review/', slowlog.slowquery_review),
- path('slowquery/review_history/', slowlog.slowquery_review_history),
- path('slowquery/optimize_sqladvisor/', sql.sql_optimize.optimize_sqladvisor),
- path('slowquery/optimize_sqltuning/', sql.sql_optimize.optimize_sqltuning),
- path('slowquery/optimize_soar/', sql.sql_optimize.optimize_soar),
- path('slowquery/report/', slowlog.report),
-
- path('db_diagnostic/process/', db_diagnostic.process),
- path('db_diagnostic/create_kill_session/', db_diagnostic.create_kill_session),
- path('db_diagnostic/kill_session/', db_diagnostic.kill_session),
- path('db_diagnostic/tablesapce/', db_diagnostic.tablesapce),
- path('db_diagnostic/trxandlocks/', db_diagnostic.trxandlocks),
- path('db_diagnostic/innodb_trx/', db_diagnostic.innodb_trx),
-
- path('archive/list/', archiver.archive_list),
- path('archive/apply/', archiver.archive_apply),
- path('archive/audit/', archiver.archive_audit),
- path('archive/switch/', archiver.archive_switch),
- path('archive/once/', archiver.archive_once),
- path('archive/log/', archiver.archive_log),
-
- path('4admin/sync_ding_user/', ding_api.sync_ding_user)
-]
+# -*- coding: UTF-8 -*-
+
+from django.urls import path
+from django.views.i18n import JavaScriptCatalog
+
+import sql.instance_database
+import sql.query_privileges
+import sql.sql_optimize
+from common import auth, config, workflow, dashboard, check
+from sql import views, sql_workflow, sql_analyze, query, slowlog, instance, instance_account, db_diagnostic, \
+ resource_group, binlog, data_dictionary, archiver
+from sql.utils import tasks
+from common.utils import ding_api
+
+urlpatterns = [
+ path('', views.index),
+ path('jsi18n/', JavaScriptCatalog.as_view(), name='javascript-catalog'),
+ path('index/', views.index),
+ path('login/', views.login, name='login'),
+ path('logout/', auth.sign_out),
+ path('signup/', auth.sign_up),
+ path('sqlworkflow/', views.sqlworkflow),
+ path('submitsql/', views.submit_sql),
+ path('editsql/', views.submit_sql),
+ path('submitotherinstance/', views.submit_sql),
+ path('detail//', views.detail, name='detail'),
+ path('autoreview/', sql_workflow.submit),
+ path('passed/', sql_workflow.passed),
+ path('execute/', sql_workflow.execute),
+ path('timingtask/', sql_workflow.timing_task),
+ path('alter_run_date/', sql_workflow.alter_run_date),
+ path('cancel/', sql_workflow.cancel),
+ path('rollback/', views.rollback),
+ path('sqlanalyze/', views.sqlanalyze),
+ path('sqlquery/', views.sqlquery),
+ path('slowquery/', views.slowquery),
+ path('sqladvisor/', views.sqladvisor),
+ path('slowquery_advisor/', views.sqladvisor),
+ path('queryapplylist/', views.queryapplylist),
+ path('queryapplydetail//', views.queryapplydetail, name='queryapplydetail'),
+ path('queryuserprivileges/', views.queryuserprivileges),
+ path('dbdiagnostic/', views.dbdiagnostic),
+ path('workflow/', views.workflows),
+ path('workflow//', views.workflowsdetail),
+ path('dbaprinciples/', views.dbaprinciples),
+ path('dashboard/', dashboard.pyecharts),
+ path('group/', views.group),
+ path('grouprelations//', views.groupmgmt),
+ path('instance/', views.instance),
+ path('instanceaccount/', views.instanceaccount),
+ path('database/', views.database),
+ path('instanceparam/', views.instance_param),
+ path('binlog2sql/', views.binlog2sql),
+ path('schemasync/', views.schemasync),
+ path('archive/', views.archive),
+ path('archive//', views.archive_detail, name='archive_detail'),
+ path('config/', views.config),
+
+ path('authenticate/', auth.authenticate_entry),
+ path('sqlworkflow_list/', sql_workflow.sql_workflow_list),
+ path('sqlworkflow/detail_content/', sql_workflow.detail_content),
+ path('sqlworkflow/backup_sql/', sql_workflow.backup_sql),
+ path('simplecheck/', sql_workflow.check),
+ path('getWorkflowStatus/', sql_workflow.get_workflow_status),
+ path('del_sqlcronjob/', tasks.del_schedule),
+ path('inception/osc_control/', sql_workflow.osc_control),
+
+ path('sql_analyze/generate/', sql_analyze.generate),
+ path('sql_analyze/analyze/', sql_analyze.analyze),
+
+ path('workflow/list/', workflow.lists),
+ path('workflow/log/', workflow.log),
+ path('config/change/', config.change_config),
+
+ path('check/inception/', check.inception),
+ path('check/go_inception/', check.go_inception),
+ path('check/email/', check.email),
+ path('check/instance/', check.instance),
+
+ path('group/group/', resource_group.group),
+ path('group/addrelation/', resource_group.addrelation),
+ path('group/relations/', resource_group.associated_objects),
+ path('group/instances/', resource_group.instances),
+ path('group/unassociated/', resource_group.unassociated_objects),
+ path('group/auditors/', resource_group.auditors),
+ path('group/changeauditors/', resource_group.changeauditors),
+ path('group/user_all_instances/', resource_group.user_all_instances),
+
+ path('instance/list/', instance.lists),
+
+ path('instance/user/list', instance_account.users),
+ path('instance/user/create/', instance_account.create),
+ path('instance/user/edit/', instance_account.edit),
+ path('instance/user/grant/', instance_account.grant),
+ path('instance/user/reset_pwd/', instance_account.reset_pwd),
+ path('instance/user/delete/', instance_account.delete),
+
+ path('instance/database/list/', sql.instance_database.databases),
+ path('instance/database/create/', sql.instance_database.create),
+ path('instance/database/edit/', sql.instance_database.edit),
+
+ path('instance/schemasync/', instance.schemasync),
+ path('instance/instance_resource/', instance.instance_resource),
+ path('instance/describetable/', instance.describe),
+
+ path('data_dictionary/', views.data_dictionary),
+ path('data_dictionary/table_list/', data_dictionary.table_list),
+ path('data_dictionary/table_info/', data_dictionary.table_info),
+ path('data_dictionary/export/', data_dictionary.export),
+
+ path('param/list/', instance.param_list),
+ path('param/history/', instance.param_history),
+ path('param/edit/', instance.param_edit),
+
+ path('query/', query.query),
+ path('query/querylog/', query.querylog),
+ path('query/favorite/', query.favorite),
+ path('query/explain/', sql.sql_optimize.explain),
+ path('query/applylist/', sql.query_privileges.query_priv_apply_list),
+ path('query/userprivileges/', sql.query_privileges.user_query_priv),
+ path('query/applyforprivileges/', sql.query_privileges.query_priv_apply),
+ path('query/modifyprivileges/', sql.query_privileges.query_priv_modify),
+ path('query/privaudit/', sql.query_privileges.query_priv_audit),
+
+ path('binlog/list/', binlog.binlog_list),
+ path('binlog/binlog2sql/', binlog.binlog2sql),
+ path('binlog/del_log/', binlog.del_binlog),
+
+ path('slowquery/review/', slowlog.slowquery_review),
+ path('slowquery/review_history/', slowlog.slowquery_review_history),
+ path('slowquery/optimize_sqladvisor/', sql.sql_optimize.optimize_sqladvisor),
+ path('slowquery/optimize_sqltuning/', sql.sql_optimize.optimize_sqltuning),
+ path('slowquery/optimize_soar/', sql.sql_optimize.optimize_soar),
+ path('slowquery/optimize_sqltuningadvisor/', sql.sql_optimize.optimize_sqltuningadvisor),
+ path('slowquery/report/', slowlog.report),
+
+ path('db_diagnostic/process/', db_diagnostic.process),
+ path('db_diagnostic/create_kill_session/', db_diagnostic.create_kill_session),
+ path('db_diagnostic/kill_session/', db_diagnostic.kill_session),
+ path('db_diagnostic/tablesapce/', db_diagnostic.tablesapce),
+ path('db_diagnostic/trxandlocks/', db_diagnostic.trxandlocks),
+ path('db_diagnostic/innodb_trx/', db_diagnostic.innodb_trx),
+
+ path('archive/list/', archiver.archive_list),
+ path('archive/apply/', archiver.archive_apply),
+ path('archive/audit/', archiver.archive_audit),
+ path('archive/switch/', archiver.archive_switch),
+ path('archive/once/', archiver.archive_once),
+ path('archive/log/', archiver.archive_log),
+
+ path('4admin/sync_ding_user/', ding_api.sync_ding_user)
+]
\ No newline at end of file
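
With the route registered, the new advisor endpoint can be exercised like any other POST view. A small usage sketch with Django's test client, where the credentials, instance name and schema are placeholders:

    # Sketch only: call the new endpoint as an authenticated user.
    from django.test import Client

    client = Client()
    client.login(username='admin', password='admin')  # hypothetical credentials
    resp = client.post('/slowquery/optimize_sqltuningadvisor/', data={
        'instance_name': 'ora_test',   # placeholder Oracle instance
        'schema_name': 'SCOTT',        # passed to the view as db_name
        'sql_content': 'select * from emp where deptno = 10',
    })
    print(resp.json()['status'], resp.json()['msg'])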
From b4aa0fe291b9b5c9084816ff4265c434c6f7a799 Mon Sep 17 00:00:00 2001
From: Jan <60806666+jan-song@users.noreply.github.com>
Date: Fri, 24 Apr 2020 20:45:24 +0800
Subject: [PATCH 14/16] Add files via upload
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Add Oracle SQL statement-type detection.
---
sql/utils/sql_utils.py | 604 +++++++++++++++++++++--------------------
1 file changed, 303 insertions(+), 301 deletions(-)
diff --git a/sql/utils/sql_utils.py b/sql/utils/sql_utils.py
index cc0b3ec47a..e4aa196684 100644
--- a/sql/utils/sql_utils.py
+++ b/sql/utils/sql_utils.py
@@ -1,301 +1,303 @@
-# -*- coding: UTF-8 -*-
-"""
-@author: hhyo
-@license: Apache Licence
-@file: sql_utils.py
-@time: 2019/03/13
-"""
-import re
-import xml
-import mybatis_mapper2sql
-import sqlparse
-
-from sql.engines.models import SqlItem
-from sql.utils.extract_tables import extract_tables as extract_tables_by_sql_parse
-
-__author__ = 'hhyo'
-
-
-def get_syntax_type(sql, parser=True, db_type='mysql'):
- """
- 返回SQL语句类型,仅判断DDL和DML
- :param sql:
- :param parser: 是否使用sqlparse解析
- :param db_type: 不使用sqlparse解析时需要提供该参数
- :return:
- """
- sql = remove_comments(sql=sql, db_type=db_type)
- if parser:
- try:
- statement = sqlparse.parse(sql)[0]
- syntax_type = statement.token_first(skip_cm=True).ttype.__str__()
- if syntax_type == 'Token.Keyword.DDL':
- syntax_type = 'DDL'
- elif syntax_type == 'Token.Keyword.DML':
- syntax_type = 'DML'
- except Exception:
- syntax_type = None
- else:
- if db_type == 'mysql':
- ddl_re = r"^alter|^create|^drop|^rename|^truncate"
- dml_re = r"^call|^delete|^do|^handler|^insert|^load\s+data|^load\s+xml|^replace|^select|^update"
- else:
- # TODO 其他数据库的解析正则
- return None
- if re.match(ddl_re, sql, re.I):
- syntax_type = 'DDL'
- elif re.match(dml_re, sql, re.I):
- syntax_type = 'DML'
- else:
- syntax_type = None
- return syntax_type
-
-
-def remove_comments(sql, db_type='mysql'):
- """
- 去除SQL语句中的注释信息
- 来源:https://stackoverflow.com/questions/35647841/parse-sql-file-with-comments-into-sqlite-with-python
- :param sql:
- :param db_type:
- :return:
- """
- sql_comments_re = {
- 'oracle':
- [r'(?:--)[^\n]*\n', r'(?:\W|^)(?:remark|rem)\s+[^\n]*\n'],
- 'mysql':
- [r'(?:#|--\s)[^\n]*\n']
- }
- specific_comment_re = sql_comments_re[db_type]
- additional_patterns = "|"
- if isinstance(specific_comment_re, str):
- additional_patterns += specific_comment_re
- elif isinstance(specific_comment_re, list):
- additional_patterns += "|".join(specific_comment_re)
- pattern = r"(\".*?\"|\'.*?\')|(/\*.*?\*/{})".format(additional_patterns)
- regex = re.compile(pattern, re.MULTILINE | re.DOTALL)
-
- def _replacer(match):
- if match.group(2):
- return ""
- else:
- return match.group(1)
-
- return regex.sub(_replacer, sql).strip()
-
-
-def extract_tables(sql):
- """
- 获取sql语句中的库、表名
- :param sql:
- :return:
- """
- tables = list()
- for i in extract_tables_by_sql_parse(sql):
- tables.append({
- "schema": i.schema,
- "name": i.name,
- })
- return tables
-
-
-def generate_sql(text):
- """
- 从SQL文本、MyBatis3 Mapper XML file文件中解析出sql 列表
- :param text:
- :return: [{"sql_id": key, "sql": soar.compress(value)}]
- """
- # 尝试XML解析
- try:
- mapper, xml_raw_text = mybatis_mapper2sql.create_mapper(xml_raw_text=text)
- statements = mybatis_mapper2sql.get_statement(mapper, result_type='list')
- rows = []
- # 压缩SQL语句,方便展示
- for statement in statements:
- for key, value in statement.items():
- row = {"sql_id": key, "sql": value}
- rows.append(row)
- except xml.etree.ElementTree.ParseError:
- # 删除注释语句
- text = sqlparse.format(text, strip_comments=True)
- statements = sqlparse.split(text)
- rows = []
- num = 0
- for statement in statements:
- num = num + 1
- row = {"sql_id": num, "sql": statement}
- rows.append(row)
- return rows
-
-
-def get_base_sqlitem_list(full_sql):
- ''' 把参数 full_sql 转变为 SqlItem列表
- :param full_sql: 完整sql字符串, 每个SQL以分号;间隔, 不包含plsql执行块和plsql对象定义块
- :return: SqlItem对象列表
- '''
- list = []
- for statement in sqlparse.split(full_sql):
- statement = sqlparse.format(statement, strip_comments=True)
- if len(statement) <= 0:
- continue
- item = SqlItem(statement=statement)
- list.append(item)
- return list
-
-
-def get_full_sqlitem_list(full_sql, db_name):
- ''' 获取Sql对应的SqlItem列表, 包括PLSQL部分
- PLSQL语句块由delimiter $$作为开始间隔符,以$$作为结束间隔符
- :param full_sql: 全部sql内容
- :return: SqlItem 列表
- '''
- list = []
-
- # 定义开始分隔符,两端用括号,是为了re.split()返回列表包含分隔符
- regex_delimiter = r'(delimiter\s*\$\$)'
- # 注意:必须把package body置于package之前,否则将永远匹配不上package body
- regex_objdefine = r'create\s+or\s+replace\s+(function|procedure|trigger|package\s+body|package|view)\s+("?\w+"?\.)?"?\w+"?[\s+|\(]'
- # 对象命名,两端有双引号
- regex_objname = r'^".+"$'
-
- sql_list = re.split(pattern=regex_delimiter, string=full_sql, flags=re.I)
-
- # delimiter_flag => 分隔符标记, 0:不是, 1:是
- # 遇到分隔符标记为1, 则本块SQL要去判断是否有PLSQL内容
- # PLSQL内容存在判定依据, 本块SQL包含'$$'
-
- delimiter_flag = 0
- for sql in sql_list:
- # 截去首尾空格和多余空字符
- sql = sql.strip()
-
- # 如果字符串长度为0, 跳过该字符串
- if len(sql) <= 0:
- continue
-
- # 表示这一行是分隔符, 跳过该字符串
- if re.match(regex_delimiter, sql):
- delimiter_flag = 1
- continue
-
- if delimiter_flag == 1:
- # 表示SQL块为delimiter $$标记之后的内容
-
- # 查找是否存在'$$'结束符
- pos = sql.find("$$")
- length = len(sql)
- if pos > -1:
- # 该sqlitem包含结束符$$
- # 处理PLSQL语句块, 这里需要先去判定语句块的类型
- plsql_block = sql[0:pos].strip()
- # 如果plsql_area字符串最后一个字符为/,则把/给去掉
- while True:
- if plsql_block[-1:] == '/':
- plsql_block = plsql_block[:-1].strip()
- else:
- break
-
- search_result = re.search(regex_objdefine, plsql_block, flags=re.I)
-
- # 检索关键字, 分为两个情况
- # 情况1:plsql block 为对象定义执行块
- # 情况2:plsql block 为匿名执行块
-
- if search_result:
-
- # 检索到关键字, 属于情况1
-
- str_plsql_match = search_result.group()
- str_plsql_type = search_result.groups()[0]
-
- idx = str_plsql_match.index(str_plsql_type)
- nm_str = str_plsql_match[idx + len(str_plsql_type):].strip()
-
- if nm_str[-1:] == '(':
- nm_str = nm_str[:-1]
- nm_list = nm_str.split('.')
-
- if len(nm_list) > 1:
- # 带有属主的对象名, 形如object_owner.object_name
-
- # 获取object_owner
- if re.match(regex_objname, nm_list[0]):
- # object_owner两端带有双引号
- object_owner = nm_list[0].strip().strip('"')
- else:
- # object_owner两端不带有双引号
- object_owner = nm_list[0].upper().strip().strip("'")
-
- # 获取object_name
- if re.match(regex_objname, nm_list[1]):
- # object_name两端带有双引号
- object_name = nm_list[1].strip().strip('"')
- else:
- # object_name两端不带有双引号
- object_name = nm_list[1].upper().strip()
- else:
- # 不带属主
- object_owner = db_name
- if re.match(regex_objname, nm_list[0]):
- # object_name两端带有双引号
- object_name = nm_list[0].strip().strip('"')
- else:
- # object_name两端不带有双引号
- object_name = nm_list[0].upper().strip()
-
- tmp_object_type = str_plsql_type.upper()
- tmp_stmt_type = 'PLSQL'
- if tmp_object_type == 'VIEW':
- tmp_stmt_type = 'SQL'
-
- item = SqlItem(statement=plsql_block,
- stmt_type=tmp_stmt_type,
- object_owner=object_owner,
- object_type=tmp_object_type,
- object_name=object_name)
- list.append(item)
- else:
- # 未检索到关键字, 属于情况2, 匿名可执行块 it's ANONYMOUS
- item = SqlItem(statement=plsql_block.strip(),
- stmt_type='PLSQL',
- object_owner=db_name,
- object_type='ANONYMOUS',
- object_name='ANONYMOUS')
- list.append(item)
-
- if length > pos + 2:
- # 处理$$之后的那些语句, 默认为单条可执行SQL的集合
- sql_area = sql[pos + 2:].strip()
- if len(sql_area) > 0:
- tmp_list = get_base_sqlitem_list(sql_area)
- list.extend(tmp_list)
-
- else:
- # 没有匹配到$$标记, 默认为单条可执行SQL集合
- tmp_list = get_base_sqlitem_list(sql)
- list.extend(tmp_list)
-
- # 处理完本次delimiter标记的内容,把delimiter_flag重置
- delimiter_flag = 0
- else:
- # 表示当前为以;结尾的正常sql
- tmp_list = get_base_sqlitem_list(sql)
- list.extend(tmp_list)
- return list
-
-
-def get_exec_sqlitem_list(reviewResult, db_name):
- """ 根据审核结果生成新的SQL列表
- :param reviewResult: SQL审核结果列表
- :param db_name:
- :return:
- """
- list = []
- list.append(SqlItem(statement=f"ALTER SESSION SET CURRENT_SCHEMA = {db_name}"))
-
- for item in reviewResult:
- list.append(SqlItem(statement=item['sql'],
- stmt_type=item['stmt_type'],
- object_owner=item['object_owner'],
- object_type=item['object_type'],
- object_name=item['object_name']))
- return list
+# -*- coding: UTF-8 -*-
+"""
+@author: hhyo
+@license: Apache Licence
+@file: sql_utils.py
+@time: 2019/03/13
+"""
+import re
+import xml
+import mybatis_mapper2sql
+import sqlparse
+
+from sql.engines.models import SqlItem
+from sql.utils.extract_tables import extract_tables as extract_tables_by_sql_parse
+
+__author__ = 'hhyo'
+
+
+def get_syntax_type(sql, parser=True, db_type='mysql'):
+ """
+ 返回SQL语句类型,仅判断DDL和DML
+ :param sql:
+ :param parser: 是否使用sqlparse解析
+ :param db_type: 不使用sqlparse解析时需要提供该参数
+ :return:
+ """
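+    # Illustrative example (not in the original source):
+    #   get_syntax_type("update t1 set c1 = 1 where id = 1", parser=False, db_type='oracle') -> 'DML'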
+ sql = remove_comments(sql=sql, db_type=db_type)
+ if parser:
+ try:
+ statement = sqlparse.parse(sql)[0]
+ syntax_type = statement.token_first(skip_cm=True).ttype.__str__()
+ if syntax_type == 'Token.Keyword.DDL':
+ syntax_type = 'DDL'
+ elif syntax_type == 'Token.Keyword.DML':
+ syntax_type = 'DML'
+ except Exception:
+ syntax_type = None
+ else:
+ if db_type == 'mysql':
+ ddl_re = r"^alter|^create|^drop|^rename|^truncate"
+ dml_re = r"^call|^delete|^do|^handler|^insert|^load\s+data|^load\s+xml|^replace|^select|^update"
+ elif db_type == 'oracle':
+ ddl_re = r"^alter|^create|^drop|^rename|^truncate"
+ dml_re = r"^delete|^exec|^insert|^select|^update|^with|^merge"
+ else:
+ # TODO 其他数据库的解析正则
+ return None
+ if re.match(ddl_re, sql, re.I):
+ syntax_type = 'DDL'
+ elif re.match(dml_re, sql, re.I):
+ syntax_type = 'DML'
+ else:
+ syntax_type = None
+ return syntax_type
+
+
+def remove_comments(sql, db_type='mysql'):
+ """
+ 去除SQL语句中的注释信息
+ 来源:https://stackoverflow.com/questions/35647841/parse-sql-file-with-comments-into-sqlite-with-python
+ :param sql:
+ :param db_type:
+ :return:
+ """
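+    # Illustrative example (not in the original source):
+    #   remove_comments("select 1 from dual -- comment\n", db_type='oracle') -> "select 1 from dual"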
+ sql_comments_re = {
+ 'oracle':
+ [r'(?:--)[^\n]*\n', r'(?:\W|^)(?:remark|rem)\s+[^\n]*\n'],
+ 'mysql':
+ [r'(?:#|--\s)[^\n]*\n']
+ }
+ specific_comment_re = sql_comments_re[db_type]
+ additional_patterns = "|"
+ if isinstance(specific_comment_re, str):
+ additional_patterns += specific_comment_re
+ elif isinstance(specific_comment_re, list):
+ additional_patterns += "|".join(specific_comment_re)
+ pattern = r"(\".*?\"|\'.*?\')|(/\*.*?\*/{})".format(additional_patterns)
+ regex = re.compile(pattern, re.MULTILINE | re.DOTALL)
+
+ def _replacer(match):
+ if match.group(2):
+ return ""
+ else:
+ return match.group(1)
+
+ return regex.sub(_replacer, sql).strip()
+
+
+def extract_tables(sql):
+ """
+ 获取sql语句中的库、表名
+ :param sql:
+ :return:
+ """
+ tables = list()
+ for i in extract_tables_by_sql_parse(sql):
+ tables.append({
+ "schema": i.schema,
+ "name": i.name,
+ })
+ return tables
+
+
+def generate_sql(text):
+ """
+ 从SQL文本、MyBatis3 Mapper XML file文件中解析出sql 列表
+ :param text:
+ :return: [{"sql_id": key, "sql": soar.compress(value)}]
+ """
+ # 尝试XML解析
+ try:
+ mapper, xml_raw_text = mybatis_mapper2sql.create_mapper(xml_raw_text=text)
+ statements = mybatis_mapper2sql.get_statement(mapper, result_type='list')
+ rows = []
+ # 压缩SQL语句,方便展示
+ for statement in statements:
+ for key, value in statement.items():
+ row = {"sql_id": key, "sql": value}
+ rows.append(row)
+ except xml.etree.ElementTree.ParseError:
+ # 删除注释语句
+ text = sqlparse.format(text, strip_comments=True)
+ statements = sqlparse.split(text)
+ rows = []
+ num = 0
+ for statement in statements:
+ num = num + 1
+ row = {"sql_id": num, "sql": statement}
+ rows.append(row)
+ return rows
+
+
+def get_base_sqlitem_list(full_sql):
+ ''' 把参数 full_sql 转变为 SqlItem列表
+ :param full_sql: 完整sql字符串, 每个SQL以分号;间隔, 不包含plsql执行块和plsql对象定义块
+ :return: SqlItem对象列表
+ '''
+ list = []
+ for statement in sqlparse.split(full_sql):
+ statement = sqlparse.format(statement, strip_comments=True, reindent=True, keyword_case='lower')
+ if len(statement) <= 0:
+ continue
+ item = SqlItem(statement=statement)
+ list.append(item)
+ return list
+
+
+def get_full_sqlitem_list(full_sql, db_name):
+ ''' 获取Sql对应的SqlItem列表, 包括PLSQL部分
+ PLSQL语句块由delimiter $$作为开始间隔符,以$$作为结束间隔符
+ :param full_sql: 全部sql内容
+ :return: SqlItem 列表
+ '''
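+    # Illustrative example (not in the original source): a submission such as
+    #   delimiter $$
+    #   create or replace procedure p1 as begin null; end;
+    #   $$
+    #   update t1 set c1 = 1 where id = 1;
+    # yields one PLSQL SqlItem (object_type PROCEDURE, object_name P1) followed by plain SQL SqlItems.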
+ list = []
+
+ # 定义开始分隔符,两端用括号,是为了re.split()返回列表包含分隔符
+ regex_delimiter = r'(delimiter\s*\$\$)'
+ # 注意:必须把package body置于package之前,否则将永远匹配不上package body
+ regex_objdefine = r'create\s+or\s+replace\s+(function|procedure|trigger|package\s+body|package|view)\s+("?\w+"?\.)?"?\w+"?[\s+|\(]'
+ # 对象命名,两端有双引号
+ regex_objname = r'^".+"$'
+
+ sql_list = re.split(pattern=regex_delimiter, string=full_sql, flags=re.I)
+
+ # delimiter_flag => 分隔符标记, 0:不是, 1:是
+ # 遇到分隔符标记为1, 则本块SQL要去判断是否有PLSQL内容
+ # PLSQL内容存在判定依据, 本块SQL包含'$$'
+
+ delimiter_flag = 0
+ for sql in sql_list:
+ # 截去首尾空格和多余空字符
+ sql = sql.strip()
+
+ # 如果字符串长度为0, 跳过该字符串
+ if len(sql) <= 0:
+ continue
+
+ # 表示这一行是分隔符, 跳过该字符串
+ if re.match(regex_delimiter, sql):
+ delimiter_flag = 1
+ continue
+
+ if delimiter_flag == 1:
+ # 表示SQL块为delimiter $$标记之后的内容
+
+ # 查找是否存在'$$'结束符
+ pos = sql.find("$$")
+ length = len(sql)
+ if pos > -1:
+ # 该sqlitem包含结束符$$
+ # 处理PLSQL语句块, 这里需要先去判定语句块的类型
+ plsql_block = sql[0:pos].strip()
+ # 如果plsql_area字符串最后一个字符为/,则把/给去掉
+ while True:
+ if plsql_block[-1:] == '/':
+ plsql_block = plsql_block[:-1].strip()
+ else:
+ break
+
+ search_result = re.search(regex_objdefine, plsql_block, flags=re.I)
+
+ # 检索关键字, 分为两个情况
+ # 情况1:plsql block 为对象定义执行块
+ # 情况2:plsql block 为匿名执行块
+
+ if search_result:
+
+ # 检索到关键字, 属于情况1
+
+ str_plsql_match = search_result.group()
+ str_plsql_type = search_result.groups()[0]
+
+ idx = str_plsql_match.index(str_plsql_type)
+ nm_str = str_plsql_match[idx + len(str_plsql_type):].strip()
+
+ if nm_str[-1:] == '(':
+ nm_str = nm_str[:-1]
+ nm_list = nm_str.split('.')
+
+ if len(nm_list) > 1:
+ # 带有属主的对象名, 形如object_owner.object_name
+
+ # 获取object_owner
+ if re.match(regex_objname, nm_list[0]):
+ # object_owner两端带有双引号
+ object_owner = nm_list[0].strip().strip('"')
+ else:
+ # object_owner两端不带有双引号
+ object_owner = nm_list[0].upper().strip().strip("'")
+
+ # 获取object_name
+ if re.match(regex_objname, nm_list[1]):
+ # object_name两端带有双引号
+ object_name = nm_list[1].strip().strip('"')
+ else:
+ # object_name两端不带有双引号
+ object_name = nm_list[1].upper().strip()
+ else:
+ # 不带属主
+ object_owner = db_name
+ if re.match(regex_objname, nm_list[0]):
+ # object_name两端带有双引号
+ object_name = nm_list[0].strip().strip('"')
+ else:
+ # object_name两端不带有双引号
+ object_name = nm_list[0].upper().strip()
+
+ tmp_object_type = str_plsql_type.upper()
+ tmp_stmt_type = 'PLSQL'
+ if tmp_object_type == 'VIEW':
+ tmp_stmt_type = 'SQL'
+
+ item = SqlItem(statement=plsql_block,
+ stmt_type=tmp_stmt_type,
+ object_owner=object_owner,
+ object_type=tmp_object_type,
+ object_name=object_name)
+ list.append(item)
+ else:
+ # 未检索到关键字, 属于情况2, 匿名可执行块 it's ANONYMOUS
+ item = SqlItem(statement=plsql_block.strip(),
+ stmt_type='PLSQL',
+ object_owner=db_name,
+ object_type='ANONYMOUS',
+ object_name='ANONYMOUS')
+ list.append(item)
+
+ if length > pos + 2:
+ # 处理$$之后的那些语句, 默认为单条可执行SQL的集合
+ sql_area = sql[pos + 2:].strip()
+ if len(sql_area) > 0:
+ tmp_list = get_base_sqlitem_list(sql_area)
+ list.extend(tmp_list)
+
+ else:
+ # 没有匹配到$$标记, 默认为单条可执行SQL集合
+ tmp_list = get_base_sqlitem_list(sql)
+ list.extend(tmp_list)
+
+ # 处理完本次delimiter标记的内容,把delimiter_flag重置
+ delimiter_flag = 0
+ else:
+ # 表示当前为以;结尾的正常sql
+ tmp_list = get_base_sqlitem_list(sql)
+ list.extend(tmp_list)
+ return list
+
+
+def get_exec_sqlitem_list(reviewResult, db_name):
+ """ 根据审核结果生成新的SQL列表
+ :param reviewResult: SQL审核结果列表
+ :param db_name:
+ :return:
+ """
+ list = []
+ list.append(SqlItem(statement=f"ALTER SESSION SET CURRENT_SCHEMA = {db_name}"))
+
+ for item in reviewResult:
+ list.append(SqlItem(statement=item['sql'],
+ stmt_type=item['stmt_type'],
+ object_owner=item['object_owner'],
+ object_type=item['object_type'],
+ object_name=item['object_name']))
+ return list
\ No newline at end of file
From 64995a6526d20a527142c14cd2ceae697fbaf710 Mon Sep 17 00:00:00 2001
From: Jan <60806666+jan-song@users.noreply.github.com>
Date: Fri, 24 Apr 2020 20:48:25 +0800
Subject: [PATCH 15/16] Add files via upload
---
sql/engines/models.py | 282 +++++++++++++++++++++---------------------
1 file changed, 141 insertions(+), 141 deletions(-)
diff --git a/sql/engines/models.py b/sql/engines/models.py
index cc7091ad62..49c483a0d0 100644
--- a/sql/engines/models.py
+++ b/sql/engines/models.py
@@ -1,141 +1,141 @@
-# -*- coding: UTF-8 -*-
-"""engine 结果集定义"""
-import json
-
-
-class SqlItem:
-
- def __init__(self, id=0, statement='', stmt_type='SQL', object_owner='', object_type='', object_name=''):
- '''
- :param id: SQL序号,从0开始
- :param statement: SQL Statement
- :param stmt_type: SQL类型(SQL, PLSQL), 默认为SQL
- :param object_owner: PLSQL Object Owner
- :param object_type: PLSQL Object Type
- :param object_name: PLSQL Object Name
- '''
- self.id = id
- self.statement = statement
- self.stmt_type = stmt_type
- self.object_owner = object_owner
- self.object_type = object_type
- self.object_name = object_name
-
-
-class ReviewResult:
- """审核的单条结果"""
-
- def __init__(self, inception_result=None, **kwargs):
- """
- inception的结果列 = ['ID', 'stage', 'errlevel', 'stagestatus', 'errormessage', 'SQL', 'Affected_rows',
- 'sequence','backup_dbname', 'execute_time', 'sqlsha1']
- go_inception的结果列 = ['order_id', 'stage', 'error_level', 'stage_status', 'error_message', 'sql',
- 'affected_rows', 'sequence', 'backup_dbname', 'execute_time', 'sqlsha1', 'backup_time']
- """
- if inception_result:
- self.id = inception_result[0] or 0
- self.stage = inception_result[1] or ''
- self.errlevel = inception_result[2] or 0
- self.stagestatus = inception_result[3] or ''
- self.errormessage = inception_result[4] or ''
- self.sql = inception_result[5] or ''
- self.affected_rows = inception_result[6] or 0
- self.sequence = inception_result[7] or ''
- self.backup_dbname = inception_result[8] or ''
- self.execute_time = inception_result[9] or ''
- self.sqlsha1 = inception_result[10] or ''
- self.backup_time = inception_result[11] if len(inception_result) >= 12 else ''
- self.actual_affected_rows = ''
- else:
- self.id = kwargs.get('id', 0)
- self.stage = kwargs.get('stage', '')
- self.errlevel = kwargs.get('errlevel', 0)
- self.stagestatus = kwargs.get('stagestatus', '')
- self.errormessage = kwargs.get('errormessage', '')
- self.sql = kwargs.get('sql', '')
- self.affected_rows = kwargs.get('affected_rows', 0)
- self.sequence = kwargs.get('sequence', '')
- self.backup_dbname = kwargs.get('backup_dbname', '')
- self.execute_time = kwargs.get('execute_time', '')
- self.sqlsha1 = kwargs.get('sqlsha1', '')
- self.backup_time = kwargs.get('backup_time', '')
- self.actual_affected_rows = kwargs.get('actual_affected_rows', '')
-
- # 自定义属性
- for key, value in kwargs.items():
- if not hasattr(self, key):
- setattr(self, key, value)
-
-
-class ReviewSet:
- """review和执行后的结果集, rows中是review result, 有设定好的字段"""
-
- def __init__(self, full_sql='', rows=None, status=None,
- affected_rows=0, column_list=None, **kwargs):
- self.full_sql = full_sql
- self.is_execute = False
- self.checked = None
- self.warning = None
- self.error = None
- self.warning_count = 0 # 检测结果警告数
- self.error_count = 0 # 检测结果错误数
- self.is_critical = False
- self.syntax_type = 0 # 语法类型
- # rows 为普通列表
- self.rows = rows or []
- self.column_list = column_list
- self.status = status
- self.affected_rows = affected_rows
-
- def json(self):
- tmp_list = []
- for r in self.rows:
- if isinstance(r, dict):
- tmp_list += [r]
- else:
- tmp_list += [r.__dict__]
-
- return json.dumps(tmp_list)
-
- def to_dict(self):
- tmp_list = []
- for r in self.rows:
- tmp_list += [r.__dict__]
- return tmp_list
-
-
-class ResultSet:
- """查询的结果集, rows 内只有值, column_list 中的是key"""
-
- def __init__(self, full_sql='', rows=None, status=None,
- affected_rows=0, column_list=None, **kwargs):
- self.full_sql = full_sql
- self.is_execute = False
- self.checked = None
- self.is_masked = False
- self.query_time = ''
- self.mask_rule_hit = False
- self.mask_time = ''
- self.warning = None
- self.error = None
- self.is_critical = False
- # rows 为普通列表
- self.rows = rows or []
- self.column_list = column_list if column_list else []
- self.status = status
- self.affected_rows = affected_rows
-
- def json(self):
- tmp_list = []
- for r in self.rows:
- tmp_list += [dict(zip(self.column_list, r))]
- return json.dumps(tmp_list)
-
- def to_dict(self):
- tmp_list = []
- for r in self.rows:
- tmp_list += [dict(zip(self.column_list, r))]
- return tmp_list
-
- def to_sep_dict(self):
- return {'column_list': self.column_list, 'rows': self.rows}
+# -*- coding: UTF-8 -*-
+"""engine 结果集定义"""
+import json
+
+
+class SqlItem:
+
+ def __init__(self, id=0, statement='', stmt_type='SQL', object_owner='', object_type='', object_name=''):
+ '''
+ :param id: SQL序号,从0开始
+ :param statement: SQL Statement
+ :param stmt_type: SQL类型(SQL, PLSQL), 默认为SQL
+ :param object_owner: PLSQL Object Owner
+ :param object_type: PLSQL Object Type
+ :param object_name: PLSQL Object Name
+ '''
+ self.id = id
+ self.statement = statement
+ self.stmt_type = stmt_type
+ self.object_owner = object_owner
+ self.object_type = object_type
+ self.object_name = object_name
+
+
+class ReviewResult:
+ """审核的单条结果"""
+
+ def __init__(self, inception_result=None, **kwargs):
+ """
+ inception的结果列 = ['ID', 'stage', 'errlevel', 'stagestatus', 'errormessage', 'SQL', 'Affected_rows',
+ 'sequence','backup_dbname', 'execute_time', 'sqlsha1']
+ go_inception的结果列 = ['order_id', 'stage', 'error_level', 'stage_status', 'error_message', 'sql',
+ 'affected_rows', 'sequence', 'backup_dbname', 'execute_time', 'sqlsha1', 'backup_time']
+ """
+ if inception_result:
+ self.id = inception_result[0] or 0
+ self.stage = inception_result[1] or ''
+ self.errlevel = inception_result[2] or 0
+ self.stagestatus = inception_result[3] or ''
+ self.errormessage = inception_result[4] or ''
+ self.sql = inception_result[5] or ''
+ self.affected_rows = inception_result[6] or 0
+ self.sequence = inception_result[7] or ''
+ self.backup_dbname = inception_result[8] or ''
+ self.execute_time = inception_result[9] or ''
+ self.sqlsha1 = inception_result[10] or ''
+ self.backup_time = inception_result[11] if len(inception_result) >= 12 else ''
+ self.actual_affected_rows = ''
+ else:
+ self.id = kwargs.get('id', 0)
+ self.stage = kwargs.get('stage', '')
+ self.errlevel = kwargs.get('errlevel', 0)
+ self.stagestatus = kwargs.get('stagestatus', '')
+ self.errormessage = kwargs.get('errormessage', '')
+ self.sql = kwargs.get('sql', '')
+ self.affected_rows = kwargs.get('affected_rows', 0)
+ self.sequence = kwargs.get('sequence', '')
+ self.backup_dbname = kwargs.get('backup_dbname', '')
+ self.execute_time = kwargs.get('execute_time', '')
+ self.sqlsha1 = kwargs.get('sqlsha1', '')
+ self.backup_time = kwargs.get('backup_time', '')
+ self.actual_affected_rows = kwargs.get('actual_affected_rows', '')
+
+ # 自定义属性
+ for key, value in kwargs.items():
+ if not hasattr(self, key):
+ setattr(self, key, value)
+
+
+class ReviewSet:
+ """review和执行后的结果集, rows中是review result, 有设定好的字段"""
+
+ def __init__(self, full_sql='', rows=None, status=None,
+ affected_rows=0, column_list=None, **kwargs):
+ self.full_sql = full_sql
+ self.is_execute = False
+ self.checked = None
+ self.warning = None
+ self.error = None
+ self.warning_count = 0 # 检测结果警告数
+ self.error_count = 0 # 检测结果错误数
+ self.is_critical = False
+ self.syntax_type = 0 # 语法类型
+ # rows 为普通列表
+ self.rows = rows or []
+ self.column_list = column_list
+ self.status = status
+ self.affected_rows = affected_rows
+
+ def json(self):
+ tmp_list = []
+ for r in self.rows:
+ if isinstance(r, dict):
+ tmp_list += [r]
+ else:
+ tmp_list += [r.__dict__]
+
+ return json.dumps(tmp_list)
+
+ def to_dict(self):
+ tmp_list = []
+ for r in self.rows:
+ tmp_list += [r.__dict__]
+ return tmp_list
+
+
+class ResultSet:
+ """查询的结果集, rows 内只有值, column_list 中的是key"""
+
+ def __init__(self, full_sql='', rows=None, status=None,
+ affected_rows=0, column_list=None, **kwargs):
+ self.full_sql = full_sql
+ self.is_execute = False
+ self.checked = None
+ self.is_masked = False
+ self.query_time = ''
+ self.mask_rule_hit = False
+ self.mask_time = ''
+ self.warning = None
+ self.error = None
+ self.is_critical = False
+ # rows 为普通列表
+ self.rows = rows or []
+ self.column_list = column_list if column_list else []
+ self.status = status
+ self.affected_rows = affected_rows
+
+ def json(self):
+ tmp_list = []
+ for r in self.rows:
+ tmp_list += [dict(zip(self.column_list, r))]
+ return json.dumps(tmp_list)
+
+ def to_dict(self):
+ tmp_list = []
+ for r in self.rows:
+ tmp_list += [dict(zip(self.column_list, r))]
+ return tmp_list
+
+ def to_sep_dict(self):
+ return {'column_list': self.column_list, 'rows': self.rows}
\ No newline at end of file
From 81883b637eb9ba1c12946dd79275f0d1be791e98 Mon Sep 17 00:00:00 2001
From: Jan <60806666+jan-song@users.noreply.github.com>
Date: Fri, 24 Apr 2020 20:50:41 +0800
Subject: [PATCH 16/16] Add files via upload
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Support syntax auditing for Oracle select, update, delete, insert, create table and create index statements. Support backup of data modifications. Support viewing of execution plans. Fix query bugs.
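
A rough sketch of how the new explain-based pre-check is meant to be used (illustrative only;
the Instance record and the sample SQL below are placeholders, not part of this patch):

    from sql.engines.oracle import OracleEngine

    engine = OracleEngine(instance=instance)  # instance: an existing archery Instance record (assumed)
    check = engine.execute_check(
        db_name='SCOTT',
        sql="update emp set sal = sal * 1.1 where deptno = 10;")
    for row in check.rows:
        # supported DML/DDL is dry-run via "explain plan for ..." and the optimizer's
        # estimated row count comes back as affected_rows
        print(row.errlevel, row.stagestatus, row.affected_rows)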
---
sql/engines/oracle.py | 560 +++++++++++++++++++++++++++++++++++-------
1 file changed, 476 insertions(+), 84 deletions(-)
diff --git a/sql/engines/oracle.py b/sql/engines/oracle.py
index afb5aff7c1..ca4f4f6741 100644
--- a/sql/engines/oracle.py
+++ b/sql/engines/oracle.py
@@ -6,6 +6,7 @@
import sqlparse
import MySQLdb
import simplejson as json
+import threading
from common.config import SysConfig
from common.utils.timer import FuncTimer
@@ -17,7 +18,6 @@
logger = logging.getLogger('default')
-
class OracleEngine(EngineBase):
def __init__(self, instance=None):
@@ -53,6 +53,7 @@ def auto_backup(self):
@staticmethod
def get_backup_connection():
+ """备份库连接"""
archer_config = SysConfig()
backup_host = archer_config.get('inception_remote_backup_host')
backup_port = int(archer_config.get('inception_remote_backup_port', 3306))
@@ -65,6 +66,7 @@ def get_backup_connection():
charset='utf8mb4',
autocommit=True
)
+
@property
def server_version(self):
conn = self.get_connection()
@@ -96,7 +98,7 @@ def _get_all_schemas(self):
获取模式列表
:return:
"""
- result = self.query(sql="SELECT username FROM all_users")
+ result = self.query(sql="SELECT username FROM all_users order by username")
sysschema = (
'AUD_SYS', 'ANONYMOUS', 'APEX_030200', 'APEX_PUBLIC_USER', 'APPQOSSYS', 'BI USERS', 'CTXSYS', 'DBSNMP',
'DIP USERS', 'EXFSYS', 'FLOWS_FILES', 'HR USERS', 'IX USERS', 'MDDATA', 'MDSYS', 'MGMT_VIEW', 'OE USERS',
@@ -109,9 +111,17 @@ def _get_all_schemas(self):
def get_all_tables(self, db_name, **kwargs):
"""获取table 列表, 返回一个ResultSet"""
- sql = f"""SELECT table_name FROM all_tables WHERE nvl(tablespace_name, 'no tablespace') NOT IN ('SYSTEM', 'SYSAUX') AND OWNER = '{db_name}' AND IOT_NAME IS NULL AND DURATION IS NULL order by table_name
+ sql = f"""SELECT table_name FROM all_tables WHERE nvl(tablespace_name, 'no tablespace') NOT IN ('SYSTEM', 'SYSAUX') AND OWNER = '{db_name}' AND IOT_NAME IS NULL AND DURATION IS NULL order by table_name
"""
- result = self.query(sql=sql)
+ result = self.query(db_name=db_name, sql=sql)
+ tb_list = [row[0] for row in result.rows if row[0] not in ['test']]
+ result.rows = tb_list
+ return result
+
+ def get_all_objects(self, db_name, **kwargs):
+ """获取object_name 列表, 返回一个ResultSet"""
+ sql = f"""SELECT object_name FROM all_objects WHERE OWNER = '{db_name}' """
+ result = self.query(db_name=db_name, sql=sql)
tb_list = [row[0] for row in result.rows if row[0] not in ['test']]
result.rows = tb_list
return result
@@ -135,9 +145,144 @@ def describe_table(self, db_name, tb_name, **kwargs):
FROM all_tab_cols
WHERE table_name = '{tb_name}' and owner = '{db_name}' order by column_id
"""
- result = self.query(sql=sql)
+ result = self.query(db_name=db_name, sql=sql)
return result
+ def object_name_check(self, db_name=None, object_name=''):
+        """检查对象是否存在, 返回True/False"""
+ if '.' in object_name:
+ schema_name = object_name.split('.')[0]
+ object_name = object_name.split('.')[1]
+ sql = f"""SELECT object_name FROM all_objects WHERE OWNER = upper('{schema_name}') and OBJECT_NAME = upper('{object_name}')"""
+ else:
+ sql = f"""SELECT object_name FROM all_objects WHERE OWNER = upper('{db_name}') and OBJECT_NAME = upper('{object_name}')"""
+ result = self.query(db_name=db_name, sql=sql,close_conn=False)
+ if result.affected_rows > 0:
+ return True
+ else:
+ return False
+
+ def get_sql_first_object_name(self, sql=''):
+ """获取sql文本中的object_name"""
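+        # Illustrative (not in the original source):
+        #   "create table scott.t1 (id number)" -> "scott.t1"; "alter table t1 add c2 number" -> "t1"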
+ object_name = ''
+ if re.match(r"^create\s+table\s", sql):
+ object_name = re.match(r"^create\s+table\s(.+?)(\s|\()",sql,re.M).group(1)
+ elif re.match(r"^create\s+index\s", sql):
+ object_name = re.match(r"^create\s+index\s(.+?)\s",sql,re.M).group(1)
+ elif re.match(r"^create\s+unique\s+index\s", sql):
+ object_name = re.match(r"^create\s+unique\s+index\s(.+?)\s", sql, re.M).group(1)
+ elif re.match(r"^create\s+sequence\s", sql):
+ object_name = re.match(r"^create\s+sequence\s(.+?)(\s|$)",sql,re.M).group(1)
+ elif re.match(r"^alter\s+table\s", sql):
+ object_name = re.match(r"^alter\s+table\s(.+?)\s",sql,re.M).group(1)
+ elif re.match(r"^create\s+function\s", sql):
+ object_name = re.match(r"^create\s+function\s(.+?)(\s|\()",sql,re.M).group(1)
+ elif re.match(r"^create\s+view\s", sql):
+ object_name = re.match(r"^create\s+view\s(.+?)\s",sql,re.M).group(1)
+ elif re.match(r"^create\s+procedure\s", sql):
+ object_name = re.match(r"^create\s+procedure\s(.+?)\s",sql,re.M).group(1)
+ elif re.match(r"^create\s+package\s+body", sql):
+ object_name = re.match(r"^create\s+package\s+body\s(.+?)\s",sql,re.M).group(1)
+ elif re.match(r"^create\s+package\s", sql):
+ object_name = re.match(r"^create\s+package\s(.+?)\s",sql,re.M).group(1)
+ else:
+ return object_name.strip()
+ return object_name.strip()
+
+ def check_create_index_table(self,sql='',object_name_list=set(),db_name=''):
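+        # returns True when the create index statement targets a table created earlier in the
+        # same submission (tracked in object_name_list), since explain cannot see that table yet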
+ result = {'msg': '', 'bad_query': False}
+ table_name = ''
+ if re.match(r"^create\s+index\s",sql):
+ table_name = re.match(r"^create\s+index\s+.+\s+on\s(.+?)(\(|\s\()",sql,re.M).group(1)
+ if '.' not in table_name:
+ table_name = f"{db_name}.{table_name}"
+ if table_name in object_name_list:
+ return True
+ else:
+ return False
+ elif re.match(r"^create\s+unique\s+index\s", sql):
+ table_name = re.match(r"^create\s+unique\s+index\s+.+\s+on\s(.+?)(\(|\s\()", sql, re.M).group(1)
+ if '.' not in table_name:
+ table_name = f"{db_name}.{table_name}"
+ if table_name in object_name_list:
+ return True
+ else:
+ return False
+ else:
+ return False
+
+ def get_dml_table(self,sql='',object_name_list=set(),db_name=''):
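+        # returns True when the DML statement targets a table created earlier in the same
+        # submission, since explain would fail on a table that does not exist yet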
+ if re.match(r"^update",sql):
+ table_name = re.match(r"^update\s(.+?)\s",sql,re.M).group(1)
+ if '.' not in table_name:
+ table_name = f"{db_name}.{table_name}"
+ if table_name in object_name_list:
+ return True
+ else:
+ return False
+ elif re.match(r"^delete", sql):
+ table_name = re.match(r"^delete\s+from\s(.+?)\s", sql, re.M).group(1)
+ if '.' not in table_name:
+ table_name = f"{db_name}.{table_name}"
+ if table_name in object_name_list:
+ return True
+ else:
+ return False
+ elif re.match(r"^insert", sql):
+ table_name = re.match(r"^insert\s+into\s(.+?)(\(|\s)", sql, re.M).group(1)
+ if '.' not in table_name:
+ table_name = f"{db_name}.{table_name}"
+ if table_name in object_name_list:
+ return True
+ else:
+ return False
+ else:
+ return False
+
+ def where_check(self,sql=''):
+ if re.match(r"^update((?!where).)*$|^delete((?!where).)*$",sql):
+ return True
+ else:
+ parsed = sqlparse.parse(sql)[0]
+ flattened = list(parsed.flatten())
+ n_skip = 0
+ flattened = flattened[:len(flattened) - n_skip]
+ logical_operators = ('AND', 'OR', 'NOT', 'BETWEEN', 'ORDER BY', 'GROUP BY', 'HAVING')
+ for t in reversed(flattened):
+ if t.is_keyword:
+ return True
+ return False
+
+ def explain_check(self, db_name=None, sql='', close_conn=False):
+        # 使用explain对支持的SQL做语法审核;审核期间保持同一连接,避免数据库反复fork进程带来的大量消耗
+ result = {'msg': '', 'rows': 0}
+ try:
+ conn = self.get_connection()
+ cursor = conn.cursor()
+ if db_name:
+ cursor.execute(f"ALTER SESSION SET CURRENT_SCHEMA = {db_name}")
+            if not re.match(r"^explain", sql, re.I):
+                sql = f"explain plan for {sql}"
+ sql = sql.rstrip(';')
+ cursor.execute(sql)
+ # 获取影响行数
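+            # id = 0 is the top row source of the plan just generated; its CARDINALITY column
+            # holds the optimizer's estimated row count (PLAN_TABLE is backed by SYS.PLAN_TABLE$)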
+ cursor.execute(f"select CARDINALITY from SYS.PLAN_TABLE$ where id = 0")
+ rows = cursor.fetchone()
+ conn.rollback()
+ if rows[0] is None:
+ result['rows'] = 0
+ else:
+ result['rows'] = rows[0]
+ except Exception as e:
+ logger.warning(f"Oracle 语句执行报错,语句:{sql},错误信息{traceback.format_exc()}")
+ result['msg'] = str(e)
+ finally:
+ if close_conn:
+ self.close()
+ return result
+
def query_check(self, db_name=None, sql=''):
# 查询语句的检查、注释去除、切分
result = {'msg': '', 'bad_query': False, 'filtered_sql': sql, 'has_star': False}
@@ -145,9 +290,9 @@ def query_check(self, db_name=None, sql=''):
star_patter = r"(^|,|\s)\*(\s|\(|$)"
# 删除注释语句,进行语法判断,执行第一条有效sql
try:
+ sql = sqlparse.format(sql, strip_comments=True)
sql = sqlparse.split(sql)[0]
result['filtered_sql'] = re.sub(r';$', '', sql.strip())
- sql = sqlparse.format(sql, strip_comments=True)
sql_lower = sql.lower()
except IndexError:
result['bad_query'] = True
@@ -155,7 +300,7 @@ def query_check(self, db_name=None, sql=''):
return result
if re.match(r"^select|^with|^explain", sql_lower) is None:
result['bad_query'] = True
- result['msg'] = '仅支持^select语法!'
+ result['msg'] = '不支持语法!'
return result
if re.search(star_patter, sql_lower) is not None:
keyword_warning += '禁止使用 * 关键词\n'
@@ -165,13 +310,19 @@ def query_check(self, db_name=None, sql=''):
result['bad_query'] = True
if result.get('bad_query') or result.get('has_star'):
result['msg'] = keyword_warning
+ #select语句先使用Explain判断语法是否正确
+ if re.match(r"^select|^with", sql, re.I):
+ explain_result = self.explain_check(db_name=db_name, sql=f"explain plan for {sql}")
+ if explain_result['msg']:
+ result['bad_query'] = True
+ result['msg'] = explain_result['msg']
return result
def filter_sql(self, sql='', limit_num=0):
sql_lower = sql.lower()
# 对查询sql增加limit限制
- if re.match(r"^select|^with", sql_lower):
- sql = f"select a.* from ({sql.rstrip(';')}) a WHERE ROWNUM <= {limit_num}"
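+        # skip wrapping when the query is already a sql_audit/rownum wrapper, so the limit is not nested twice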
+ if re.match(r"^select|^with", sql_lower) and not (re.match(r"^select\s+sql_audit.", sql_lower) and sql_lower.find(" sql_audit where rownum <= ") != -1) :
+ sql = f"select sql_audit.* from ({sql.rstrip(';')}) sql_audit where rownum <= {limit_num}"
return sql.strip()
def query(self, db_name=None, sql='', limit_num=0, close_conn=True, **kwargs):
@@ -185,9 +336,9 @@ def query(self, db_name=None, sql='', limit_num=0, close_conn=True, **kwargs):
sql = sql.rstrip(';')
# 支持oralce查询SQL执行计划语句
if re.match(r"^explain", sql, re.I):
- cursor.execute(sql)
- # 重置SQL文本,获取SQL执行计划
- sql = f"select PLAN_TABLE_OUTPUT from table(dbms_xplan.display)"
+ cursor.execute(sql)
+ # 重置SQL文本,获取SQL执行计划
+ sql = f"select PLAN_TABLE_OUTPUT from table(dbms_xplan.display)"
cursor.execute(sql)
fields = cursor.description
if any(x[1] == cx_Oracle.CLOB for x in fields):
@@ -199,7 +350,6 @@ def query(self, db_name=None, sql='', limit_num=0, close_conn=True, **kwargs):
rows = cursor.fetchmany(int(limit_num))
else:
rows = cursor.fetchall()
-
result_set.column_list = [i[0] for i in fields] if fields else []
result_set.rows = [tuple(x) for x in rows]
result_set.affected_rows = len(result_set.rows)
@@ -215,65 +365,238 @@ def query_masking(self, schema_name=None, sql='', resultset=None):
"""传入 sql语句, db名, 结果集,
返回一个脱敏后的结果集"""
# 仅对select语句脱敏
- if re.match(r"^select", sql, re.I):
+ if re.match(r"^select|^with", sql, re.I):
filtered_result = brute_mask(self.instance, resultset)
filtered_result.is_masked = True
else:
filtered_result = resultset
return filtered_result
- def execute_check(self, db_name=None, sql=''):
- """上线单执行前的检查, 返回Review set"""
+ def execute_check(self, db_name=None, sql='', close_conn=True):
+ """
+ 上线单执行前的检查, 返回Review set
+ update by Jan.song 20200302
+        使用explain对数据修改语句进行预检,并估算影响行数
+ """
config = SysConfig()
check_result = ReviewSet(full_sql=sql)
+ #explain支持的语法
+ explain_re = r"^merge|^update|^delete|^insert|^create\s+table|^create\s+index|^create\s+unique\s+index"
# 禁用/高危语句检查
line = 1
+ #保存SQL中的新建对象
+ object_name_list = set()
critical_ddl_regex = config.get('critical_ddl_regex', '')
p = re.compile(critical_ddl_regex)
check_result.syntax_type = 2 # TODO 工单类型 0、其他 1、DDL,2、DML
-
- # 把所有SQL转换成SqlItem List。 如有多行(内部有多个;)执行块,约定以delimiter $$作为开始, 以$$结束
- # 需要在函数里实现单条SQL做sqlparse.format(sql, strip_comments=True)
- sqlitemList = get_full_sqlitem_list(sql, db_name)
-
- for sqlitem in sqlitemList:
- # 禁用语句
- if re.match(r"^\s*select", sqlitem.statement.lower(), re.I):
- check_result.is_critical = True
- result = ReviewResult(id=line, errlevel=2,
- stagestatus='驳回不支持语句',
- errormessage='仅支持DML和DDL语句,查询语句请使用SQL查询功能!',
- sql=sqlitem.statement)
- # 高危语句
- elif critical_ddl_regex and p.match(sqlitem.statement.strip().lower()):
- check_result.is_critical = True
- result = ReviewResult(id=line, errlevel=2,
- stagestatus='驳回高危SQL',
- errormessage='禁止提交匹配' + critical_ddl_regex + '条件的语句!',
- sql=sqlitem.statement)
-
- # 正常语句
- else:
- result = ReviewResult(id=line, errlevel=0,
- stagestatus='Audit completed',
- errormessage='None',
- sql=sqlitem.statement,
- stmt_type=sqlitem.stmt_type,
- object_owner=sqlitem.object_owner,
- object_type=sqlitem.object_type,
- object_name=sqlitem.object_name,
- affected_rows=0,
- execute_time=0, )
- # 判断工单类型
- if get_syntax_type(sqlitem.statement) == 'DDL':
- check_result.syntax_type = 1
- check_result.rows += [result]
-
- # 遇到禁用和高危语句直接返回,提高效率
- if check_result.is_critical:
- check_result.error_count += 1
- return check_result
- line += 1
+ try:
+ sqlitemList = get_full_sqlitem_list(sql, db_name)
+ for sqlitem in sqlitemList:
+ sql_lower = sqlitem.statement.lower().rstrip(';')
+ # 禁用语句
+ if re.match(r"^select|^with|^explain", sql_lower):
+ check_result.is_critical = True
+ result = ReviewResult(id=line, errlevel=2,
+ stagestatus='驳回不支持语句',
+ errormessage='仅支持DML和DDL语句,查询语句请使用SQL查询功能!',
+ sql=sqlitem.statement)
+ # 高危语句
+ elif critical_ddl_regex and p.match(sql_lower.strip()):
+ check_result.is_critical = True
+ result = ReviewResult(id=line, errlevel=2,
+ stagestatus='驳回高危SQL',
+ errormessage='禁止提交匹配' + critical_ddl_regex + '条件的语句!',
+ sql=sqlitem.statement)
+                # 驳回未带where数据修改语句,如确实需做全部删除或更新,需显式地带上where 1=1
+ elif re.match(r"^update((?!where).)*$|^delete((?!where).)*$",sql_lower):
+ check_result.is_critical = True
+ result = ReviewResult(id=line, errlevel=2,
+ stagestatus='驳回未带where数据修改',
+ errormessage='数据修改需带where条件!',
+ sql=sqlitem.statement)
+ # 驳回事务控制,会话控制SQL
+ elif re.match(r"^set|^rollback|^exit", sql_lower):
+ check_result.is_critical = True
+ result = ReviewResult(id=line, errlevel=2,
+ stagestatus='SQL中不能包含^set|^rollback|^exit',
+ errormessage='SQL中不能包含^set|^rollback|^exit',
+ sql=sqlitem.statement)
+
+ #通过explain对SQL做语法语义检查
+ elif re.match(explain_re, sql_lower) and sqlitem.stmt_type == 'SQL':
+ if self.check_create_index_table(db_name=db_name,sql=sql_lower,object_name_list=object_name_list):
+ object_name = self.get_sql_first_object_name(sql=sql_lower)
+                        if '.' not in object_name:
+                            object_name = f"""{db_name}.{object_name}"""
+ object_name_list.add(object_name)
+ result = ReviewResult(id=line, errlevel=1,
+ stagestatus='WARNING:新建表的新建索引语句暂无法检测!',
+ errormessage='WARNING:新建表的新建索引语句暂无法检测!',
+ stmt_type=sqlitem.stmt_type,
+ object_owner=sqlitem.object_owner,
+ object_type=sqlitem.object_type,
+ object_name=sqlitem.object_name,
+ sql=sqlitem.statement)
+ elif len(object_name_list) > 0 and self.get_dml_table(db_name=db_name,sql=sql_lower,object_name_list=object_name_list):
+ result = ReviewResult(id=line, errlevel=1,
+ stagestatus='WARNING:新建表的数据修改暂无法检测!',
+ errormessage='WARNING:新建表的数据修改暂无法检测!',
+ stmt_type=sqlitem.stmt_type,
+ object_owner=sqlitem.object_owner,
+ object_type=sqlitem.object_type,
+ object_name=sqlitem.object_name,
+ sql=sqlitem.statement)
+ else:
+ result_set = self.explain_check(db_name=db_name, sql=sqlitem.statement, close_conn=False)
+ if result_set['msg']:
+ check_result.is_critical = True
+ result = ReviewResult(id=line, errlevel=2,
+ stagestatus='explain语法检查未通过!',
+ errormessage=result_set['msg'],
+ sql=sqlitem.statement)
+ else:
+ # 对create table\create index\create unique index语法做对象存在性检测
+ if re.match(r"^create\s+table|^create\s+index|^create\s+unique\s+index", sql_lower):
+ object_name = self.get_sql_first_object_name(sql=sql_lower)
+ # 保存create对象对后续SQL做存在性判断
+                                if '.' not in object_name:
+                                    object_name = f"""{db_name}.{object_name}"""
+ if self.object_name_check(db_name=db_name,
+ object_name=object_name) or object_name in object_name_list:
+ check_result.is_critical = True
+ result = ReviewResult(id=line, errlevel=2,
+ stagestatus=f"""{object_name}对象已经存在!""",
+ errormessage=f"""{object_name}对象已经存在!""",
+ sql=sqlitem.statement)
+ else:
+ object_name_list.add(object_name)
+ if result_set['rows'] > 1000:
+ result = ReviewResult(id=line, errlevel=1,
+ stagestatus='影响行数大于1000,请关注',
+ errormessage='影响行数大于1000,请关注',
+ sql=sqlitem.statement,
+ stmt_type=sqlitem.stmt_type,
+ object_owner=sqlitem.object_owner,
+ object_type=sqlitem.object_type,
+ object_name=sqlitem.object_name,
+ affected_rows=result_set['rows'],
+ execute_time=0, )
+ else:
+ result = ReviewResult(id=line, errlevel=0,
+ stagestatus='Audit completed',
+ errormessage='None',
+ sql=sqlitem.statement,
+ stmt_type=sqlitem.stmt_type,
+ object_owner=sqlitem.object_owner,
+ object_type=sqlitem.object_type,
+ object_name=sqlitem.object_name,
+ affected_rows=result_set['rows'],
+ execute_time=0, )
+ else:
+ if result_set['rows'] > 1000:
+ result = ReviewResult(id=line, errlevel=1,
+ stagestatus='影响行数大于1000,请关注',
+ errormessage='影响行数大于1000,请关注',
+ sql=sqlitem.statement,
+ stmt_type=sqlitem.stmt_type,
+ object_owner=sqlitem.object_owner,
+ object_type=sqlitem.object_type,
+ object_name=sqlitem.object_name,
+ affected_rows=result_set['rows'],
+ execute_time=0, )
+ else:
+ result = ReviewResult(id=line, errlevel=0,
+ stagestatus='Audit completed',
+ errormessage='None',
+ sql=sqlitem.statement,
+ stmt_type=sqlitem.stmt_type,
+ object_owner=sqlitem.object_owner,
+ object_type=sqlitem.object_type,
+ object_name=sqlitem.object_name,
+ affected_rows=result_set['rows'],
+ execute_time=0, )
+ # 其它无法用explain判断的语句
+ else:
+ # 对alter table做对象存在性检查
+ if re.match(r"^alter\s+table\s", sql_lower):
+ object_name = self.get_sql_first_object_name(sql=sql_lower)
+                        if '.' not in object_name:
+                            object_name = f"""{db_name}.{object_name}"""
+ if not self.object_name_check(db_name=db_name, object_name=object_name) and object_name not in object_name_list:
+ check_result.is_critical = True
+ result = ReviewResult(id=line, errlevel=2,
+ stagestatus=f"""{object_name}对象不存在!""",
+ errormessage=f"""{object_name}对象不存在!""",
+ sql=sqlitem.statement)
+ else:
+ result = ReviewResult(id=line, errlevel=1,
+ stagestatus='当前平台,此语法不支持审核!',
+ errormessage='当前平台,此语法不支持审核!',
+ sql=sqlitem.statement,
+ stmt_type=sqlitem.stmt_type,
+ object_owner=sqlitem.object_owner,
+ object_type=sqlitem.object_type,
+ object_name=sqlitem.object_name,
+ affected_rows=0,
+ execute_time=0, )
+ # 对create做对象存在性检查
+ elif re.match(r"^create", sql_lower):
+ object_name = self.get_sql_first_object_name(sql=sql_lower)
+                        if '.' not in object_name:
+                            object_name = f"""{db_name}.{object_name}"""
+ if self.object_name_check(db_name=db_name,
+ object_name=object_name) or object_name in object_name_list:
+ check_result.is_critical = True
+ result = ReviewResult(id=line, errlevel=2,
+ stagestatus=f"""{object_name}对象已经存在!""",
+ errormessage=f"""{object_name}对象已经存在!""",
+ sql=sqlitem.statement)
+ else:
+ object_name_list.add(object_name)
+ result = ReviewResult(id=line, errlevel=1,
+ stagestatus='当前平台,此语法不支持审核!',
+ errormessage='当前平台,此语法不支持审核!',
+ sql=sqlitem.statement,
+ stmt_type=sqlitem.stmt_type,
+ object_owner=sqlitem.object_owner,
+ object_type=sqlitem.object_type,
+ object_name=sqlitem.object_name,
+ affected_rows=0,
+ execute_time=0, )
+ else:
+ result = ReviewResult(id=line, errlevel=1,
+ stagestatus='当前平台,此语法不支持审核!',
+ errormessage='当前平台,此语法不支持审核!',
+ sql=sqlitem.statement,
+ stmt_type=sqlitem.stmt_type,
+ object_owner=sqlitem.object_owner,
+ object_type=sqlitem.object_type,
+ object_name=sqlitem.object_name,
+ affected_rows=0,
+ execute_time=0, )
+ # 判断工单类型
+ if get_syntax_type(sql=sqlitem.statement, db_type='oracle') == 'DDL':
+ check_result.syntax_type = 1
+ check_result.rows += [result]
+ # 遇到禁用和高危语句直接返回,提高效率
+ if check_result.is_critical:
+ check_result.error_count += 1
+ return check_result
+ line += 1
+ except Exception as e:
+ logger.warning(f"Oracle 语句执行报错,第{line}个SQL:{sqlitem.statement},错误信息{traceback.format_exc()}")
+ check_result.error = str(e)
+ finally:
+ if close_conn:
+ self.close()
return check_result
def execute_workflow(self, workflow, close_conn=True):
@@ -294,7 +617,7 @@ def execute_workflow(self, workflow, close_conn=True):
try:
conn = self.get_connection()
cursor = conn.cursor()
- # 获取执行工单时间,用于备份SQL的日志挖掘起始时间
+ #获取执行工单时间,用于备份SQL的日志挖掘起始时间
cursor.execute(f"alter session set nls_date_format='yyyy-mm-dd hh24:mi:ss'")
cursor.execute(f"select sysdate from dual")
rows = cursor.fetchone()
@@ -305,16 +628,17 @@ def execute_workflow(self, workflow, close_conn=True):
if sqlitem.stmt_type == "SQL":
statement = statement.rstrip(';')
with FuncTimer() as t:
- cursor.execute(statement)
- conn.commit()
+ if statement !='':
+ cursor.execute(statement)
+ conn.commit()
rowcount = cursor.rowcount
stagestatus = "Execute Successfully"
if sqlitem.stmt_type == "PLSQL" and sqlitem.object_name and sqlitem.object_name != 'ANONYMOUS' and sqlitem.object_name != '':
query_obj_sql = f"""SELECT OBJECT_NAME, STATUS, TO_CHAR(LAST_DDL_TIME, 'YYYY-MM-DD HH24:MI:SS') FROM ALL_OBJECTS
- WHERE OWNER = '{sqlitem.object_owner}'
- AND OBJECT_NAME = '{sqlitem.object_name}'
- """
+ WHERE OWNER = '{sqlitem.object_owner}'
+ AND OBJECT_NAME = '{sqlitem.object_name}'
+ """
cursor.execute(query_obj_sql)
row = cursor.fetchone()
if row:
@@ -333,13 +657,14 @@ def execute_workflow(self, workflow, close_conn=True):
stagestatus=stagestatus,
errormessage='None',
sql=statement,
- affected_rows=rowcount,
+ affected_rows=cursor.rowcount,
execute_time=t.cost,
))
line += 1
except Exception as e:
logger.warning(f"Oracle命令执行报错,语句:{statement or sql}, 错误信息:{traceback.format_exc()}")
execute_result.error = str(e)
+ #conn.rollback()
# 追加当前报错语句信息到执行结果中
execute_result.rows.append(ReviewResult(
id=line,
@@ -352,7 +677,7 @@ def execute_workflow(self, workflow, close_conn=True):
))
line += 1
# 报错语句后面的语句标记为审核通过、未执行,追加到执行结果中
- for sqlitem in sqlitemList[line - 1:]:
+ for sqlitem in sqlitemList[line - 1:]:
execute_result.rows.append(ReviewResult(
id=line,
errlevel=0,
@@ -367,30 +692,38 @@ def execute_workflow(self, workflow, close_conn=True):
cursor.execute(f"select sysdate from dual")
rows = cursor.fetchone()
end_time = rows[0]
- self.backup(workflow_id=workflow.id, cursor=cursor, begin_time=begin_time, end_time=end_time)
+ self.backup(workflow_id=workflow.id,cursor=cursor,begin_time=begin_time,end_time=end_time)
if close_conn:
self.close()
return execute_result
def backup(self,workflow_id,cursor,begin_time,end_time):
- # 回滚SQL入库
+ """
+ :param workflow_id: 工单流程ID,作为备份记录与工单的关联列
+ :param cursor: 执行SQL的当前会话游标
+ :param begin_time: 执行SQL开始时间
+ :param end_time: 执行SQL结束时间
+ :return:
+ """
+        # add Jan.song 20200402
# 生成回滚SQL,执行用户需要有grant select any transaction to 权限,需要有grant execute on dbms_logmnr to权限
# 数据库需开启最小化附加日志alter database add supplemental log data;
# 需为归档模式;开启附件日志会增加redo日志量,一般不会有多大影响,需评估归档磁盘空间,redo磁盘IO性能
- # 创建备份库连接
try:
+ #备份存放数据库和MySQL备份库统一,需新建备份用database和table,table存放备份SQL,记录使用workflow.id关联上线工单
conn = self.get_backup_connection()
- cur = conn.cursor()
- cur.execute(f"""create database if not exists ora_backup;""")
- cur.execute(f"use ora_backup;")
- cur.execute(f"""CREATE TABLE if not exists `sql_rollback` (
- `id` bigint(20) NOT NULL AUTO_INCREMENT,
- `redo_sql` mediumtext,
- `undo_sql` mediumtext,
- `workflow_id` bigint(20) NOT NULL,
- PRIMARY KEY (`id`),
- key `idx_sql_rollback_01` (`workflow_id`)
- ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;""")
+ backup_cursor = conn.cursor()
+ backup_cursor.execute(f"""create database if not exists ora_backup;""")
+ backup_cursor.execute(f"use ora_backup;")
+ backup_cursor.execute(f"""CREATE TABLE if not exists `sql_rollback` (
+ `id` bigint(20) NOT NULL AUTO_INCREMENT,
+ `redo_sql` mediumtext,
+ `undo_sql` mediumtext,
+ `workflow_id` bigint(20) NOT NULL,
+ PRIMARY KEY (`id`),
+ key `idx_sql_rollback_01` (`workflow_id`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;""")
+ #使用logminer抓取回滚SQL
logmnr_start_sql = f'''begin
dbms_logmnr.start_logmnr(
starttime=>to_date('{begin_time}','yyyy-mm-dd hh24:mi:ss'),
@@ -417,8 +750,9 @@ def backup(self,workflow_id,cursor,begin_time,end_time):
else:
undo_sql=f"{row[1]}"
undo_sql=undo_sql.replace("'","\\'")
+ #回滚SQL入库
sql = f"""insert into sql_rollback(redo_sql,undo_sql,workflow_id) values('{redo_sql}','{undo_sql}',{workflow_id});"""
- cur.execute(sql)
+ backup_cursor.execute(sql)
except Exception as e:
logger.warning(f"备份失败,错误信息{traceback.format_exc()}")
return False
@@ -430,6 +764,7 @@ def backup(self,workflow_id,cursor,begin_time,end_time):
def get_rollback(self, workflow):
"""
+ add by Jan.song 20200402
获取回滚语句,并且按照执行顺序倒序展示,return ['源语句','回滚语句']
"""
list_execute_result = json.loads(workflow.sqlworkflowcontent.execute_result)
@@ -455,9 +790,66 @@ def get_rollback(self, workflow):
# 关闭连接
if conn:
conn.close()
- return list_backup_sql
+ return list_backup_sql
+
+ def sqltuningadvisor(self, db_name=None, sql='', close_conn=True, **kwargs):
+ """
+ add by Jan.song 20200421
+ 使用DBMS_SQLTUNE包做sql tuning支持
+        执行用户需要有advisor角色
+ 返回 ResultSet
+ """
+ result_set = ResultSet(full_sql=sql)
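+        # the current thread ident makes the tuning task name unique, so concurrent requests do not collide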
+ task_name = 'sqlaudit'+f'''{threading.currentThread().ident}'''
+ task_begin = 0
+ try:
+ conn = self.get_connection()
+ cursor = conn.cursor()
+ sql = sql.rstrip(';')
+ # 创建分析任务
+ create_task_sql = f'''DECLARE
+ my_task_name VARCHAR2(30);
+ my_sqltext CLOB;
+ BEGIN
+ my_sqltext := '{sql}';
+ my_task_name := DBMS_SQLTUNE.CREATE_TUNING_TASK(
+ sql_text => my_sqltext,
+ user_name => '{db_name}',
+ scope => 'COMPREHENSIVE',
+ time_limit => 30,
+ task_name => '{task_name}',
+ description => 'tuning');
+ DBMS_SQLTUNE.EXECUTE_TUNING_TASK( task_name => '{task_name}');
+ END;'''
+ task_begin = 1
+ cursor.execute(create_task_sql)
+ # 获取分析报告
+ get_task_sql = f'''select DBMS_SQLTUNE.REPORT_TUNING_TASK( '{task_name}') from dual'''
+ cursor.execute(get_task_sql)
+ fields = cursor.description
+ if any(x[1] == cx_Oracle.CLOB for x in fields):
+ rows = [tuple([(c.read() if type(c) == cx_Oracle.LOB else c) for c in r]) for r in cursor]
+ else:
+ rows = cursor.fetchall()
+ result_set.column_list = [i[0] for i in fields] if fields else []
+ result_set.rows = [tuple(x) for x in rows]
+ result_set.affected_rows = len(result_set.rows)
+ except Exception as e:
+ logger.warning(f"Oracle 语句执行报错,语句:{sql},错误信息{traceback.format_exc()}")
+ result_set.error = str(e)
+ finally:
+ #结束分析任务
+ if task_begin == 1:
+ end_sql = f'''DECLARE
+ begin
+ dbms_sqltune.drop_tuning_task('{task_name}');
+ end;'''
+ cursor.execute(end_sql)
+ if close_conn:
+ self.close()
+ return result_set
def close(self):
if self.conn:
self.conn.close()
- self.conn = None
+ self.conn = None
\ No newline at end of file