Fix errors from CodeQL scanning #1081

Merged · 3 commits · Apr 16, 2024
16 changes: 13 additions & 3 deletions .github/workflows/codeql.yml
@@ -40,7 +40,7 @@ jobs:

steps:
- name: Checkout repository
uses: actions/checkout@v3
uses: actions/checkout@v4

# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
@@ -52,7 +52,8 @@ jobs:
# Prefix the list here with "+" to use these queries and those in the config file.

# Details on CodeQL's query packs refer to : https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
# queries: security-extended,security-and-quality
queries: security-extended,security-and-quality
packs: "codeql/${{ matrix.language }}-queries:AlertSuppression.ql"


# Autobuild attempts to build any compiled languages (C/C++, C#, Go, or Java).
@@ -71,6 +72,15 @@ jobs:
# ./location_of_script_within_repo/buildscript.sh

- name: Perform CodeQL Analysis
id: analyze
uses: github/codeql-action/analyze@v3
with:
category: "/language:${{matrix.language}}"
output: ${{ matrix.language }}-sarif-results

- name: Dismiss alerts
uses: advanced-security/dismiss-alerts@v1
with:
sarif-id: ${{ steps.analyze.outputs.sarif-id }}
sarif-file: ${{ matrix.language }}-sarif-results/${{ matrix.language }}.sarif
env:
GITHUB_TOKEN: ${{ github.token }}
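
Taken together, the workflow changes wire up comment-based alert suppression: `queries` now runs the extended suites, the AlertSuppression.ql pack records inline suppression comments in the SARIF output, and the dismiss-alerts action reads that SARIF (via the `sarif-id` output of the analyze step) to auto-dismiss the matching alerts. A minimal sketch of what a suppressed finding looks like in source, assuming `# lgtm` is the comment marker the query recognizes (the same marker used in the Python changes below):

```python
# Hypothetical example: the AlertSuppression.ql query picks up the inline
# "# lgtm" comment, records it in the SARIF results, and the dismiss-alerts
# step then closes the corresponding code-scanning alert.
import tarfile

def extract(archive: str, dest: str) -> None:
    with tarfile.open(archive) as tar:
        tar.extractall(dest)  # lgtm
```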
1 change: 1 addition & 0 deletions examples/directml/llama_v2/llama_v2.py
@@ -140,6 +140,7 @@ def download_checkpoint(model_type: str):
opener.addheaders = [("User-agent", "wget")]
urllib.request.install_opener(opener)

email_url = None
if not (
license_path.is_file() and use_policy_path.is_file() and tokenizer_path.is_file() and weights_path.is_file()
):
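
This one-line fix illustrates the pattern used in most of the Python changes in this PR: bind a variable to a default before a conditional chain so it is defined on every path, which resolves CodeQL's use-before-definition warnings. A self-contained sketch of the pattern, with hypothetical names:

```python
# Hypothetical sketch of the initialize-before-branch pattern applied in
# llama_v2.py, gptj/user_script.py, open_llama/user_script.py, and others.
def pick_dataloader(framework: str) -> str:
    dataloader = None  # bound on every path, so no use-before-definition
    if framework == "onnx":
        dataloader = "onnx-dataloader"
    elif framework == "pytorch":
        dataloader = "pytorch-dataloader"
    if dataloader is None:
        raise ValueError(f"unsupported framework: {framework}")
    return dataloader
```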
1 change: 1 addition & 0 deletions examples/gptj/user_script.py
@@ -92,6 +92,7 @@ def create_onnx_dataloader(data_dir, batch_size, *args, **kwargs):

def create_dataloader(data_dir, batch_size, *args, **kwargs):
model_framework = kwargs.pop("model_framework")
dataloader = None
if model_framework == Framework.ONNX:
dataloader = create_onnx_dataloader(data_dir, batch_size)
elif model_framework == Framework.PYTORCH:
2 changes: 1 addition & 1 deletion examples/inception/download_files.py
@@ -47,7 +47,7 @@ def download_model():
request.urlretrieve(inception_v3_archive_url, inception_v3_archive_path)

with tarfile.open(inception_v3_archive_path) as tar_ref:
tar_ref.extractall(stage_dir)
tar_ref.extractall(stage_dir) # lgtm

model_path = models_dir / "inception_v3.pb"
if model_path.exists():
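
The `# lgtm` comment here suppresses CodeQL's tar-slip warning: `extractall` on a malicious archive can write outside the destination directory via `../` members. Suppression is defensible for an archive fetched from a pinned URL; a stricter alternative, sketched below assuming Python 3.9+ for `Path.is_relative_to`, validates members first (on Python 3.12+, `tar.extractall(dest, filter="data")` provides similar protection natively):

```python
import tarfile
from pathlib import Path

def safe_extractall(archive: Path, dest: Path) -> None:
    """Extract a tar archive, rejecting members that would escape dest."""
    dest = dest.resolve()
    with tarfile.open(archive) as tar:
        for member in tar.getmembers():
            target = (dest / member.name).resolve()
            if not target.is_relative_to(dest):  # blocks ../ path traversal
                raise ValueError(f"unsafe path in archive: {member.name}")
        tar.extractall(dest)
```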
2 changes: 1 addition & 1 deletion examples/mobilenet/download_files.py
@@ -51,7 +51,7 @@ def download_model():
request.urlretrieve(mobilenet_archive_url, mobilenet_archive_path)

with tarfile.open(mobilenet_archive_path) as tar_ref:
tar_ref.extractall(stage_dir)
tar_ref.extractall(stage_dir) # lgtm
original_model_path = stage_dir / mobilenet_name / f"{mobilenet_name}.onnx"
model_path = models_dir / f"{mobilenet_name}.onnx"
if model_path.exists():
1 change: 1 addition & 0 deletions examples/open_llama/user_script.py
@@ -134,6 +134,7 @@ def calib_dataloader(data_dir, batch_size, *args, **kwargs):
def eval_accuracy(model: OliveModelHandler, data_dir, batch_size, device, execution_providers):
from intel_extension_for_transformers.llm.evaluation.lm_eval import evaluate

results = {}
if model.framework == Framework.PYTORCH:
results = evaluate(
model="hf-causal",
4 changes: 2 additions & 2 deletions examples/resnet/prepare_model_data.py
@@ -151,8 +151,8 @@ def main():

data_download_path = data_dir / "cifar-10-python.tar.gz"
urllib.request.urlretrieve("https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz", data_download_path)
with tarfile.open(data_download_path) as file:
file.extractall(data_dir)
with tarfile.open(data_download_path) as tar:
tar.extractall(data_dir) # lgtm

prepare_model(args.num_epochs, models_dir, data_dir)

2 changes: 2 additions & 0 deletions examples/stable_diffusion/stable_diffusion.py
@@ -351,6 +351,7 @@ def main(raw_args=None):
guidance_scale = 0.0
print(f"WARNING: Classifier free guidance has been forcefully disabled since {model_id} doesn't support it.")

ov_args, ort_args = None, None
if provider == "openvino":
ov_args, extra_args = parse_ov_args(extra_args)
else:
@@ -381,6 +382,7 @@

pipeline = get_ort_pipeline(model_dir, common_args, ort_args, guidance_scale)
if provider == "openvino" and (ov_args.image_path or ov_args.img_to_img_example):
res = None
if ov_args.image_path:
from sd_utils.ov import run_ov_image_inference

1 change: 1 addition & 0 deletions examples/whisper/test_transcription.py
@@ -82,6 +82,7 @@ def main(raw_args=None):

# load output model json
output_model_json_path = Path(config["engine"]["output_dir"])
output_model_json = {}
for model_json in output_model_json_path.glob(
f"**/{config['engine']['output_name']}_{accelerator_spec}_model.json"
):
20 changes: 10 additions & 10 deletions olive/passes/onnx/vitis_ai/refine.py
@@ -287,7 +287,7 @@ def adjust_shift_read(self):
iposes = []
skip = False

for i in len(node.input):
for i in range(len(node.input)):
ipos_name = self.get_ipos_name_by_id(node, i)
ipos_layers.append(ipos_name)
for i in ipos_layers:
@@ -337,22 +337,22 @@ def adjust_shift_write(self):
DPU compiler constraints of shift_write:
1. -15 <= shift_write <= 15
"""
for i, node in enumerate(self.model.model.graph.node):
for node in self.model.model.graph.node:
if node.op_type not in ["Add"] or node.op_type not in ["Mul"]:
continue
ipos_layers = []
iposes = []
skip = False

for i in len(node.input):
ipos_name = self.get_ipos_name_by_id(node, i)
for input_id in range(len(node.input)):
ipos_name = self.get_ipos_name_by_id(node, input_id)
ipos_layers.append(ipos_name)
for i in ipos_layers:
ipos, _ = self.get_pos_by_name(i)
for layer_id in ipos_layers:
ipos, _ = self.get_pos_by_name(layer_id)
if ipos is None:
logger.info(
"Fail to get quantize position for layer {}(input:{}) (output of layer {}), "
"skip adjust_shift_read for it.".format(ipos_layers[i], i, ipos_layers[i])
"skip adjust_shift_read for it.".format(ipos_layers[layer_id], layer_id, ipos_layers[layer_id])
)
skip = True
iposes.append(ipos)
@@ -395,7 +395,7 @@ def adjust_shift_write(self):

def align_concat(self):
"""Align concat op's inputs and output pos."""
for i, node in enumerate(self.model.model.graph.node):
for node in self.model.model.graph.node:
if node.op_type not in ["Concat"]:
continue
input_node_num = len(node.input)
@@ -404,8 +404,8 @@ def align_concat(self):
min_pos = opos
ipos_layers = []

for i in range(input_node_num):
ipos_name = self.get_ipos_name_by_id(node, i)
for input_id in range(input_node_num):
ipos_name = self.get_ipos_name_by_id(node, input_id)
ipos_layers.append(ipos_name)
for name in ipos_layers:
ipos, _ = self.get_pos_by_name(name)
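
Two related loop bugs are fixed in this file. `for i in len(node.input)` raises `TypeError: 'int' object is not iterable` at runtime, so `range()` is required; and reusing `i` both as an integer index and as the element variable of the following loop meant later subscripts like `ipos_layers[i]` indexed a list with a string. Renaming to `input_id` and `layer_id` keeps the two roles apart. A minimal standalone illustration:

```python
# Hypothetical illustration of the two loop bugs fixed in refine.py.
node_inputs = ["conv1_quant", "conv2_quant"]

# Bug 1: `for i in len(node_inputs)` is a TypeError; range() is needed.
for input_id in range(len(node_inputs)):
    print(f"input {input_id}: {node_inputs[input_id]}")

# Bug 2: a second `for i in node_inputs` would rebind i to a string, so a
# later node_inputs[i] fails. Distinct names avoid the silent shadowing.
for layer_id in node_inputs:
    print(f"layer: {layer_id}")
```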
2 changes: 1 addition & 1 deletion olive/passes/pytorch/lora.py
@@ -666,7 +666,7 @@ def train_and_save_new_model(
logger.debug("train_result: %s", train_result)

if torch.cuda.is_available():
torch.backends.cuda.matmul.allow_tf32 = allow_tf32
torch.backends.cuda.matmul.allow_tf32 = allow_tf32 # lgtm

# save adapter weights
adapter_path = Path(output_model_path) / "adapter"
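
The suppressed line appears to restore a previously captured TF32 matmul setting once training finishes. Mutating `torch.backends.cuda.matmul.allow_tf32` is process-global, so the usual discipline is save, set, restore; a hedged sketch of that pattern (not the PR's exact code):

```python
import torch

# Sketch of the save/set/restore discipline around the global TF32 flag.
if torch.cuda.is_available():
    saved_allow_tf32 = torch.backends.cuda.matmul.allow_tf32  # save
    torch.backends.cuda.matmul.allow_tf32 = True              # set for training
    try:
        ...  # run training here
    finally:
        torch.backends.cuda.matmul.allow_tf32 = saved_allow_tf32  # restore
```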
1 change: 1 addition & 0 deletions olive/platform_sdk/qualcomm/configure.py
@@ -34,6 +34,7 @@ def dev(args):
return

logger.info("Configuring %s for %s with python %s...", args.sdk, sdk_arch, args.py_version)
cmd = None
with resources.path(resource_path, script_name) as create_python_env_path:
if platform.system() == "Linux":
cmd = f"bash {create_python_env_path} -v {args.py_version} --sdk {args.sdk}"
2 changes: 2 additions & 0 deletions olive/platform_sdk/qualcomm/snpe/tools/inference.py
@@ -213,6 +213,7 @@ def snpe_net_run(
input_ids = get_input_ids(input_list)

# get the delimiter for the output files
delimiter = None
if platform.system() == "Linux":
delimiter = ":"
elif platform.system() == "Windows":
@@ -414,6 +415,7 @@ def snpe_throughput_net_run(
cmd = f"snpe-throughput-net-run --container {dlc_path} --duration {duration} --use_{device}"

input_raw = ""
first = ""
with Path(input_list).open() as f:
for line in f:
if line.startswith(("#", "%")):
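
Both fixes in this file follow the initialize-first pattern again, but the `delimiter` case is worth a note: without the default, any OS other than Linux or Windows would crash with `NameError` at the first use, whereas the default turns the failure into a `None` that can be checked explicitly. A sketch with such a guard (the Windows value is illustrative, since it falls outside the shown hunk):

```python
import platform

# Sketch: default first, then an explicit error for unexpected platforms.
delimiter = None
if platform.system() == "Linux":
    delimiter = ":"   # value from the diff
elif platform.system() == "Windows":
    delimiter = ","   # illustrative; the real value is not shown in this hunk
if delimiter is None:
    raise RuntimeError(f"unsupported platform: {platform.system()}")
```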
12 changes: 6 additions & 6 deletions test/unit_test/systems/isolated_ort/test_isolated_ort_system.py
@@ -175,15 +175,15 @@ def test_inference_runner_with_run(self, mock_wrapper_class, mock_get_session, t
# setup
mock_wrapper = MagicMock()
mock_wrapper_class.return_value = mock_wrapper
dummy_latencies = [1, 2, 3, 4]
dummy_output = np.array([1, 2])
sleep_time = 0
num_runs = 4
num_warmup = 2
num_batches = 3
if mode == "inference":
num_batches = 3
dummy_output = np.array([1, 2])
mock_wrapper.run.return_value = dummy_output
else:
num_runs = 4
num_warmup = 2
sleep_time = 0
dummy_latencies = [1, 2, 3, 4]
mock_wrapper.time_run.return_value = dummy_latencies

model = "model.onnx"