diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml
index 315d2c78e..f1f45affc 100644
--- a/.github/workflows/codeql.yml
+++ b/.github/workflows/codeql.yml
@@ -40,7 +40,7 @@ jobs:

     steps:
     - name: Checkout repository
-      uses: actions/checkout@v3
+      uses: actions/checkout@v4

     # Initializes the CodeQL tools for scanning.
     - name: Initialize CodeQL
@@ -52,7 +52,8 @@ jobs:
         # Prefix the list here with "+" to use these queries and those in the config file.
         # Details on CodeQL's query packs refer to : https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
-        # queries: security-extended,security-and-quality
+        queries: security-extended,security-and-quality
+        packs: "codeql/${{ matrix.language }}-queries:AlertSuppression.ql"

     # Autobuild attempts to build any compiled languages (C/C++, C#, Go, or Java).
@@ -71,6 +72,15 @@ jobs:
     #   ./location_of_script_within_repo/buildscript.sh

     - name: Perform CodeQL Analysis
+      id: analyze
       uses: github/codeql-action/analyze@v3
       with:
-        category: "/language:${{matrix.language}}"
+        output: ${{ matrix.language }}-sarif-results
+
+    - name: Dismiss alerts
+      uses: advanced-security/dismiss-alerts@v1
+      with:
+        sarif-id: ${{ steps.analyze.outputs.sarif-id }}
+        sarif-file: ${{ matrix.language }}-sarif-results/${{ matrix.language }}.sarif
+      env:
+        GITHUB_TOKEN: ${{ github.token }}
diff --git a/examples/directml/llama_v2/llama_v2.py b/examples/directml/llama_v2/llama_v2.py
index 771a41bc6..0782e5144 100644
--- a/examples/directml/llama_v2/llama_v2.py
+++ b/examples/directml/llama_v2/llama_v2.py
@@ -140,6 +140,7 @@ def download_checkpoint(model_type: str):
     opener.addheaders = [("User-agent", "wget")]
     urllib.request.install_opener(opener)

+    email_url = None
     if not (
         license_path.is_file() and use_policy_path.is_file() and tokenizer_path.is_file() and weights_path.is_file()
     ):
diff --git a/examples/gptj/user_script.py b/examples/gptj/user_script.py
index e74520a96..a33740b13 100644
--- a/examples/gptj/user_script.py
+++ b/examples/gptj/user_script.py
@@ -92,6 +92,7 @@ def create_onnx_dataloader(data_dir, batch_size, *args, **kwargs):

 def create_dataloader(data_dir, batch_size, *args, **kwargs):
     model_framework = kwargs.pop("model_framework")
+    dataloader = None
     if model_framework == Framework.ONNX:
         dataloader = create_onnx_dataloader(data_dir, batch_size)
     elif model_framework == Framework.PYTORCH:
diff --git a/examples/inception/download_files.py b/examples/inception/download_files.py
index c249c6807..cfced79a4 100644
--- a/examples/inception/download_files.py
+++ b/examples/inception/download_files.py
@@ -47,7 +47,7 @@ def download_model():
     request.urlretrieve(inception_v3_archive_url, inception_v3_archive_path)

     with tarfile.open(inception_v3_archive_path) as tar_ref:
-        tar_ref.extractall(stage_dir)
+        tar_ref.extractall(stage_dir)  # lgtm

     model_path = models_dir / "inception_v3.pb"
     if model_path.exists():
diff --git a/examples/mobilenet/download_files.py b/examples/mobilenet/download_files.py
index a189c8b0c..5730face8 100644
--- a/examples/mobilenet/download_files.py
+++ b/examples/mobilenet/download_files.py
@@ -51,7 +51,7 @@ def download_model():
     request.urlretrieve(mobilenet_archive_url, mobilenet_archive_path)

     with tarfile.open(mobilenet_archive_path) as tar_ref:
-        tar_ref.extractall(stage_dir)
+        tar_ref.extractall(stage_dir)  # lgtm
     original_model_path = stage_dir / mobilenet_name / f"{mobilenet_name}.onnx"
     model_path = models_dir / f"{mobilenet_name}.onnx"
     if model_path.exists():
diff --git a/examples/open_llama/user_script.py b/examples/open_llama/user_script.py
index 20da9bc4b..e1e0eab8c 100644
--- a/examples/open_llama/user_script.py
+++ b/examples/open_llama/user_script.py
@@ -134,6 +134,7 @@ def calib_dataloader(data_dir, batch_size, *args, **kwargs):
 def eval_accuracy(model: OliveModelHandler, data_dir, batch_size, device, execution_providers):
     from intel_extension_for_transformers.llm.evaluation.lm_eval import evaluate

+    results = {}
     if model.framework == Framework.PYTORCH:
         results = evaluate(
             model="hf-causal",
diff --git a/examples/resnet/prepare_model_data.py b/examples/resnet/prepare_model_data.py
index 95740d1b4..63c18faae 100644
--- a/examples/resnet/prepare_model_data.py
+++ b/examples/resnet/prepare_model_data.py
@@ -151,8 +151,8 @@ def main():
         data_download_path = data_dir / "cifar-10-python.tar.gz"
         urllib.request.urlretrieve("https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz", data_download_path)
-        with tarfile.open(data_download_path) as file:
-            file.extractall(data_dir)
+        with tarfile.open(data_download_path) as tar:
+            tar.extractall(data_dir)  # lgtm

     prepare_model(args.num_epochs, models_dir, data_dir)
diff --git a/examples/stable_diffusion/stable_diffusion.py b/examples/stable_diffusion/stable_diffusion.py
index a14c79523..ebc0caf8a 100644
--- a/examples/stable_diffusion/stable_diffusion.py
+++ b/examples/stable_diffusion/stable_diffusion.py
@@ -351,6 +351,7 @@ def main(raw_args=None):
         guidance_scale = 0.0
         print(f"WARNING: Classifier free guidance has been forcefully disabled since {model_id} doesn't support it.")

+    ov_args, ort_args = None, None
     if provider == "openvino":
         ov_args, extra_args = parse_ov_args(extra_args)
     else:
@@ -381,6 +382,7 @@ def main(raw_args=None):
             pipeline = get_ort_pipeline(model_dir, common_args, ort_args, guidance_scale)

     if provider == "openvino" and (ov_args.image_path or ov_args.img_to_img_example):
+        res = None
         if ov_args.image_path:
             from sd_utils.ov import run_ov_image_inference
diff --git a/examples/whisper/test_transcription.py b/examples/whisper/test_transcription.py
index ea8336edd..73801f6d6 100644
--- a/examples/whisper/test_transcription.py
+++ b/examples/whisper/test_transcription.py
@@ -82,6 +82,7 @@ def main(raw_args=None):

     # load output model json
     output_model_json_path = Path(config["engine"]["output_dir"])
+    output_model_json = {}
     for model_json in output_model_json_path.glob(
         f"**/{config['engine']['output_name']}_{accelerator_spec}_model.json"
     ):
diff --git a/olive/passes/onnx/vitis_ai/refine.py b/olive/passes/onnx/vitis_ai/refine.py
index e44fe9bac..41193372e 100644
--- a/olive/passes/onnx/vitis_ai/refine.py
+++ b/olive/passes/onnx/vitis_ai/refine.py
@@ -287,7 +287,7 @@ def adjust_shift_read(self):
             iposes = []
             skip = False

-            for i in len(node.input):
+            for i in range(len(node.input)):
                 ipos_name = self.get_ipos_name_by_id(node, i)
                 ipos_layers.append(ipos_name)
             for i in ipos_layers:
@@ -337,22 +337,22 @@ def adjust_shift_write(self):
         DPU compiler constraints of shift_write:
         1. -15 <= shift_write <= 15
         """
-        for i, node in enumerate(self.model.model.graph.node):
+        for node in self.model.model.graph.node:
             if node.op_type not in ["Add"] or node.op_type not in ["Mul"]:
                 continue
             ipos_layers = []
             iposes = []
             skip = False

-            for i in len(node.input):
-                ipos_name = self.get_ipos_name_by_id(node, i)
+            for input_id in range(len(node.input)):
+                ipos_name = self.get_ipos_name_by_id(node, input_id)
                 ipos_layers.append(ipos_name)
-            for i in ipos_layers:
-                ipos, _ = self.get_pos_by_name(i)
+            for layer_id in ipos_layers:
+                ipos, _ = self.get_pos_by_name(layer_id)
                 if ipos is None:
                     logger.info(
                         "Fail to get quantize position for layer {}(input:{}) (output of layer {}), "
-                        "skip adjust_shift_read for it.".format(ipos_layers[i], i, ipos_layers[i])
+                        "skip adjust_shift_read for it.".format(ipos_layers[layer_id], layer_id, ipos_layers[layer_id])
                     )
                     skip = True
                 iposes.append(ipos)
@@ -395,7 +395,7 @@ def adjust_shift_write(self):

     def align_concat(self):
         """Align concat op's inputs and output pos."""
-        for i, node in enumerate(self.model.model.graph.node):
+        for node in self.model.model.graph.node:
             if node.op_type not in ["Concat"]:
                 continue
             input_node_num = len(node.input)
@@ -404,8 +404,8 @@ def align_concat(self):
             min_pos = opos
             ipos_layers = []

-            for i in range(input_node_num):
-                ipos_name = self.get_ipos_name_by_id(node, i)
+            for input_id in range(input_node_num):
+                ipos_name = self.get_ipos_name_by_id(node, input_id)
                 ipos_layers.append(ipos_name)
             for name in ipos_layers:
                 ipos, _ = self.get_pos_by_name(name)
diff --git a/olive/passes/pytorch/lora.py b/olive/passes/pytorch/lora.py
index 136123737..662dc0f4f 100644
--- a/olive/passes/pytorch/lora.py
+++ b/olive/passes/pytorch/lora.py
@@ -666,7 +666,7 @@ def train_and_save_new_model(
         logger.debug("train_result: %s", train_result)

         if torch.cuda.is_available():
-            torch.backends.cuda.matmul.allow_tf32 = allow_tf32
+            torch.backends.cuda.matmul.allow_tf32 = allow_tf32  # lgtm

         # save adapter weights
         adapter_path = Path(output_model_path) / "adapter"
diff --git a/olive/platform_sdk/qualcomm/configure.py b/olive/platform_sdk/qualcomm/configure.py
index 8f985c8eb..4945b4be7 100644
--- a/olive/platform_sdk/qualcomm/configure.py
+++ b/olive/platform_sdk/qualcomm/configure.py
@@ -34,6 +34,7 @@ def dev(args):
         return

     logger.info("Configuring %s for %s with python %s...", args.sdk, sdk_arch, args.py_version)
+    cmd = None
     with resources.path(resource_path, script_name) as create_python_env_path:
         if platform.system() == "Linux":
             cmd = f"bash {create_python_env_path} -v {args.py_version} --sdk {args.sdk}"
diff --git a/olive/platform_sdk/qualcomm/snpe/tools/inference.py b/olive/platform_sdk/qualcomm/snpe/tools/inference.py
index 8dc056951..4926b2ea8 100644
--- a/olive/platform_sdk/qualcomm/snpe/tools/inference.py
+++ b/olive/platform_sdk/qualcomm/snpe/tools/inference.py
@@ -213,6 +213,7 @@ def snpe_net_run(
     input_ids = get_input_ids(input_list)

     # get the delimiter for the output files
+    delimiter = None
     if platform.system() == "Linux":
         delimiter = ":"
     elif platform.system() == "Windows":
@@ -414,6 +415,7 @@ def snpe_throughput_net_run(
     cmd = f"snpe-throughput-net-run --container {dlc_path} --duration {duration} --use_{device}"

     input_raw = ""
+    first = ""
     with Path(input_list).open() as f:
         for line in f:
             if line.startswith(("#", "%")):
diff --git a/test/unit_test/systems/isolated_ort/test_isolated_ort_system.py b/test/unit_test/systems/isolated_ort/test_isolated_ort_system.py
index 2e8a67f2b..fb92fff66 100644
--- a/test/unit_test/systems/isolated_ort/test_isolated_ort_system.py
+++ b/test/unit_test/systems/isolated_ort/test_isolated_ort_system.py
@@ -175,15 +175,15 @@ def test_inference_runner_with_run(self, mock_wrapper_class, mock_get_session, t
         # setup
         mock_wrapper = MagicMock()
         mock_wrapper_class.return_value = mock_wrapper
+        dummy_latencies = [1, 2, 3, 4]
+        dummy_output = np.array([1, 2])
+        sleep_time = 0
+        num_runs = 4
+        num_warmup = 2
+        num_batches = 3
         if mode == "inference":
-            num_batches = 3
-            dummy_output = np.array([1, 2])
             mock_wrapper.run.return_value = dummy_output
         else:
-            num_runs = 4
-            num_warmup = 2
-            sleep_time = 0
-            dummy_latencies = [1, 2, 3, 4]
             mock_wrapper.time_run.return_value = dummy_latencies

         model = "model.onnx"