From 6b526fadd5dc06142417028517c1aab542b55652 Mon Sep 17 00:00:00 2001 From: Kyongsik Yun Date: Wed, 14 Aug 2024 22:06:47 -0700 Subject: [PATCH 01/10] Add files via upload --- slim-cli.py | 161 ++++++++++++++++++++++++++++++++++++++-------------- 1 file changed, 117 insertions(+), 44 deletions(-) diff --git a/slim-cli.py b/slim-cli.py index 44fcd82..a88dbc5 100644 --- a/slim-cli.py +++ b/slim-cli.py @@ -22,7 +22,8 @@ SLIM_REGISTRY_URI = "https://raw.githubusercontent.com/NASA-AMMOS/slim/issue-154/static/data/slim-registry.json" SUPPORTED_MODELS = { "openai": ["gpt-3.5-turbo", "gpt-4o"], - "ollama": ["llama2", "mistral", "codellama"], + "ollama": ["llama3.1", "mistral", "codellama"], + "azure" : ["gpt-3.5-turbo", "gpt-4o"], # Add more models as needed } GIT_BRANCH_NAME_FOR_MULTIPLE_COMMITS = 'slim-best-practices' @@ -120,17 +121,20 @@ def use_ai(best_practice_id: str, repo_path: str, template_path: str, model: str # Fetch the code base for SLIM-3.1 readme (limited to specific file types) if best_practice_id == 'SLIM-1.1': #governance reference = fetch_readme(repo_path) + # Construct the prompt for the AI + prompt = construct_prompt(template_content, best_practice, reference) elif best_practice_id == 'SLIM-3.1': #readme reference = fetch_code_base(repo_path) + # Construct the prompt for the AI + prompt = construct_prompt(template_content, best_practice, reference) else: reference = fetch_readme(repo_path) + # Construct the prompt for the AI + prompt = construct_prompt(template_content, best_practice, reference) if not reference: return None - # Construct the prompt for the AI - prompt = construct_prompt(template_content, best_practice, reference) - print("prompt: ") - print(prompt) + # Generate the content using the specified model new_content = generate_content(prompt, model) #print("output: ") @@ -162,11 +166,14 @@ def fetch_code_base(repo_path: str) -> Optional[str]: def construct_prompt(template_content: str, best_practice: Dict[str, Any], reference: str) -> str: 
return ( f"Fill out all blanks in the template below that start with INSERT. Return the result as Markdown code.\n\n" - #f"Best Practice: {best_practice['title']}\n" - #f"Description: {best_practice['description']}\n\n" + ##f"Best Practice: {best_practice['title']}\n" + ##f"Description: {best_practice['description']}\n\n" f"Template and output format:\n{template_content}\n\n" f"Use the info:\n{reference}...\n\n" f"Show only the updated template output as markdown code." + + #f"Use the info:\n{reference}...\n\n" + #f"Generate unit tests" ) def generate_content(prompt: str, model: str) -> Optional[str]: @@ -182,6 +189,16 @@ def generate_content(prompt: str, model: str) -> Optional[str]: print("\nError occurred during generation.") print() # Print a newline at the end return ''.join(collected_response) + elif model_provider == "azure": + collected_response = [] + for token in generate_with_azure(prompt, model_name): + if token is not None: + print(token, end='', flush=True) + collected_response.append(token) + else: + print("\nError occurred during generation.") + print() # Print a newline at the end + return ''.join(collected_response) elif model_provider == "ollama": return generate_with_ollama(prompt, model_name) else: @@ -197,14 +214,59 @@ def read_file_content(file_path: str) -> Optional[str]: logging.error(f"Error reading file {file_path}: {e}") return None +import os +import requests +from azure.identity import ClientSecretCredential +from dotenv import load_dotenv + +load_dotenv() + +TENANT_ID = os.getenv("AZURE_TENANT_ID") +CLIENT_ID = os.getenv("AZURE_CLIENT_ID") +CLIENT_SECRET = os.getenv("AZURE_CLIENT_SECRET") +API_ENDPOINT = os.getenv("API_ENDPOINT") +DEPLOYMENT_ID = os.getenv("DEPLOYMENT_ID") + +authority = "https://login.microsoftonline.com" +credential = ClientSecretCredential( + tenant_id=TENANT_ID, + client_id=CLIENT_ID, + client_secret=CLIENT_SECRET, + authority=authority, +) + +def generate_with_azure(prompt: str, model_name: str) -> Optional[str]: 
+ access_token = credential.get_token("https://cognitiveservices.azure.com/.default").token + headers = { + "Authorization": f"Bearer {access_token}", + "Content-Type": "application/json" + } + data = { + "deploymentId": DEPLOYMENT_ID, + "messages": [{"role": "user", "content": prompt}], + "stream": True + } + + try: + response = requests.post(API_ENDPOINT, headers=headers, json=data, stream=True) + response.raise_for_status() + + for line in response.iter_lines(): + if line: + chunk = line.decode("utf-8") + # Assuming the response format and extracting content accordingly + if 'delta' in chunk: + content = chunk.get("choices", [{}])[0].get("delta", {}).get("content") + if content: + yield content + except Exception as e: + print(f"An error occurred: {e}") + yield None def generate_with_openai(prompt: str, model_name: str) -> Optional[str]: from openai import OpenAI client = OpenAI(api_key = os.getenv('OPENAI_API_KEY')) - #if not openai.api_key: - # logging.error("OpenAI API key is missing.") - # return None - + try: response = client.chat.completions.create( model=model_name, @@ -259,7 +321,7 @@ def generate_git_branch_name(best_practice_ids): else: return None -def apply_best_practices(best_practice_ids, use_ai_flag, repo_urls = None, existing_repo_dir = None, target_dir_to_clone_to = None): +def apply_best_practices(best_practice_ids, use_ai_flag, model, repo_urls = None, existing_repo_dir = None, target_dir_to_clone_to = None): for repo_url in repo_urls: if len(best_practice_ids) > 1: @@ -278,6 +340,7 @@ def apply_best_practices(best_practice_ids, use_ai_flag, repo_urls = None, exist apply_best_practice( best_practice_id=best_practice_id, use_ai_flag=use_ai_flag, + model=model, repo_url=repo_url, target_dir_to_clone_to=target_dir_to_clone_to, branch=GIT_BRANCH_NAME_FOR_MULTIPLE_COMMITS) @@ -288,23 +351,25 @@ def apply_best_practices(best_practice_ids, use_ai_flag, repo_urls = None, exist apply_best_practice( best_practice_id=best_practice_id, 
use_ai_flag=use_ai_flag, + model=model, repo_url=repo_url, target_dir_to_clone_to=repo_dir, branch=GIT_BRANCH_NAME_FOR_MULTIPLE_COMMITS) else: for best_practice_id in best_practice_ids: - apply_best_practice(best_practice_id=best_practice_id, use_ai_flag=use_ai_flag, existing_repo_dir=existing_repo_dir) + apply_best_practice(best_practice_id=best_practice_id, use_ai_flag=use_ai_flag, model=model, existing_repo_dir=existing_repo_dir) elif len(best_practice_ids) == 1: apply_best_practice( best_practice_id=best_practice_ids[0], use_ai_flag=use_ai_flag, + model=model, repo_url=repo_url, existing_repo_dir=existing_repo_dir, target_dir_to_clone_to=target_dir_to_clone_to) else: logging.error(f"No best practice IDs specified.") -def apply_best_practice(best_practice_id, use_ai_flag, repo_url = None, existing_repo_dir = None, target_dir_to_clone_to = None, branch = None): +def apply_best_practice(best_practice_id, use_ai_flag, model, repo_url = None, existing_repo_dir = None, target_dir_to_clone_to = None, branch = None): applied_file_path = None # default return value is invalid applied best practice logging.debug(f"AI features {'enabled' if use_ai_flag else 'disabled'} for applying best practices") @@ -399,7 +464,7 @@ def apply_best_practice(best_practice_id, use_ai_flag, repo_url = None, existing # Custom AI processing code to go here using and modifying applied_file_path content - ai_content = use_ai(best_practice_id, git_repo.working_tree_dir, applied_file_path) #template file path + ai_content = use_ai(best_practice_id, git_repo.working_tree_dir, applied_file_path, model) #template file path if ai_content: with open(applied_file_path, 'w') as f: f.write(ai_content) @@ -412,7 +477,7 @@ def apply_best_practice(best_practice_id, use_ai_flag, repo_url = None, existing #logging.warning(f"AI apply features unsupported for best practice {best_practice_id} currently") # Custom AI processing code to go here using and modifying applied_file_path content - ai_content = 
use_ai(best_practice_id, git_repo.working_tree_dir, applied_file_path) + ai_content = use_ai(best_practice_id, git_repo.working_tree_dir, applied_file_path, model) if ai_content: with open(applied_file_path, 'w') as f: f.write(ai_content) @@ -438,7 +503,7 @@ def apply_best_practice(best_practice_id, use_ai_flag, repo_url = None, existing #logging.warning(f"AI apply features unsupported for best practice {best_practice_id} currently") # Custom AI processing code to go here using and modifying applied_file_path content - ai_content = use_ai(best_practice_id, git_repo.working_tree_dir, applied_file_path) + ai_content = use_ai(best_practice_id, git_repo.working_tree_dir, applied_file_path, model) if ai_content: with open(applied_file_path, 'w') as f: f.write(ai_content) @@ -451,7 +516,7 @@ def apply_best_practice(best_practice_id, use_ai_flag, repo_url = None, existing #logging.warning(f"AI apply features unsupported for best practice {best_practice_id} currently") # Custom AI processing code to go here using and modifying applied_file_path content - ai_content = use_ai(best_practice_id, git_repo.working_tree_dir, applied_file_path) + ai_content = use_ai(best_practice_id, git_repo.working_tree_dir, applied_file_path, model) if ai_content: with open(applied_file_path, 'w') as f: f.write(ai_content) @@ -464,7 +529,7 @@ def apply_best_practice(best_practice_id, use_ai_flag, repo_url = None, existing #logging.warning(f"AI apply features unsupported for best practice {best_practice_id} currently") # Custom AI processing code to go here using and modifying applied_file_path content - ai_content = use_ai(best_practice_id, git_repo.working_tree_dir, applied_file_path) + ai_content = use_ai(best_practice_id, git_repo.working_tree_dir, applied_file_path, model) if ai_content: with open(applied_file_path, 'w') as f: f.write(ai_content) @@ -477,7 +542,7 @@ def apply_best_practice(best_practice_id, use_ai_flag, repo_url = None, existing #logging.warning(f"AI apply features 
unsupported for best practice {best_practice_id} currently") # Custom AI processing code to go here using and modifying applied_file_path content - ai_content = use_ai(best_practice_id, git_repo.working_tree_dir, applied_file_path) + ai_content = use_ai(best_practice_id, git_repo.working_tree_dir, applied_file_path, model) if ai_content: with open(applied_file_path, 'w') as f: f.write(ai_content) @@ -487,7 +552,7 @@ def apply_best_practice(best_practice_id, use_ai_flag, repo_url = None, existing elif best_practice_id == 'SLIM-5.1': applied_file_path = download_and_place_file(git_repo, uri, 'CHANGELOG.md') if use_ai_flag: - ai_content = use_ai(best_practice_id, git_repo.working_tree_dir, applied_file_path) + ai_content = use_ai(best_practice_id, git_repo.working_tree_dir, applied_file_path, model) if ai_content: with open(applied_file_path, 'w') as f: f.write(ai_content) @@ -497,7 +562,7 @@ def apply_best_practice(best_practice_id, use_ai_flag, repo_url = None, existing elif best_practice_id == 'SLIM-7.1': applied_file_path = download_and_place_file(git_repo, uri, '.github/PULL_REQUEST_TEMPLATE.md') if use_ai_flag: - ai_content = use_ai(best_practice_id, git_repo.working_tree_dir, applied_file_path) + ai_content = use_ai(best_practice_id, git_repo.working_tree_dir, applied_file_path, model) if ai_content: with open(applied_file_path, 'w') as f: f.write(ai_content) @@ -510,7 +575,7 @@ def apply_best_practice(best_practice_id, use_ai_flag, repo_url = None, existing #logging.warning(f"AI apply features unsupported for best practice {best_practice_id} currently") # Custom AI processing code to go here using and modifying applied_file_path content - ai_content = use_ai(best_practice_id, git_repo.working_tree_dir, applied_file_path) + ai_content = use_ai(best_practice_id, git_repo.working_tree_dir, applied_file_path, model) if ai_content: with open(applied_file_path, 'w') as f: f.write(ai_content) @@ -520,7 +585,7 @@ def apply_best_practice(best_practice_id, use_ai_flag, 
repo_url = None, existing elif best_practice_id == 'SLIM-9.1': applied_file_path = download_and_place_file(git_repo, uri, 'CONTRIBUTING.md') if use_ai_flag: - ai_content = use_ai(best_practice_id, git_repo.working_tree_dir, applied_file_path) + ai_content = use_ai(best_practice_id, git_repo.working_tree_dir, applied_file_path, model) if ai_content: with open(applied_file_path, 'w') as f: f.write(ai_content) @@ -530,13 +595,13 @@ def apply_best_practice(best_practice_id, use_ai_flag, repo_url = None, existing elif best_practice_id == 'SLIM-13.1': applied_file_path = download_and_place_file(git_repo, uri, 'TESTING.md') if use_ai_flag: - ai_content = use_ai(best_practice_id, git_repo.working_tree_dir, applied_file_path) + ai_content = use_ai(best_practice_id, git_repo.working_tree_dir, applied_file_path, model) if ai_content: with open(applied_file_path, 'w') as f: f.write(ai_content) logging.info(f"Applied AI-generated content to {applied_file_path}") else: - logging.warning(f"AI generation failed for best practice {best_practice_id}") + logging.warning(f"AI generation failed for best practice {best_practice_id}") else: applied_file_path = None # nothing was modified logging.warning(f"SLIM best practice {best_practice_id} not supported.") @@ -615,7 +680,7 @@ def deploy_best_practice(best_practice_id, repo_dir, remote_name='origin', commi return False -def apply_and_deploy_best_practices(best_practice_ids, use_ai_flag, remote_name = GIT_DEFAULT_REMOTE_NAME, commit_message = GIT_DEFAULT_COMMIT_MESSAGE, repo_urls=None, existing_repo_dir=None, target_dir_to_clone_to=None): +def apply_and_deploy_best_practices(best_practice_ids, use_ai_flag, model, remote_name = GIT_DEFAULT_REMOTE_NAME, commit_message = GIT_DEFAULT_COMMIT_MESSAGE, repo_urls=None, existing_repo_dir=None, target_dir_to_clone_to=None): branch_name = generate_git_branch_name(best_practice_ids) for repo_url in repo_urls: @@ -629,6 +694,7 @@ def apply_and_deploy_best_practices(best_practice_ids, use_ai_flag, 
remote_name git_repo = apply_best_practice( best_practice_id=best_practice_id, use_ai_flag=use_ai_flag, + model=model, repo_url=repo_url, target_dir_to_clone_to=target_dir_to_clone_to, branch=branch_name) @@ -650,6 +716,7 @@ def apply_and_deploy_best_practices(best_practice_ids, use_ai_flag, remote_name git_repo = apply_best_practice( best_practice_id=best_practice_id, use_ai_flag=use_ai_flag, + model=model, repo_url=repo_url, target_dir_to_clone_to=repo_dir, branch=branch_name) @@ -666,7 +733,7 @@ def apply_and_deploy_best_practices(best_practice_ids, use_ai_flag, remote_name logging.error(f"Unable to deploy best practice '{best_practice_id}' because apply failed.") else: for best_practice_id in best_practice_ids: - git_repo = apply_best_practice(best_practice_id=best_practice_id, use_ai_flag=use_ai_flag, existing_repo_dir=existing_repo_dir) + git_repo = apply_best_practice(best_practice_id=best_practice_id, use_ai_flag=use_ai_flag, model=model, existing_repo_dir=existing_repo_dir) # deploy just the last best practice, which deploys others as well if git_repo: @@ -682,6 +749,7 @@ def apply_and_deploy_best_practices(best_practice_ids, use_ai_flag, remote_name git_repo = apply_best_practice( best_practice_id=best_practice_ids[0], use_ai_flag=use_ai_flag, + model=model, repo_url=repo_url, existing_repo_dir=existing_repo_dir, target_dir_to_clone_to=branch_name) @@ -689,7 +757,7 @@ def apply_and_deploy_best_practices(best_practice_ids, use_ai_flag, remote_name # deploy just the last best practice, which deploys others as well if git_repo: deploy_best_practice( - best_practice_id=best_practice_id, + best_practice_id=best_practice_ids[0], repo_dir=git_repo.working_tree_dir, remote_name=remote_name, commit_message=commit_message, @@ -699,25 +767,26 @@ def apply_and_deploy_best_practices(best_practice_ids, use_ai_flag, remote_name else: logging.error(f"No best practice IDs specified.") - # for best_practice_id in best_practice_ids: - # result = 
apply_and_deploy_best_practice( - # best_practice_id, - # use_ai_flag, - # remote_name, - # commit_message, - # repo_url, - # existing_repo_dir, - # target_dir_to_clone_to, - # branch=branch_name) - # if not result: - # logging.error(f"Failed to apply and deploy best practice ID: {best_practice_id}") - -def apply_and_deploy_best_practice(best_practice_id, use_ai_flag, remote_name=GIT_DEFAULT_REMOTE_NAME, commit_message=GIT_DEFAULT_COMMIT_MESSAGE, repo_url = None, existing_repo_dir = None, target_dir_to_clone_to = None, branch = None): + for best_practice_id in best_practice_ids: + result = apply_and_deploy_best_practice( + best_practice_id, + use_ai_flag, + model, + remote_name, + commit_message, + repo_url, + existing_repo_dir, + target_dir_to_clone_to, + branch=branch_name) + if not result: + logging.error(f"Failed to apply and deploy best practice ID: {best_practice_id}") + +def apply_and_deploy_best_practice(best_practice_id, use_ai_flag, model, remote_name=GIT_DEFAULT_REMOTE_NAME, commit_message=GIT_DEFAULT_COMMIT_MESSAGE, repo_url = None, existing_repo_dir = None, target_dir_to_clone_to = None, branch = None): logging.debug("AI customization enabled for applying and deploying best practices" if use_ai_flag else "AI customization disabled") logging.debug(f"Applying and deploying best practice ID: {best_practice_id}") # Apply the best practice - git_repo = apply_best_practice(best_practice_id=best_practice_id, use_ai_flag=use_ai_flag, repo_url=repo_url, existing_repo_dir=existing_repo_dir, target_dir_to_clone_to=target_dir_to_clone_to, branch=branch) + git_repo = apply_best_practice(best_practice_id=best_practice_id, use_ai_flag=use_ai_flag, model=model, repo_url=repo_url, existing_repo_dir=existing_repo_dir, target_dir_to_clone_to=target_dir_to_clone_to, branch=branch) # Deploy the best practice if applied successfully if git_repo: @@ -748,9 +817,11 @@ def create_parser(): parser_apply.add_argument('--repo-dir', required=False, help='Repository directory 
location on local machine. Only one repository supported') parser_apply.add_argument('--clone-to_dir', required=False, help='Local path to clone repository to. Compatible with --repo-urls') parser_apply.add_argument('--use-ai', action='store_true', help='Automatically customize the application of the best practice') + parser_apply.add_argument('--model', required=False, help='Model name (ollama/gpt-4o) for using ai') parser_apply.set_defaults(func=lambda args: apply_best_practices( best_practice_ids=args.best_practice_ids, use_ai_flag=args.use_ai, + model=args.model, repo_urls=args.repo_urls, existing_repo_dir=args.repo_dir, target_dir_to_clone_to=args.clone_to_dir @@ -776,11 +847,13 @@ def create_parser(): parser_apply_deploy.add_argument('--repo-dir', required=False, help='Repository directory location on local machine. Only one repository supported') parser_apply_deploy.add_argument('--clone-to-dir', required=False, help='Local path to clone repository to. Compatible with --repo-urls') parser_apply_deploy.add_argument('--use-ai', action='store_true', help='Automatically customize the application of the best practice') + parser_apply_deploy.add_argument('--model', required=False, help='Model name (ollama/gpt-4o) for using ai') parser_apply_deploy.add_argument('--remote-name', required=False, default=GIT_DEFAULT_REMOTE_NAME, help=f"Name of the remote to push changes to. Default: '{GIT_DEFAULT_REMOTE_NAME}") parser_apply_deploy.add_argument('--commit-message', required=False, default=GIT_DEFAULT_COMMIT_MESSAGE, help=f"Commit message to use for the deployment. 
Default '{GIT_DEFAULT_COMMIT_MESSAGE}") parser_apply_deploy.set_defaults(func=lambda args: apply_and_deploy_best_practices( best_practice_ids=args.best_practice_ids, use_ai_flag=args.use_ai, + model=args.model, remote_name=args.remote_name, commit_message=args.commit_message, repo_urls=args.repo_urls, From 028fb668b93289bd769456c38203accffb9941c3 Mon Sep 17 00:00:00 2001 From: Kyongsik Yun Date: Wed, 14 Aug 2024 22:17:30 -0700 Subject: [PATCH 02/10] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 76ffccd..f3a9707 100644 --- a/README.md +++ b/README.md @@ -94,7 +94,7 @@ This section provides detailed commands to interact with the SLIM CLI. Each comm ``` - To apply a best practice using AI customization: ```bash - python slim_cli.py apply --best-practice-ids SLIM-3.1 --repo-urls https://github.com/your_org/your_repo.git --use-ai + python slim_cli.py apply --best-practice-ids SLIM-3.1 --repo-urls https://github.com/your_org/your_repo.git --use-ai --model openai/gpt-4o ``` 3. 
**Deploy a best practice** From df8eb86f477728f788a703be584c73d4128f3eb5 Mon Sep 17 00:00:00 2001 From: Rishi Verma Date: Thu, 15 Aug 2024 23:21:04 -0700 Subject: [PATCH 03/10] Fixed missing azure dependency --- requirements.txt | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index c358ce7..2c70e43 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,4 +3,5 @@ tabulate openai python-dotenv rich -gitpython \ No newline at end of file +gitpython +azure-identity \ No newline at end of file From 2c6d7007aa6cb5135cfa2cabba9b1ea8ddcea6e5 Mon Sep 17 00:00:00 2001 From: Kyongsik Yun Date: Sat, 17 Aug 2024 16:11:10 -0700 Subject: [PATCH 04/10] Update slim-cli.py --- slim-cli.py | 49 ++++++++++++++++++++++++++++++++----------------- 1 file changed, 32 insertions(+), 17 deletions(-) diff --git a/slim-cli.py b/slim-cli.py index a88dbc5..c6d1639 100644 --- a/slim-cli.py +++ b/slim-cli.py @@ -18,6 +18,8 @@ import git import urllib + + # Constants SLIM_REGISTRY_URI = "https://raw.githubusercontent.com/NASA-AMMOS/slim/issue-154/static/data/slim-registry.json" SUPPORTED_MODELS = { @@ -214,28 +216,41 @@ def read_file_content(file_path: str) -> Optional[str]: logging.error(f"Error reading file {file_path}: {e}") return None -import os -import requests -from azure.identity import ClientSecretCredential -from dotenv import load_dotenv -load_dotenv() -TENANT_ID = os.getenv("AZURE_TENANT_ID") -CLIENT_ID = os.getenv("AZURE_CLIENT_ID") -CLIENT_SECRET = os.getenv("AZURE_CLIENT_SECRET") -API_ENDPOINT = os.getenv("API_ENDPOINT") -DEPLOYMENT_ID = os.getenv("DEPLOYMENT_ID") -authority = "https://login.microsoftonline.com" -credential = ClientSecretCredential( - tenant_id=TENANT_ID, - client_id=CLIENT_ID, - client_secret=CLIENT_SECRET, - authority=authority, -) def generate_with_azure(prompt: str, model_name: str) -> Optional[str]: + from azure.identity import ClientSecretCredential + from dotenv import load_dotenv + + load_dotenv() + 
+ TENANT_ID = os.getenv("AZURE_TENANT_ID") + CLIENT_ID = os.getenv("AZURE_CLIENT_ID") + CLIENT_SECRET = os.getenv("AZURE_CLIENT_SECRET") + API_ENDPOINT = os.getenv("API_ENDPOINT") + DEPLOYMENT_ID = os.getenv("DEPLOYMENT_ID") + + # Check if all required environment variables are set + if not all([TENANT_ID, CLIENT_ID, CLIENT_SECRET, API_ENDPOINT, DEPLOYMENT_ID]): + missing_vars = [var for var, value in [ + ("AZURE_TENANT_ID", TENANT_ID), + ("AZURE_CLIENT_ID", CLIENT_ID), + ("AZURE_CLIENT_SECRET", CLIENT_SECRET), + ("API_ENDPOINT", API_ENDPOINT), + ("DEPLOYMENT_ID", DEPLOYMENT_ID), + ] if value is None] + raise ValueError(f"Missing environment variables: {', '.join(missing_vars)}") + + authority = "https://login.microsoftonline.com" + credential = ClientSecretCredential( + tenant_id=TENANT_ID, + client_id=CLIENT_ID, + client_secret=CLIENT_SECRET, + authority=authority, + ) + access_token = credential.get_token("https://cognitiveservices.azure.com/.default").token headers = { "Authorization": f"Bearer {access_token}", From e1b91cb5e9be07540285c9da7969fd620f08c89d Mon Sep 17 00:00:00 2001 From: Kyongsik Yun Date: Sat, 17 Aug 2024 16:19:09 -0700 Subject: [PATCH 05/10] Update slim-cli.py --- slim-cli.py | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/slim-cli.py b/slim-cli.py index c6d1639..a60f12d 100644 --- a/slim-cli.py +++ b/slim-cli.py @@ -831,12 +831,13 @@ def create_parser(): parser_apply.add_argument('--repo-urls', nargs='+', required=False, help='Repository URLs to apply to. Do not use if --repo-dir specified') parser_apply.add_argument('--repo-dir', required=False, help='Repository directory location on local machine. Only one repository supported') parser_apply.add_argument('--clone-to_dir', required=False, help='Local path to clone repository to. 
Compatible with --repo-urls') - parser_apply.add_argument('--use-ai', action='store_true', help='Automatically customize the application of the best practice') - parser_apply.add_argument('--model', required=False, help='Model name (ollama/gpt-4o) for using ai') + #parser_apply.add_argument('--use-ai', action='store_true', help='Automatically customize the application of the best practice') + parser_apply.add_argument('--use-ai', metavar='MODEL', help='Automatically customize the application of the best practice with the specified AI model (e.g., openai/gpt-4o') + #parser_apply.add_argument('--model', required=False, help='Model name (openai/gpt-4o) for using ai') parser_apply.set_defaults(func=lambda args: apply_best_practices( best_practice_ids=args.best_practice_ids, - use_ai_flag=args.use_ai, - model=args.model, + use_ai_flag=bool(args.use_ai), + model=args.use_ai, repo_urls=args.repo_urls, existing_repo_dir=args.repo_dir, target_dir_to_clone_to=args.clone_to_dir @@ -861,14 +862,15 @@ def create_parser(): parser_apply_deploy.add_argument('--repo-urls', nargs='+', required=False, help='Repository URLs to apply to. Do not use if --repo-dir specified') parser_apply_deploy.add_argument('--repo-dir', required=False, help='Repository directory location on local machine. Only one repository supported') parser_apply_deploy.add_argument('--clone-to-dir', required=False, help='Local path to clone repository to. 
Compatible with --repo-urls') - parser_apply_deploy.add_argument('--use-ai', action='store_true', help='Automatically customize the application of the best practice') - parser_apply_deploy.add_argument('--model', required=False, help='Model name (ollama/gpt-4o) for using ai') + parser_apply_deploy.add_argument('--use-ai', metavar='MODEL', help='Automatically customize the application of the best practice with the specified AI model') + #parser_apply_deploy.add_argument('--use-ai', action='store_true', help='Automatically customize the application of the best practice') + #parser_apply_deploy.add_argument('--model', required=False, help='Model name (ollama/gpt-4o) for using ai') parser_apply_deploy.add_argument('--remote-name', required=False, default=GIT_DEFAULT_REMOTE_NAME, help=f"Name of the remote to push changes to. Default: '{GIT_DEFAULT_REMOTE_NAME}") parser_apply_deploy.add_argument('--commit-message', required=False, default=GIT_DEFAULT_COMMIT_MESSAGE, help=f"Commit message to use for the deployment. Default '{GIT_DEFAULT_COMMIT_MESSAGE}") parser_apply_deploy.set_defaults(func=lambda args: apply_and_deploy_best_practices( best_practice_ids=args.best_practice_ids, - use_ai_flag=args.use_ai, - model=args.model, + use_ai_flag=bool(args.use_ai), + model=args.use_ai, remote_name=args.remote_name, commit_message=args.commit_message, repo_urls=args.repo_urls, From d5742a3ccccc678294029678ea30dd7fa6464b1e Mon Sep 17 00:00:00 2001 From: Kyongsik Yun Date: Sat, 17 Aug 2024 16:19:59 -0700 Subject: [PATCH 06/10] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index f3a9707..347d3b8 100644 --- a/README.md +++ b/README.md @@ -94,7 +94,7 @@ This section provides detailed commands to interact with the SLIM CLI. 
Each comm ``` - To apply a best practice using AI customization: ```bash - python slim_cli.py apply --best-practice-ids SLIM-3.1 --repo-urls https://github.com/your_org/your_repo.git --use-ai --model openai/gpt-4o + python slim_cli.py apply --best-practice-ids SLIM-3.1 --repo-urls https://github.com/your_org/your_repo.git --use-ai openai/gpt-4o ``` 3. **Deploy a best practice** From f6cd6a5a528a56e3d1402c45dd0ba86939ca3039 Mon Sep 17 00:00:00 2001 From: Kyongsik Yun Date: Sat, 17 Aug 2024 19:56:30 -0700 Subject: [PATCH 07/10] combining use_ai and model parameters --- slim-cli.py | 36 +++++++++++++++++------------------- 1 file changed, 17 insertions(+), 19 deletions(-) diff --git a/slim-cli.py b/slim-cli.py index a60f12d..6466eb6 100644 --- a/slim-cli.py +++ b/slim-cli.py @@ -138,6 +138,8 @@ def use_ai(best_practice_id: str, repo_path: str, template_path: str, model: str # Generate the content using the specified model + print("model: ") + print(model) new_content = generate_content(prompt, model) #print("output: ") #print(new_content) @@ -216,10 +218,6 @@ def read_file_content(file_path: str) -> Optional[str]: logging.error(f"Error reading file {file_path}: {e}") return None - - - - def generate_with_azure(prompt: str, model_name: str) -> Optional[str]: from azure.identity import ClientSecretCredential from dotenv import load_dotenv @@ -474,7 +472,7 @@ def apply_best_practice(best_practice_id, use_ai_flag, model, repo_url = None, e # Process best practice by ID if best_practice_id == 'SLIM-1.1': applied_file_path = download_and_place_file(git_repo, uri, 'GOVERNANCE.md') - if use_ai_flag: + if use_ai_flag and model: #logging.warning(f"AI apply features unsupported for best practice {best_practice_id} currently") # Custom AI processing code to go here using and modifying applied_file_path content @@ -488,7 +486,7 @@ def apply_best_practice(best_practice_id, use_ai_flag, model, repo_url = None, e logging.warning(f"AI generation failed for best 
practice {best_practice_id}") elif best_practice_id == 'SLIM-1.2': applied_file_path = download_and_place_file(git_repo, uri, 'GOVERNANCE.md') - if use_ai_flag: + if use_ai_flag and model: #logging.warning(f"AI apply features unsupported for best practice {best_practice_id} currently") # Custom AI processing code to go here using and modifying applied_file_path content @@ -501,11 +499,11 @@ def apply_best_practice(best_practice_id, use_ai_flag, model, repo_url = None, e logging.warning(f"AI generation failed for best practice {best_practice_id}") elif best_practice_id == 'SLIM-3.1': applied_file_path = download_and_place_file(git_repo, uri, 'README.md') - if use_ai_flag: + if use_ai_flag and model: #logging.warning(f"AI apply features unsupported for best practice {best_practice_id} currently") # Custom AI processing code to go here using and modifying applied_file_path content - ai_content = use_ai(best_practice_id, git_repo.working_tree_dir, applied_file_path) + ai_content = use_ai(best_practice_id, git_repo.working_tree_dir, applied_file_path, model) if ai_content: with open(applied_file_path, 'w') as f: f.write(ai_content) @@ -514,7 +512,7 @@ def apply_best_practice(best_practice_id, use_ai_flag, model, repo_url = None, e logging.warning(f"AI generation failed for best practice {best_practice_id}") elif best_practice_id == 'SLIM-4.1': applied_file_path = download_and_place_file(git_repo, uri, '.github/ISSUE_TEMPLATE/bug_report.md') - if use_ai_flag: + if use_ai_flag and model: #logging.warning(f"AI apply features unsupported for best practice {best_practice_id} currently") # Custom AI processing code to go here using and modifying applied_file_path content @@ -527,7 +525,7 @@ def apply_best_practice(best_practice_id, use_ai_flag, model, repo_url = None, e logging.warning(f"AI generation failed for best practice {best_practice_id}") elif best_practice_id == 'SLIM-4.2': applied_file_path = download_and_place_file(git_repo, uri, 
'.github/ISSUE_TEMPLATE/bug_report.yml') - if use_ai_flag: + if use_ai_flag and model: #logging.warning(f"AI apply features unsupported for best practice {best_practice_id} currently") # Custom AI processing code to go here using and modifying applied_file_path content @@ -540,7 +538,7 @@ def apply_best_practice(best_practice_id, use_ai_flag, model, repo_url = None, e logging.warning(f"AI generation failed for best practice {best_practice_id}") elif best_practice_id == 'SLIM-4.3': applied_file_path = download_and_place_file(git_repo, uri, '.github/ISSUE_TEMPLATE/new_feature.md') - if use_ai_flag: + if use_ai_flag and model: #logging.warning(f"AI apply features unsupported for best practice {best_practice_id} currently") # Custom AI processing code to go here using and modifying applied_file_path content @@ -553,7 +551,7 @@ def apply_best_practice(best_practice_id, use_ai_flag, model, repo_url = None, e logging.warning(f"AI generation failed for best practice {best_practice_id}") elif best_practice_id == 'SLIM-4.4': applied_file_path = download_and_place_file(git_repo, uri, '.github/ISSUE_TEMPLATE/new_feature.yml') - if use_ai_flag: + if use_ai_flag and model: #logging.warning(f"AI apply features unsupported for best practice {best_practice_id} currently") # Custom AI processing code to go here using and modifying applied_file_path content @@ -566,7 +564,7 @@ def apply_best_practice(best_practice_id, use_ai_flag, model, repo_url = None, e logging.warning(f"AI generation failed for best practice {best_practice_id}") elif best_practice_id == 'SLIM-5.1': applied_file_path = download_and_place_file(git_repo, uri, 'CHANGELOG.md') - if use_ai_flag: + if use_ai_flag and model: ai_content = use_ai(best_practice_id, git_repo.working_tree_dir, applied_file_path, model) if ai_content: with open(applied_file_path, 'w') as f: @@ -576,7 +574,7 @@ def apply_best_practice(best_practice_id, use_ai_flag, model, repo_url = None, e logging.warning(f"AI generation failed for best 
practice {best_practice_id}") elif best_practice_id == 'SLIM-7.1': applied_file_path = download_and_place_file(git_repo, uri, '.github/PULL_REQUEST_TEMPLATE.md') - if use_ai_flag: + if use_ai_flag and model: ai_content = use_ai(best_practice_id, git_repo.working_tree_dir, applied_file_path, model) if ai_content: with open(applied_file_path, 'w') as f: @@ -586,7 +584,7 @@ def apply_best_practice(best_practice_id, use_ai_flag, model, repo_url = None, e logging.warning(f"AI generation failed for best practice {best_practice_id}") elif best_practice_id == 'SLIM-8.1': applied_file_path = download_and_place_file(git_repo, uri, 'CODE_OF_CONDUCT.md') - if use_ai_flag: + if use_ai_flag and model: #logging.warning(f"AI apply features unsupported for best practice {best_practice_id} currently") # Custom AI processing code to go here using and modifying applied_file_path content @@ -599,7 +597,7 @@ def apply_best_practice(best_practice_id, use_ai_flag, model, repo_url = None, e logging.warning(f"AI generation failed for best practice {best_practice_id}") elif best_practice_id == 'SLIM-9.1': applied_file_path = download_and_place_file(git_repo, uri, 'CONTRIBUTING.md') - if use_ai_flag: + if use_ai_flag and model: ai_content = use_ai(best_practice_id, git_repo.working_tree_dir, applied_file_path, model) if ai_content: with open(applied_file_path, 'w') as f: @@ -609,7 +607,7 @@ def apply_best_practice(best_practice_id, use_ai_flag, model, repo_url = None, e logging.warning(f"AI generation failed for best practice {best_practice_id}") elif best_practice_id == 'SLIM-13.1': applied_file_path = download_and_place_file(git_repo, uri, 'TESTING.md') - if use_ai_flag: + if use_ai_flag and model: ai_content = use_ai(best_practice_id, git_repo.working_tree_dir, applied_file_path, model) if ai_content: with open(applied_file_path, 'w') as f: @@ -837,7 +835,7 @@ def create_parser(): parser_apply.set_defaults(func=lambda args: apply_best_practices( best_practice_ids=args.best_practice_ids, 
use_ai_flag=bool(args.use_ai), - model=args.use_ai, + model=args.use_ai if args.use_ai else None, repo_urls=args.repo_urls, existing_repo_dir=args.repo_dir, target_dir_to_clone_to=args.clone_to_dir @@ -870,7 +868,7 @@ def create_parser(): parser_apply_deploy.set_defaults(func=lambda args: apply_and_deploy_best_practices( best_practice_ids=args.best_practice_ids, use_ai_flag=bool(args.use_ai), - model=args.use_ai, + model=args.use_ai if args.use_ai else None, remote_name=args.remote_name, commit_message=args.commit_message, repo_urls=args.repo_urls, From 518fa75f7fa9cab72e4bd0b1e9c1d37b632159f3 Mon Sep 17 00:00:00 2001 From: Kyongsik Yun Date: Sat, 17 Aug 2024 20:27:05 -0700 Subject: [PATCH 08/10] fixed generate_with_ollama function --- slim-cli.py | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/slim-cli.py b/slim-cli.py index 6466eb6..02f91b4 100644 --- a/slim-cli.py +++ b/slim-cli.py @@ -296,13 +296,28 @@ def generate_with_openai(prompt: str, model_name: str) -> Optional[str]: #return str(response.choices[0].message.content) def generate_with_ollama(prompt: str, model_name: str) -> Optional[str]: + import ollama + try: - response = subprocess.run(['ollama', 'run', model_name, prompt], capture_output=True, text=True, check=True) - return response.stdout.strip() - except subprocess.CalledProcessError as e: + response = ollama.chat(model=model_name, messages=[ + { + 'role': 'user', + 'content': prompt, + }, + ]) + print(response['message']['content']) + return (response['message']['content']) + except Exception as e: logging.error(f"Error running Ollama model: {e}") return None + #try: + # response = subprocess.run(['ollama', 'run', model_name, prompt], capture_output=True, text=True, check=True) + # return response.stdout.strip() + #except subprocess.CalledProcessError as e: + # logging.error(f"Error running Ollama model: {e}") + # return None + def download_and_place_file(repo, url, filename, 
target_relative_path_in_repo=''): # Create the full path where the file will be saved. By default write to root. target_directory = os.path.join(repo.working_tree_dir, target_relative_path_in_repo) From bae545518d0f919046a934cf0011a002f92f12a2 Mon Sep 17 00:00:00 2001 From: Rishi Verma Date: Mon, 19 Aug 2024 01:01:47 -0700 Subject: [PATCH 09/10] Added reqs, usage --- requirements.txt | 3 ++- slim-cli.py | 7 +++++-- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/requirements.txt b/requirements.txt index 2c70e43..6f6dfbc 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,4 +4,5 @@ openai python-dotenv rich gitpython -azure-identity \ No newline at end of file +azure-identity +ollama \ No newline at end of file diff --git a/slim-cli.py b/slim-cli.py index 02f91b4..eea1f0c 100644 --- a/slim-cli.py +++ b/slim-cli.py @@ -93,6 +93,9 @@ def create_slim_registry_dictionary(practices): i += 1 return asset_mapping +def get_ai_model_pairs(supported_models): + # return a list of "key/value" pairs + return [f"{key}/{model}" for key, models in supported_models.items() for model in models] def use_ai(best_practice_id: str, repo_path: str, template_path: str, model: str = "openai/gpt-4o") -> Optional[str]: """ @@ -845,7 +848,7 @@ def create_parser(): parser_apply.add_argument('--repo-dir', required=False, help='Repository directory location on local machine. Only one repository supported') parser_apply.add_argument('--clone-to_dir', required=False, help='Local path to clone repository to. Compatible with --repo-urls') #parser_apply.add_argument('--use-ai', action='store_true', help='Automatically customize the application of the best practice') - parser_apply.add_argument('--use-ai', metavar='MODEL', help='Automatically customize the application of the best practice with the specified AI model (e.g., openai/gpt-4o') + parser_apply.add_argument('--use-ai', metavar='MODEL', help=f"Automatically customize the application of the best practice with an AI model. 
Support for: {get_ai_model_pairs(SUPPORTED_MODELS)}") #parser_apply.add_argument('--model', required=False, help='Model name (openai/gpt-4o) for using ai') parser_apply.set_defaults(func=lambda args: apply_best_practices( best_practice_ids=args.best_practice_ids, @@ -875,7 +878,7 @@ def create_parser(): parser_apply_deploy.add_argument('--repo-urls', nargs='+', required=False, help='Repository URLs to apply to. Do not use if --repo-dir specified') parser_apply_deploy.add_argument('--repo-dir', required=False, help='Repository directory location on local machine. Only one repository supported') parser_apply_deploy.add_argument('--clone-to-dir', required=False, help='Local path to clone repository to. Compatible with --repo-urls') - parser_apply_deploy.add_argument('--use-ai', metavar='MODEL', help='Automatically customize the application of the best practice with the specified AI model') + parser_apply_deploy.add_argument('--use-ai', metavar='MODEL', help=f'Automatically customize the application of the best practice with the specified AI model. Support for: {get_ai_model_pairs(SUPPORTED_MODELS)}') #parser_apply_deploy.add_argument('--use-ai', action='store_true', help='Automatically customize the application of the best practice') #parser_apply_deploy.add_argument('--model', required=False, help='Model name (ollama/gpt-4o) for using ai') parser_apply_deploy.add_argument('--remote-name', required=False, default=GIT_DEFAULT_REMOTE_NAME, help=f"Name of the remote to push changes to. Default: '{GIT_DEFAULT_REMOTE_NAME}") From c95b4a46e2adfac87516586636afea4a9ee7f8c5 Mon Sep 17 00:00:00 2001 From: Rishi Verma Date: Mon, 19 Aug 2024 01:13:59 -0700 Subject: [PATCH 10/10] Updated README --- README.md | 26 +++++++------------------- 1 file changed, 7 insertions(+), 19 deletions(-) diff --git a/README.md b/README.md index 347d3b8..df94af1 100644 --- a/README.md +++ b/README.md @@ -1,29 +1,18 @@ - -🚧 This repo is under construction. Do not use. 🚧 -
-[INSERT YOUR LOGO IMAGE HERE (IF APPLICABLE)] - - -

SLIM CLI Tool

-
Automate the application of best practices to your git repositories
- - -[INSERT YOUR BADGES HERE (SEE: https://shields.io)] [![SLIM](https://img.shields.io/badge/Best%20Practices%20from-SLIM-blue)](https://nasa-ammos.github.io/slim/) +[![SLIM](https://img.shields.io/badge/Best%20Practices%20from-SLIM-blue)](https://nasa-ammos.github.io/slim/) -[INSERT SCREENSHOT OF YOUR SOFTWARE, IF APPLICABLE] - +slim-cli-screen SLIM CLI is a command-line tool designed to infuse SLIM best practices seamlessly with your development workflow. It fetches and applies structured SLIM best practices directly into your Git repositories. The tool leverages artificial intelligence capabilities to customize and tailor the application of SLIM best practices based on your repository's specifics. @@ -31,10 +20,10 @@ SLIM CLI is a command-line tool designed to infuse SLIM best practices seamlessl ## Features -- Command-line interface for applying SLIM best practices into Git development workflows -- Fetches the latest SLIM best practices dynamically from SLIM's registry -- Allows customization of best practices using advanced AI models before applying them to repositories -- Deploys, or git adds, commits, and pushes changes to your repository's remote +- Command-line interface for applying SLIM best practices into Git development workflows. +- Fetches the latest SLIM best practices dynamically from SLIM's registry. +- Allows customization of best practices using advanced AI models before applying them to repositories. +- Deploys, or git adds, commits, and pushes changes to your repository's remote. ## Contents @@ -94,7 +83,7 @@ This section provides detailed commands to interact with the SLIM CLI. Each comm ``` - To apply a best practice using AI customization: ```bash - python slim_cli.py apply --best-practice-ids SLIM-3.1 --repo-urls https://github.com/your_org/your_repo.git --use-ai openai/gpt-4o + python slim-cli.py apply --best-practice-ids SLIM-123 --repo-urls https://github.com/your_org/your_repo.git --use-ai openai/gpt-4o ``` 3. 
**Deploy a best practice** @@ -127,7 +116,6 @@ This section provides detailed commands to interact with the SLIM CLI. Each comm Each command can be modified with additional flags as needed for more specific tasks or environments. - ## Changelog See our [CHANGELOG.md](CHANGELOG.md) for a history of our changes.