
Commit

Fixed Publish
orionhunts-ai committed Jul 19, 2024
1 parent 869897f commit b832924
Showing 34 changed files with 1,874 additions and 134 deletions.
Binary file modified .DS_Store
Binary file not shown.
8 changes: 4 additions & 4 deletions .github/workflows/build_utils.yml
@@ -33,7 +33,7 @@ jobs:
          export PATH="$HOME/.local/bin:$PATH"
          poetry build
-     - name: Publish to PyPI
-       run: |
-         export PATH="$HOME/.local/bin:$PATH"
-         poetry publish -- ${{ secrets.PYPI_USERNAME }} --password ${{ secrets.PYPI_PASSWORD }}
+     - name: Publish to PyPI
+       run: |
+         export PATH="$HOME/.local/bin:$PATH"
+         poetry publish -u ${{ secrets.PYPI_USERNAME }} -p ${{ secrets.PYPI_PASSWORD }}
4 changes: 4 additions & 0 deletions .vscode/settings.json
@@ -0,0 +1,4 @@
{
    "files.autoSave": "afterDelay",
    "makefile.defaultLaunchConfiguration": null
}
Binary file modified dist/synutils-0.0.1-py3-none-any.whl
Binary file not shown.
Binary file modified dist/synutils-0.0.1.tar.gz
Binary file not shown.
3 changes: 3 additions & 0 deletions generate-tests/.gitignore
@@ -0,0 +1,3 @@
__pycache__/
*.env
.venv/
Empty file added generate-tests/README.rst
Empty file.
19 changes: 19 additions & 0 deletions generate-tests/generate_tests/config.json
@@ -0,0 +1,19 @@
{
    "languages": {
        "python3": {
            "source_dir": "generate_tests/generate_tests",
            "test_dir": "generate_tests/tests",
            "test_framework": "pytest"
        },
        "ts": {
            "source_dir": "generate_tests/src",
            "test_dir": "generate_tests/tests",
            "test_framework": "jest"
        },
        "rs": {
            "source_dir": "generate_tests/src",
            "test_dir": "generate_tests/tests",
            "test_framework": "cargo test"
        }
    }
}
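Each language entry maps a source directory and a test directory to the test framework that the generator names in its prompt. A minimal sketch of consuming this mapping, assuming config.json sits in the working directory as the script below expects:

import json

# Load the per-language settings and look up the configured test framework.
with open('config.json', 'r') as file:
    config = json.load(file)

def framework_for(lang):
    return config['languages'][lang]['test_framework']

# framework_for('python3') -> 'pytest'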
105 changes: 105 additions & 0 deletions generate-tests/generate_tests/generate_tests.py
@@ -0,0 +1,105 @@
import openai
import os
import json
import ast

# Set your OpenAI API key
openai.api_key = os.getenv('OPENAI_API_KEY')

# Load configuration
with open('config.json', 'r') as file:
    config = json.load(file)

# Load prompt examples
from prompt_examples import python_examples, ts_examples, rs_examples

examples = {
    "python3": python_examples,
    "ts": ts_examples,
    "rs": rs_examples
}


def get_existing_utilities(tracker_path):
    """Return the utilities already covered by generated tests, if any."""
    if os.path.exists(tracker_path):
        with open(tracker_path, 'r') as file:
            return json.load(file)
    return {}


def update_existing_utilities(tracker_path, utilities):
    """Persist the updated utility tracker."""
    with open(tracker_path, 'w') as file:
        json.dump(utilities, file, indent=4)


def extract_utilities_from_code(lang, code):
    """Collect top-level function and class names from the source code."""
    if lang == "python3":
        tree = ast.parse(code)
        return [node.name for node in ast.walk(tree) if isinstance(node, (ast.FunctionDef, ast.ClassDef))]
    # Add other language parsing if necessary
    return []


def generate_tests(lang, util_code, examples):
    prompt = f"""
Below is a utility function or class in {lang}:
{util_code}
Write test cases for the above utility function or class using {config['languages'][lang]['test_framework']}.
{examples}
"""

    # gpt-4 is a chat model, so it must be called through the chat completions
    # endpoint rather than the legacy text-completion endpoint.
    response = openai.ChatCompletion.create(
        model="gpt-4",
        messages=[{"role": "user", "content": prompt}],
        max_tokens=300,
        n=1,
        temperature=0.7
    )

    return response.choices[0].message.content.strip()


def main():
    for lang, settings in config['languages'].items():
        source_dir = settings['source_dir']
        test_dir = settings['test_dir']
        tracker_path = os.path.join(test_dir, 'utils_tracker.json')

        # Load existing utilities from tracker
        existing_utilities = get_existing_utilities(tracker_path)

        # Read source code
        utils_files = [f for f in os.listdir(source_dir) if f.endswith(('.py', '.ts', '.rs'))]
        for utils_file in utils_files:
            with open(os.path.join(source_dir, utils_file), 'r') as file:
                utils_code = file.read()

            # Extract utilities from source code
            current_utilities = extract_utilities_from_code(lang, utils_code)

            new_tests = []

            for util in current_utilities:
                if util not in existing_utilities:
                    # Generate tests for the new utility
                    util_code = f"def {util}(): pass"  # Simplified for illustration
                    tests = generate_tests(lang, util_code, examples[lang])
                    new_tests.append(tests)
                    existing_utilities[util] = True

            # Update the tracker with new utilities
            update_existing_utilities(tracker_path, existing_utilities)

            # Append new tests to a test file matching the source language
            if new_tests:
                extension = {'python3': 'py', 'ts': 'ts', 'rs': 'rs'}[lang]
                test_file_path = os.path.join(test_dir, f'test_{os.path.splitext(utils_file)[0]}.{extension}')
                with open(test_file_path, 'a') as file:
                    for test in new_tests:
                        file.write('\n\n' + test)

                print(f"New tests generated and appended to {test_file_path}")
            else:
                print(f"No new utilities found in {utils_file}. No tests generated.")


if __name__ == "__main__":
    main()
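Note that extract_utilities_from_code above only parses Python; for the "ts" and "rs" entries in config.json it returns an empty list, so no TypeScript or Rust tests would ever be generated. A minimal regex-based fallback is sketched below; the helper name and patterns are illustrative assumptions, not part of this commit, and only catch simple top-level declarations:

import re

# Hypothetical fallback for languages this script cannot parse with ast.
# The patterns only match simple top-level declarations; a real
# implementation would lean on a proper parser such as tree-sitter.
_DECL_PATTERNS = {
    "ts": re.compile(r'^\s*(?:export\s+)?(?:function|class)\s+([A-Za-z_]\w*)', re.MULTILINE),
    "rs": re.compile(r'^\s*(?:pub\s+)?(?:fn|struct|enum)\s+([A-Za-z_]\w*)', re.MULTILINE),
}

def extract_utilities_by_regex(lang, code):
    pattern = _DECL_PATTERNS.get(lang)
    if pattern is None:
        return []
    return pattern.findall(code)

For example, extract_utilities_by_regex("rs", "pub fn add(a: i32, b: i32) -> i32 { a + b }") returns ["add"].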
39 changes: 39 additions & 0 deletions generate-tests/generate_tests/prompt_examples.py
@@ -0,0 +1,39 @@
python_examples = """
Example Python tests using pytest

def test_add():
    assert add(2, 3) == 5
    assert add(-1, 1) == 0

def test_subtract():
    assert subtract(3, 2) == 1
    assert subtract(1, -1) == 2
"""

ts_examples = """
Example TypeScript tests using Jest

test('adds 1 + 2 to equal 3', () => {
    expect(add(1, 2)).toBe(3);
});

test('subtracts 5 - 2 to equal 3', () => {
    expect(subtract(5, 2)).toBe(3);
});
"""

rs_examples = """
Example Rust tests using cargo test

#[test]
fn test_add() {
    assert_eq!(add(2, 3), 5);
    assert_eq!(add(-1, 1), 0);
}

#[test]
fn test_subtract() {
    assert_eq!(subtract(3, 2), 1);
    assert_eq!(subtract(1, -1), 2);
}
"""
1 change: 1 addition & 0 deletions generate-tests/generate_tests/utilities_tracker.json
@@ -0,0 +1 @@
{}