docker setup more small fix 2
HMDCrew committed Oct 8, 2023
1 parent f4303d3 commit fece818
Showing 28 changed files with 289 additions and 125 deletions.
4 changes: 3 additions & 1 deletion autogpts/autogpt/autogpt/models/command.py
@@ -65,4 +65,6 @@ def __str__(self) -> str:
f"{param.name}: {param.spec.type.value if param.spec.required else f'Optional[{param.spec.type.value}]'}"
for param in self.parameters
]
return f"{self.name}: {self.description.rstrip('.')}. Params: ({', '.join(params)})"
return f"{self.name}: \
{self.description.rstrip('.')}. \
Params: ({', '.join(params)})"
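A quick aside on the line-wrapping style used in this hunk (the values below are made up, not from the diff): inside a string literal a trailing backslash continues the literal itself, so the next source line's leading whitespace becomes part of the runtime string, whereas implicit concatenation of adjacent literals inside parentheses joins only the quoted fragments.

# Minimal sketch; "read_file" and its description are placeholder values.
name, description = "read_file", "Read an existing file"

wrapped = f"{name}: \
    {description}. Params: (filename: str)"
print(wrapped)
# read_file:     Read an existing file. Params: (filename: str)
# (the continuation line's indentation leaks into the string)

joined = (
    f"{name}: "
    f"{description}. Params: (filename: str)"
)
print(joined)
# read_file: Read an existing file. Params: (filename: str)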
27 changes: 18 additions & 9 deletions autogpts/autogpt/autogpt/plugins/__init__.py
@@ -79,7 +79,8 @@ def fetch_openai_plugins_manifest_and_spec(config: Config) -> dict:
manifest = response.json()
if manifest["schema_version"] != "v1":
logger.warn(
f"Unsupported manifest version: {manifest['schem_version']} for {url}"
f"Unsupported manifest version: \
{manifest['schem_version']} for {url}"
)
continue
if manifest["api"]["type"] != "openapi":
@@ -234,14 +235,16 @@ def scan_plugins(config: Config, debug: bool = False) -> List[AutoGPTPluginTempl

try:
__import__(qualified_module_name)
except:
except Exception:
logger.error(f"Failed to load {qualified_module_name}")
continue
plugin = sys.modules[qualified_module_name]

if not plugins_config.is_enabled(plugin_module_name):
logger.warn(
f"Plugin folder {plugin_module_name} found but not configured. If this is a legitimate plugin, please add it to plugins_config.yaml (key: {plugin_module_name})."
f"Plugin folder {plugin_module_name} found but not configured. \
If this is a legitimate plugin, please add it to \
plugins_config.yaml (key: {plugin_module_name})."
)
continue

@@ -262,7 +265,7 @@ def scan_plugins(config: Config, debug: bool = False) -> List[AutoGPTPluginTempl
zipped_package = zipimporter(str(plugin))
try:
zipped_module = zipped_package.load_module(str(module.parent))
except:
except Exception:
logger.error(f"Failed to load {str(module.parent)}")

for key in dir(zipped_module):
@@ -283,24 +286,30 @@ def scan_plugins(config: Config, debug: bool = False) -> List[AutoGPTPluginTempl

if plugin_configured and plugin_enabled:
logger.debug(
f"Loading plugin {plugin_name}. Enabled in plugins_config.yaml."
f"Loading plugin {plugin_name}. \
Enabled in plugins_config.yaml."
)
loaded_plugins.append(a_module())
elif plugin_configured and not plugin_enabled:
logger.debug(
f"Not loading plugin {plugin_name}. Disabled in plugins_config.yaml."
f"Not loading plugin {plugin_name}. \
Disabled in plugins_config.yaml."
)
elif not plugin_configured:
logger.warn(
f"Not loading plugin {plugin_name}. Key '{plugin_name}' was not found in plugins_config.yaml. "
f"Zipped plugins should use the class name ({plugin_name}) as the key."
f"Not loading plugin {plugin_name}. \
Key '{plugin_name}' was not found in \
plugins_config.yaml. "
f"Zipped plugins should use the \
class name ({plugin_name}) as the key."
)
else:
if (
module_name := getattr(a_module, "__name__", str(a_module))
) != "AutoGPTPluginTemplate":
logger.debug(
f"Skipping '{module_name}' because it doesn't subclass AutoGPTPluginTemplate."
f"Skipping '{module_name}' because it \
doesn't subclass AutoGPTPluginTemplate."
)

# OpenAI plugins
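For reference, a self-contained sketch of the narrowed exception handling in this file ("some_plugin" is a hypothetical module name, and importlib is used here instead of the __import__/sys.modules pair in the real code): a bare except: also swallows BaseException subclasses such as KeyboardInterrupt and SystemExit, whereas except Exception: lets them propagate.

import importlib
import logging

logger = logging.getLogger(__name__)


def try_import(qualified_module_name: str):
    try:
        # Catches ImportError, SyntaxError, etc., but not KeyboardInterrupt/SystemExit.
        return importlib.import_module(qualified_module_name)
    except Exception:
        logger.error(f"Failed to load {qualified_module_name}")
        return None


plugin = try_import("some_plugin")  # hypothetical name; returns None and logs on failure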
22 changes: 15 additions & 7 deletions autogpts/autogpt/autogpt/processing/text.py
@@ -21,12 +21,14 @@
def batch(
sequence: list[T], max_batch_length: int, overlap: int = 0
) -> Iterator[list[T]]:
"""Batch data from iterable into slices of length N. The last batch may be shorter."""
"""
Batch data from iterable into slices of length N. The last batch may be shorter.
"""
# batched('ABCDEFG', 3) --> ABC DEF G
if max_batch_length < 1:
raise ValueError("n must be at least one")
for i in range(0, len(sequence), max_batch_length - overlap):
yield sequence[i : i + max_batch_length]
yield sequence[i: i + max_batch_length]


def chunk_content(
@@ -62,7 +64,9 @@ async def summarize_text(
Args:
text (str): The text to summarize
config (Config): The config object
instruction (str): Additional instruction for summarization, e.g. "focus on information related to polar bears", "omit personal information contained in the text"
instruction (str): Additional instruction for summarization,
e.g. "focus on information related to polar bears",
"omit personal information contained in the text"
question (str): Question to answer in the summary
Returns:
@@ -80,7 +84,8 @@ async def summarize_text(

if question:
instruction = (
f'include any information that can be used to answer the question "{question}". '
f'include any information that can be used to answer the \
question "{question}". '
"Do not directly answer the question itself"
)

@@ -103,7 +108,8 @@ async def summarize_text(
f'LITERAL TEXT: """{text}"""'
"\n\n\n"
"CONCISE SUMMARY: The text is best summarized as"
# "Only respond with a concise summary or description of the user message."
# "Only respond with a concise summary or description
# of the user message."
)
)

@@ -160,7 +166,9 @@ def split_text(
tokenizer: ModelTokenizer,
with_overlap: bool = True,
) -> Iterator[tuple[str, int]]:
"""Split text into chunks of sentences, with each chunk not exceeding the maximum length
"""
Split text into chunks of sentences, with each chunk not exceeding the
maximum length
Args:
text (str): The text to split
@@ -236,7 +244,7 @@ def split_text(
current_chunk_length += sentence_length

else: # sentence longer than maximum length -> chop up and try again
sentences[i : i + 1] = [
sentences[i: i + 1] = [
chunk
for chunk, _ in chunk_content(sentence, target_chunk_length, tokenizer)
]
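As a usage note for the batch() helper shown above: it steps by max_batch_length - overlap, so consecutive slices share overlap items. A small, runnable sketch with its expected output:

from typing import Iterator, TypeVar

T = TypeVar("T")


def batch(
    sequence: list[T], max_batch_length: int, overlap: int = 0
) -> Iterator[list[T]]:
    # Step by (max_batch_length - overlap) so neighbouring batches overlap.
    if max_batch_length < 1:
        raise ValueError("n must be at least one")
    for i in range(0, len(sequence), max_batch_length - overlap):
        yield sequence[i: i + max_batch_length]


print(["".join(b) for b in batch(list("ABCDEFG"), 3)])
# ['ABC', 'DEF', 'G']
print(["".join(b) for b in batch(list("ABCDEFG"), 3, overlap=1)])
# ['ABC', 'CDE', 'EFG', 'G']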
35 changes: 25 additions & 10 deletions autogpts/autogpt/autogpt/prompts/default_prompts.py
@@ -1,29 +1,44 @@
#########################Setup.py#################################
# ------------------------Setup.py------------------------

DEFAULT_SYSTEM_PROMPT_AICONFIG_AUTOMATIC = """
Your task is to devise up to 5 highly effective goals and an appropriate role-based name (_GPT) for an autonomous agent, ensuring that the goals are optimally aligned with the successful completion of its assigned task.
Your task is to devise up to 5 highly effective goals and an appropriate role-based
name (_GPT) for an autonomous agent, ensuring that the goals are optimally aligned
with the successful completion of its assigned task.
The user will provide the task, you will provide only the output in the exact format specified below with no explanation or conversation.
The user will provide the task, you will provide only the output in the exact format
specified below with no explanation or conversation.
Example input:
Help me with marketing my business
Example output:
Name: CMOGPT
Description: a professional digital marketer AI that assists Solopreneurs in growing their businesses by providing world-class expertise in solving marketing problems for SaaS, content products, agencies, and more.
Description: a professional digital marketer AI that assists Solopreneurs
in growing their businesses by providing world-class expertise in solving
marketing problems for SaaS, content products, agencies, and more.
Goals:
- Engage in effective problem-solving, prioritization, planning, and supporting execution to address your marketing needs as your virtual Chief Marketing Officer.
- Engage in effective problem-solving, prioritization, planning,
and supporting execution to address your marketing needs as
your virtual Chief Marketing Officer.
- Provide specific, actionable, and concise advice to help you make informed decisions without the use of platitudes or overly wordy explanations.
- Provide specific, actionable, and concise advice to help you make informed decisions
without the use of platitudes or overly wordy explanations.
- Identify and prioritize quick wins and cost-effective campaigns that maximize results with minimal time and budget investment.
- Identify and prioritize quick wins and cost-effective campaigns that maximize
results with minimal time and budget investment.
- Proactively take the lead in guiding you and offering suggestions when faced with unclear information or uncertainty to ensure your marketing strategy remains on track.
- Proactively take the lead in guiding you and offering suggestions when
faced with unclear information or uncertainty to ensure your marketing
strategy remains on track.
"""

DEFAULT_TASK_PROMPT_AICONFIG_AUTOMATIC = (
"Task: '{{user_prompt}}'\n"
"Respond only with the output in the exact format specified in the system prompt, with no explanation or conversation.\n"
"Respond only with the output in the exact format specified in the system prompt,"
"with no explanation or conversation.\n"
)

DEFAULT_USER_DESIRE_PROMPT = "Write a wikipedia style article about the project: https://github.com/significant-gravitas/AutoGPT" # Default prompt
DEFAULT_USER_DESIRE_PROMPT = (
"Write a wikipedia style article about the project: "
"https://github.com/significant-gravitas/AutoGPT" # Default prompt
)
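One detail worth keeping in mind with the implicit string concatenation used for these prompt constants (the constant below is a made-up illustration): adjacent literals are joined with no separator, so each fragment must carry its own leading or trailing space.

EXAMPLE_PROMPT = (
    "Respond only with the output in the exact format specified "
    "in the system prompt, with no explanation or conversation.\n"
)
assert "specified in" in EXAMPLE_PROMPT  # the space lives at the end of the first fragment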
2 changes: 1 addition & 1 deletion autogpts/autogpt/autogpt/prompts/utils.py
@@ -6,6 +6,6 @@ def format_numbered_list(items: list[Any], start_at: int = 1) -> str:


def indent(content: str, indentation: int | str = 4) -> str:
if type(indentation) == int:
if type(indentation) is int:
indentation = " " * indentation
return indentation + content.replace("\n", f"\n{indentation}") # type: ignore
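A short sketch of the exact-type check used by indent(): type(x) is int is an identity comparison (the form flake8's E721 warning asks for instead of ==), while isinstance(x, int) would also accept subclasses such as bool.

def indent(content: str, indentation: int | str = 4) -> str:
    if type(indentation) is int:
        indentation = " " * indentation
    return indentation + content.replace("\n", f"\n{indentation}")


print(indent("a\nb"))         # both lines indented with four spaces
print(indent("a\nb", "> "))   # both lines prefixed with "> "
print(type(True) is int)      # False: bool is a subclass of int, not exactly int
print(isinstance(True, int))  # True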
3 changes: 2 additions & 1 deletion autogpts/autogpt/autogpt/speech/base.py
@@ -35,7 +35,8 @@ def say(self, text: str, voice_index: int = 0) -> bool:
voice_index (int): The index of the voice to use.
"""
text = re.sub(
r"\b(?:https?://[-\w_.]+/?\w[-\w_.]*\.(?:[-\w_.]+/?\w[-\w_.]*\.)?[a-z]+(?:/[-\w_.%]+)*\b(?!\.))",
r"\b(?:https?://[-\w_.]+/?\w[-\w_.]*\.(?:[-\w_.]+/?\w[-\w_.]*\.)?\
[a-z]+(?:/[-\w_.%]+)*\b(?!\.))",
"",
text,
)
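A minimal sketch of wrapping a long regex without a backslash-newline inside the literal (the pattern below is simplified, not the one above): adjacent raw-string fragments are concatenated by the parser, whereas a backslash at the end of a line inside a raw string puts the backslash, the newline, and the next line's indentation into the pattern itself.

import re

URL_RE = re.compile(
    r"\bhttps?://[-\w_.]+"  # scheme and host
    r"(?:/[-\w_.%]+)*\b"    # optional path segments
)

print(URL_RE.sub("", "see https://example.com/docs for details"))
# -> "see  for details"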
3 changes: 2 additions & 1 deletion autogpts/autogpt/autogpt/speech/say.py
@@ -37,7 +37,8 @@ def _speak() -> None:
thread.start()

def __repr__(self):
return f"{self.__class__.__name__}(enabled={self._config.speak_mode}, provider={self._voice_engine.__class__.__name__})"
return f"{self.__class__.__name__}(enabled={self._config.speak_mode},\
provider={self._voice_engine.__class__.__name__})"

@staticmethod
def _get_voice_engine(config: Config) -> tuple[VoiceBase, VoiceBase]:
11 changes: 7 additions & 4 deletions autogpts/autogpt/autogpt/url_utils/validators.py
@@ -3,16 +3,19 @@
from typing import Any, Callable
from urllib.parse import urljoin, urlparse

from requests.compat import urljoin
# from requests.compat import urljoin


def validate_url(func: Callable[..., Any]) -> Any:
"""The method decorator validate_url is used to validate urls for any command that requires
a url as an argument"""
"""
The method decorator validate_url is used to validate urls for any command that
requires a url as an argument"""

@functools.wraps(func)
def wrapper(url: str, *args, **kwargs) -> Any:
"""Check if the URL is valid using a basic check, urllib check, and local file check
"""
Check if the URL is valid using a basic check, urllib check,
and local file check
Args:
url (str): The URL to check
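For context, a self-contained sketch of the decorator pattern behind validate_url, with deliberately simplified checks (the real implementation does more, e.g. local-file handling): functools.wraps preserves the wrapped command's metadata while the wrapper rejects malformed URLs before the command runs.

import functools
from typing import Any, Callable
from urllib.parse import urlparse


def validate_url(func: Callable[..., Any]) -> Any:
    @functools.wraps(func)
    def wrapper(url: str, *args, **kwargs) -> Any:
        parsed = urlparse(url)
        if parsed.scheme not in ("http", "https") or not parsed.netloc:
            raise ValueError(f"Invalid URL: {url}")
        return func(url, *args, **kwargs)

    return wrapper


@validate_url
def fetch_title(url: str) -> str:  # hypothetical command used only for illustration
    return f"would fetch {url}"


print(fetch_title("https://example.com"))      # passes validation
# fetch_title("file:///etc/passwd") would raise ValueError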
11 changes: 7 additions & 4 deletions autogpts/autogpt/autogpt/workspace/workspace.py
@@ -124,14 +124,16 @@ def _sanitize_path(

logger.debug(f"Resolved root as '{root}'")

# Allow exception for absolute paths if they are contained in your workspace directory.
# Allow exception for absolute paths if they are contained in your workspace
# directory.
if (
relative_path.is_absolute()
and restrict_to_root
and not relative_path.is_relative_to(root)
):
raise ValueError(
f"Attempted to access absolute path '{relative_path}' in workspace '{root}'."
f"Attempted to access absolute path '{relative_path}'\
in workspace '{root}'."
)

full_path = root.joinpath(relative_path).resolve()
@@ -140,7 +142,8 @@ def _sanitize_path(

if restrict_to_root and not full_path.is_relative_to(root):
raise ValueError(
f"Attempted to access path '{full_path}' outside of workspace '{root}'."
f"Attempted to access path '{full_path}'\
outside of workspace '{root}'."
)

return full_path
@@ -159,7 +162,7 @@ def init_workspace_directory(
) -> Path:
if override_workspace_path is None:
workspace_path = config.workdir / "auto_gpt_workspace"
elif type(override_workspace_path) == str:
elif type(override_workspace_path) is str:
workspace_path = Path(override_workspace_path)
else:
workspace_path = override_workspace_path
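A standalone sketch of the sandboxing check in _sanitize_path (no Config object, simplified signature): join the candidate path against the workspace root, resolve it, and refuse any result that escapes the root.

from pathlib import Path


def sanitize_path(relative_path: str, root: Path) -> Path:
    root = root.resolve()
    full_path = root.joinpath(relative_path).resolve()
    if not full_path.is_relative_to(root):
        raise ValueError(
            f"Attempted to access path '{full_path}' outside of workspace '{root}'."
        )
    return full_path


print(sanitize_path("notes/todo.txt", Path("/tmp/workspace")))
# /tmp/workspace/notes/todo.txt
# sanitize_path("../../etc/passwd", Path("/tmp/workspace")) raises ValueError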
4 changes: 2 additions & 2 deletions autogpts/autogpt/scripts/install_plugin_deps.py
@@ -21,7 +21,7 @@ def install_plugin_dependencies():
"""
plugins_dir = Path(os.getenv("PLUGINS_DIR", "plugins"))

logger.debug(f"Checking for dependencies in zipped plugins...")
logger.debug("Checking for dependencies in zipped plugins...")

# Install zip-based plugins
for plugin_archive in plugins_dir.glob("*.zip"):
@@ -49,7 +49,7 @@ def install_plugin_dependencies():
os.remove(extracted)
os.rmdir(os.path.join(plugins_dir, basedir))

logger.debug(f"Checking for dependencies in other plugin folders...")
logger.debug("Checking for dependencies in other plugin folders...")

# Install directory-based plugins
for requirements_file in glob(f"{plugins_dir}/*/requirements.txt"):
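A tiny sketch of the lint fix applied to the log calls in this file (flake8's F541: f-string without placeholders): the f prefix is dropped where nothing is interpolated and kept where it is (plugin_count below is a made-up value).

import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)

logger.debug("Checking for dependencies in zipped plugins...")  # no placeholders: plain string
plugin_count = 3  # made-up value for illustration
logger.debug(f"Found {plugin_count} zipped plugins")            # interpolation: keep the f prefix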
@@ -6,7 +6,12 @@

CYCLE_COUNT = 2
USER_INPUTS = [
"Use the browse_website command to visit http://books.toscrape.com/catalogue/meditations_33/index.html and answer the question 'What is the price of the book?'\nWrite the price of the book to a file named 'browse_website.txt'.'\nUse the task_complete command to complete the task.\nDo not use any other commands."
"Use the browse_website command to visit "
"http://books.toscrape.com/catalogue/meditations_33/index.html and answer "
"the question 'What is the price of the book?'\n"
"Write the price of the book to a file named 'browse_website.txt'.'\n"
"Use the task_complete command to complete the task.\n"
"Do not use any other commands."
]


@@ -12,7 +12,8 @@
]
USER_INPUTS = [
"Write 'Hello World' into a file named \"hello_world.txt\".",
'Write \'Hello World\' into 2 files named "hello_world_1.txt"and "hello_world_2.txt".',
'Write \'Hello World\' into 2 files named "hello_world_1.txt"'
'and "hello_world_2.txt".',
]


@@ -15,7 +15,11 @@
1 # we will attempt to beat 1 level above the current level for now.
)

CHALLENGE_FAILED_MESSAGE = "Challenges can sometimes fail randomly, please run this test again and if it fails reach out to us on https://discord.gg/autogpt in the 'challenges' channel to let us know the challenge you're struggling with."
CHALLENGE_FAILED_MESSAGE = (
"Challenges can sometimes fail randomly, please run this test again and if it "
"fails reach out to us on https://discord.gg/autogpt in the 'challenges' "
"channel to let us know the challenge you're struggling with."
)


def challenge() -> Callable[[Callable[..., Any]], Callable[..., None]]:
2 changes: 1 addition & 1 deletion autogpts/autogpt/tests/challenges/conftest.py
@@ -49,7 +49,7 @@ def pytest_configure(config: Config) -> None:

@pytest.fixture
def level_to_run(request: FixtureRequest) -> int:
## used for challenges in the goal oriented tests
# used for challenges in the goal oriented tests
return request.config.option.level


2 changes: 1 addition & 1 deletion autogpts/autogpt/tests/challenges/debug_code/data/code.py
@@ -5,7 +5,7 @@
def two_sum(nums: List, target: int) -> Optional[List[int]]:
seen = {}
for i, num in enumerate(nums):
typo
# typo
complement = target - num
if complement in seen:
return [seen[complement], i]
@@ -19,8 +19,15 @@
CODE_FILE_PATH = "code.py"
TEST_FILE_PATH = "test.py"
USER_INPUTS = [
"1- Run test.py using the execute_python_file command.\n2- Read code.py using the read_file command.\n3- Modify code.py using the write_to_file command.\nRepeat step 1, 2 and 3 until test.py runs without errors. Do not modify the test.py file.",
"1- Run test.py.\n2- Read code.py.\n3- Modify code.py.\nRepeat step 1, 2 and 3 until test.py runs without errors.\n",
"1- Run test.py using the execute_python_file command.\n"
"2- Read code.py using the read_file command.\n"
"3- Modify code.py using the write_to_file command.\n"
"Repeat step 1, 2 and 3 until test.py runs without errors. "
"Do not modify the test.py file.",
"1- Run test.py.\n"
"2- Read code.py.\n"
"3- Modify code.py.\n"
"Repeat step 1, 2 and 3 until test.py runs without errors.\n",
"Make test.py run without errors.",
]

@@ -65,4 +72,5 @@ def test_debug_code_challenge_a(
for expected_value in EXPECTED_VALUES:
assert (
expected_value in output
), f"Expected output to contain {expected_value}, but it was not found in {output}!"
), f"Expected output to contain {expected_value},\
but it was not found in {output}!"