diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 68b6fc2b7c27..18191139f72e 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -7,6 +7,9 @@ updates: interval: "weekly" open-pull-requests-limit: 10 target-branch: "dev" + commit-message: + prefix: "chore(libs/deps)" + prefix-development: "chore(libs/deps-dev)" groups: production-dependencies: dependency-type: "production" @@ -26,6 +29,9 @@ updates: interval: "weekly" open-pull-requests-limit: 10 target-branch: "dev" + commit-message: + prefix: "chore(backend/deps)" + prefix-development: "chore(backend/deps-dev)" groups: production-dependencies: dependency-type: "production" @@ -38,7 +44,6 @@ updates: - "minor" - "patch" - # frontend (Next.js project) - package-ecosystem: "npm" directory: "autogpt_platform/frontend" @@ -46,6 +51,9 @@ updates: interval: "weekly" open-pull-requests-limit: 10 target-branch: "dev" + commit-message: + prefix: "chore(frontend/deps)" + prefix-development: "chore(frontend/deps-dev)" groups: production-dependencies: dependency-type: "production" @@ -58,7 +66,6 @@ updates: - "minor" - "patch" - # infra (Terraform) - package-ecosystem: "terraform" directory: "autogpt_platform/infra" @@ -66,6 +73,10 @@ updates: interval: "weekly" open-pull-requests-limit: 5 target-branch: "dev" + commit-message: + prefix: "chore(infra/deps)" + prefix-development: "chore(infra/deps-dev)" + groups: production-dependencies: dependency-type: "production" @@ -78,7 +89,6 @@ updates: - "minor" - "patch" - # market (Poetry project) - package-ecosystem: "pip" directory: "autogpt_platform/market" @@ -86,6 +96,9 @@ updates: interval: "weekly" open-pull-requests-limit: 10 target-branch: "dev" + commit-message: + prefix: "chore(market/deps)" + prefix-development: "chore(market/deps-dev)" groups: production-dependencies: dependency-type: "production" @@ -146,6 +159,9 @@ updates: interval: "weekly" open-pull-requests-limit: 1 target-branch: "dev" + commit-message: + prefix: 
"chore(platform/deps)" + prefix-development: "chore(platform/deps-dev)" groups: production-dependencies: dependency-type: "production" @@ -166,6 +182,8 @@ updates: interval: "weekly" open-pull-requests-limit: 1 target-branch: "dev" + commit-message: + prefix: "chore(docs/deps)" groups: production-dependencies: dependency-type: "production" diff --git a/autogpt_platform/autogpt_libs/autogpt_libs/feature_flag/client_test.py b/autogpt_platform/autogpt_libs/autogpt_libs/feature_flag/client_test.py index b9d1e5c96aa1..8fccfb28b501 100644 --- a/autogpt_platform/autogpt_libs/autogpt_libs/feature_flag/client_test.py +++ b/autogpt_platform/autogpt_libs/autogpt_libs/feature_flag/client_test.py @@ -1,7 +1,8 @@ import pytest -from autogpt_libs.feature_flag.client import feature_flag, mock_flag_variation from ldclient import LDClient +from autogpt_libs.feature_flag.client import feature_flag, mock_flag_variation + @pytest.fixture def ld_client(mocker): diff --git a/autogpt_platform/autogpt_libs/autogpt_libs/feature_flag/config.py b/autogpt_platform/autogpt_libs/autogpt_libs/feature_flag/config.py index c8040a41bb8c..e01c285d1e66 100644 --- a/autogpt_platform/autogpt_libs/autogpt_libs/feature_flag/config.py +++ b/autogpt_platform/autogpt_libs/autogpt_libs/feature_flag/config.py @@ -6,7 +6,7 @@ class Settings(BaseSettings): launch_darkly_sdk_key: str = Field( default="", description="The Launch Darkly SDK key", - validation_alias="LAUNCH_DARKLY_SDK_KEY" + validation_alias="LAUNCH_DARKLY_SDK_KEY", ) model_config = SettingsConfigDict(case_sensitive=True, extra="ignore") diff --git a/autogpt_platform/autogpt_libs/autogpt_libs/utils/__init__.py b/autogpt_platform/autogpt_libs/autogpt_libs/utils/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/autogpt_platform/backend/backend/blocks/jina/fact_checker.py b/autogpt_platform/backend/backend/blocks/jina/fact_checker.py new file mode 100644 index 000000000000..c9b8c08d1db8 --- /dev/null +++ 
b/autogpt_platform/backend/backend/blocks/jina/fact_checker.py @@ -0,0 +1,59 @@ +from urllib.parse import quote + +import requests + +from backend.blocks.jina._auth import ( + JinaCredentials, + JinaCredentialsField, + JinaCredentialsInput, +) +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField + + +class FactCheckerBlock(Block): + class Input(BlockSchema): + statement: str = SchemaField( + description="The statement to check for factuality" + ) + credentials: JinaCredentialsInput = JinaCredentialsField() + + class Output(BlockSchema): + factuality: float = SchemaField( + description="The factuality score of the statement" + ) + result: bool = SchemaField(description="The result of the factuality check") + reason: str = SchemaField(description="The reason for the factuality result") + error: str = SchemaField(description="Error message if the check fails") + + def __init__(self): + super().__init__( + id="d38b6c5e-9968-4271-8423-6cfe60d6e7e6", + description="This block checks the factuality of a given statement using Jina AI's Grounding API.", + categories={BlockCategory.SEARCH}, + input_schema=FactCheckerBlock.Input, + output_schema=FactCheckerBlock.Output, + ) + + def run( + self, input_data: Input, *, credentials: JinaCredentials, **kwargs + ) -> BlockOutput: + encoded_statement = quote(input_data.statement) + url = f"https://g.jina.ai/{encoded_statement}" + + headers = { + "Accept": "application/json", + "Authorization": f"Bearer {credentials.api_key.get_secret_value()}", + } + + response = requests.get(url, headers=headers) + response.raise_for_status() + data = response.json() + + if "data" in data: + data = data["data"] + yield "factuality", data["factuality"] + yield "result", data["result"] + yield "reason", data["reason"] + else: + raise RuntimeError(f"Expected 'data' key not found in response: {data}") diff --git a/autogpt_platform/backend/backend/data/credit.py 
b/autogpt_platform/backend/backend/data/credit.py index c9831694d339..b3f8fbce5d4d 100644 --- a/autogpt_platform/backend/backend/data/credit.py +++ b/autogpt_platform/backend/backend/data/credit.py @@ -70,7 +70,11 @@ class UserCredit(UserCreditBase): async def get_or_refill_credit(self, user_id: str) -> int: cur_time = self.time_now() cur_month = cur_time.replace(day=1, hour=0, minute=0, second=0, microsecond=0) - nxt_month = cur_month.replace(month=cur_month.month + 1) + nxt_month = ( + cur_month.replace(month=cur_month.month + 1) + if cur_month.month < 12 + else cur_month.replace(year=cur_month.year + 1, month=1) + ) user_credit = await UserBlockCredit.prisma().group_by( by=["userId"], diff --git a/autogpt_platform/backend/linter.py b/autogpt_platform/backend/linter.py index 83c574b03494..9bba9d1963b6 100644 --- a/autogpt_platform/backend/linter.py +++ b/autogpt_platform/backend/linter.py @@ -2,6 +2,7 @@ import subprocess directory = os.path.dirname(os.path.realpath(__file__)) +target_dirs = ["../backend", "../autogpt_libs"] def run(*command: str) -> None: @@ -11,17 +12,17 @@ def run(*command: str) -> None: def lint(): try: - run("ruff", "check", ".", "--exit-zero") + run("ruff", "check", *target_dirs, "--exit-zero") run("isort", "--diff", "--check", "--profile", "black", ".") run("black", "--diff", "--check", ".") - run("pyright") + run("pyright", *target_dirs) except subprocess.CalledProcessError as e: print("Lint failed, try running `poetry run format` to fix the issues: ", e) raise e def format(): - run("ruff", "check", "--fix", ".") + run("ruff", "check", "--fix", *target_dirs) run("isort", "--profile", "black", ".") run("black", ".") - run("pyright", ".") + run("pyright", *target_dirs) diff --git a/autogpt_platform/backend/pyproject.toml b/autogpt_platform/backend/pyproject.toml index bcfe089c0755..2fc7d7f5269e 100644 --- a/autogpt_platform/backend/pyproject.toml +++ b/autogpt_platform/backend/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = 
"autogpt-platform-backend" -version = "0.3.3" +version = "0.3.4" description = "A platform for building AI-powered agentic workflows" authors = ["AutoGPT "] readme = "README.md" diff --git a/autogpt_platform/frontend/.env.example b/autogpt_platform/frontend/.env.example index 0fe0b7540292..9b843a754c40 100644 --- a/autogpt_platform/frontend/.env.example +++ b/autogpt_platform/frontend/.env.example @@ -2,6 +2,7 @@ NEXT_PUBLIC_AUTH_CALLBACK_URL=http://localhost:8006/auth/callback NEXT_PUBLIC_AGPT_SERVER_URL=http://localhost:8006/api NEXT_PUBLIC_AGPT_WS_SERVER_URL=ws://localhost:8001/ws NEXT_PUBLIC_AGPT_MARKETPLACE_URL=http://localhost:8015/api/v1/market +NEXT_PUBLIC_APP_ENV=dev ## Supabase credentials @@ -15,4 +16,4 @@ AUTH_CALLBACK_URL=http://localhost:3000/auth/callback GA_MEASUREMENT_ID=G-FH2XK2W4GN # When running locally, set NEXT_PUBLIC_BEHAVE_AS=CLOUD to use the a locally hosted marketplace (as is typical in development, and the cloud deployment), otherwise set it to LOCAL to have the marketplace open in a new tab -NEXT_PUBLIC_BEHAVE_AS=LOCAL \ No newline at end of file +NEXT_PUBLIC_BEHAVE_AS=LOCAL diff --git a/autogpt_platform/frontend/package.json b/autogpt_platform/frontend/package.json index 7378baa62e66..f4437b93c26c 100644 --- a/autogpt_platform/frontend/package.json +++ b/autogpt_platform/frontend/package.json @@ -1,6 +1,6 @@ { "name": "frontend", - "version": "0.3.3", + "version": "0.3.4", "private": true, "scripts": { "dev": "next dev", diff --git a/autogpt_platform/frontend/src/components/nav/MarketPopup.tsx b/autogpt_platform/frontend/src/components/nav/MarketPopup.tsx index bdb5d1bee026..c04c32ecc3e2 100644 --- a/autogpt_platform/frontend/src/components/nav/MarketPopup.tsx +++ b/autogpt_platform/frontend/src/components/nav/MarketPopup.tsx @@ -7,7 +7,15 @@ interface MarketPopupProps extends ButtonHTMLAttributes { export default function MarketPopup({ className = "", - marketplaceUrl = "http://platform.agpt.co/marketplace", + marketplaceUrl = (() => { 
+ if (process.env.NEXT_PUBLIC_APP_ENV === "prod") { + return "https://production-marketplace-url.com"; + } else if (process.env.NEXT_PUBLIC_APP_ENV === "dev") { + return "https://dev-builder.agpt.co/marketplace"; + } else { + return "http://localhost:3000/marketplace"; + } + })(), children, ...props }: MarketPopupProps) { diff --git a/docs/content/platform/agent-blocks.md b/docs/content/platform/agent-blocks.md new file mode 100644 index 000000000000..0ae2d0090bd8 --- /dev/null +++ b/docs/content/platform/agent-blocks.md @@ -0,0 +1,86 @@ +# **How to Create an AI Agent as a Block in AutoGPT** + +## **Overview** + +This guide explains how to create a reusable agent block that can be used as a component in other agents. + +
+ +## **What Are Agent Blocks?** + +Agent blocks are pre-configured, reusable AI workflows that can be used as components within larger automation systems. Think of them as "smart building blocks" - each agent block is itself a complete workflow that can: + +- Accept specific inputs +- Process data using AI and traditional automation +- Produce defined outputs +- Be easily reused in different contexts + +The power of agent blocks lies in their modularity. Once you create an agent with a specific capability (like translating text or analyzing sentiment), you can reuse it as a single block in other workflows. This means you can: + +- Combine multiple agent blocks to create more complex automations +- Reuse proven workflows without rebuilding them +- Share agent blocks with other users +- Create hierarchical systems where specialized agents work together + +For example, a content creation workflow might combine several agent blocks: + +- A research agent block that gathers information +- A writing agent block that creates the initial draft +- An editing agent block that polishes the content +- A formatting agent block that prepares the final output + +## **Creating the Base Agent** + +### **Required Components** + +1. Input Block +2. AI Text Generator Block +3. Output Block + +### **Step-by-Step Setup** + +1. **Add and Configure Blocks** + * Add an Input Block + * Add an AI Text Generator Block + * Add an Output Block +2. **Connect Components** + * Connect Input's result to AI Text Generator's Prompt + * Connect AI Text Generator's response to Output's value +3. **Name the Components** + * Name the Input Block: "question" + * Name the Output Block: "answer" +4. **Save the Agent** + * Choose a descriptive name (e.g., "Weather Agent") + * Click Save + + + +## **Converting to a Block** + +1. **Access the Block Menu** + * Go to the Builder interface + * Click the Blocks menu + * Click the agent tag or search the name of your agent +2. 
**Using the Agent Block** + * Click on the agent block to add to your workflow + * Save the new agent with a descriptive name (e.g., "Weather Agent") + +## **Testing the Agent Block** + +1. **Run the Agent** + * Enter a test question (e.g., "How far is the Earth from the Moon?") + * Click Run +2. **View Results** + * Option 1: Check "Agent Outputs" section* + * Option 2: Click "View More" for detailed results + +*Note: If there is no output block, the "Agent Outputs" button will show up blank. You can see the output under "View More" or at the bottom of the block. + +## **Advanced Usage** + +* You can make more complex agents by combining multiple agent blocks +* Chain different agents together for more sophisticated workflows + +## **Note** + +This is a basic example that can be expanded upon to create more complex agent blocks with additional functionality. \ No newline at end of file diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index 6b1ec59e3686..56b8b9fddf00 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -10,6 +10,7 @@ nav: - The AutoGPT Platform 🆕: - Getting Started: platform/getting-started.md - Advanced Setup: platform/advanced_setup.md + - Agent Blocks: platform/agent-blocks.md - Build your own Blocks: platform/new_blocks.md - Using Ollama: platform/ollama.md - Using D-ID: platform/d_id.md