Adding declarative HTTP tools to autogen ext #5181

Open · wants to merge 15 commits into base: main
5 changes: 5 additions & 0 deletions python/packages/autogen-ext/pyproject.toml
@@ -106,6 +106,11 @@ semantic-kernel-dapr = [
"semantic-kernel[dapr]>=1.17.1",
]

http = [
"httpx>=0.27.0",
"json-schema-to-pydantic>=0.2.0"
]

semantic-kernel-all = [
"semantic-kernel[google,hugging_face,mistralai,ollama,onnx,anthropic,usearch,pandas,aws,dapr]>=1.17.1",
]
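
For anyone checking out this branch: the new optional extra installs with pip install "autogen-ext[http]" and pulls in httpx and json-schema-to-pydantic. A minimal sanity check that the extra is present might look like the following sketch (it assumes the package layout added in this PR):

# Sanity check that the optional "http" dependencies are importable.
try:
    import httpx  # noqa: F401
    import json_schema_to_pydantic  # noqa: F401

    from autogen_ext.tools.http import HttpTool  # noqa: F401
except ImportError as e:
    raise ImportError('Missing optional dependencies; install with: pip install "autogen-ext[http]"') from e
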
3 changes: 3 additions & 0 deletions python/packages/autogen-ext/src/autogen_ext/tools/http/__init__.py
@@ -0,0 +1,3 @@
from ._http_tool import HttpTool

__all__ = ["HttpTool"]
212 changes: 212 additions & 0 deletions python/packages/autogen-ext/src/autogen_ext/tools/http/_http_tool.py
@@ -0,0 +1,212 @@
import re
from typing import Any, Literal, Optional, Type

import httpx
from autogen_core import CancellationToken, Component
from autogen_core.tools import BaseTool
from json_schema_to_pydantic import create_model
from pydantic import BaseModel, Field


class HttpToolConfig(BaseModel):
name: str
"""
The name of the tool.
"""
description: Optional[str] = None
"""
A description of the tool.
"""
scheme: Literal["http", "https"] = "http"
"""
The scheme to use for the request.
"""
host: str
"""
The host to send the request to.
"""
port: int
"""
The port to send the request to.
"""
path: str = Field(default="/")
"""
The path to send the request to. Defaults to "/".
The path can accept parameters, e.g. "/{param1}/{param2}".
These parameters will be templated from the input args; any additional parameters will be added as query parameters or to the body of the request.
"""
method: Optional[Literal["GET", "POST", "PUT", "DELETE", "PATCH"]] = "POST"
"""
The HTTP method to use. Defaults to "POST" if not provided.
"""
headers: Optional[dict[str, Any]] = None
"""
A dictionary of headers to send with the request.
"""
json_schema: dict[str, Any]
"""
A JSON Schema object defining the expected parameters for the tool.
Path parameters MUST also be included in the json_schema, and they MUST be declared as strings.
"""


class HttpTool(BaseTool[BaseModel, Any], Component[HttpToolConfig]):
"""A wrapper for using an HTTP server as a tool.

Args:
name (str): The name of the tool.
description (str, optional): A description of the tool.
scheme (str): The scheme to use for the request. Must be either "http" or "https".
host (str): The host to send the request to.
port (int): The port to send the request to.
path (str, optional): The path to send the request to. Defaults to "/".
Can include path parameters like "/{param1}/{param2}" which will be templated from input args.
method (str, optional): The HTTP method to use. Defaults to "POST" if not provided.
Must be one of "GET", "POST", "PUT", "DELETE", "PATCH".
headers (dict[str, Any], optional): A dictionary of headers to send with the request.
json_schema (dict[str, Any]): A JSON Schema object defining the expected parameters for the tool.
Path parameters must also be included in the schema and must be strings.

Example:
Simple use case::

import asyncio

from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.messages import TextMessage
from autogen_core import CancellationToken
from autogen_ext.models.openai import OpenAIChatCompletionClient
from autogen_ext.tools.http import HttpTool

# Define a JSON schema for a base64 decode tool
base64_schema = {
"type": "object",
"properties": {
"value": {"type": "string", "description": "The base64 value to decode"},
},
"required": ["value"]
}

# Create an HTTP tool for the httpbin base64 API
base64_tool = HttpTool(
name="base64_decode",
description="base64 decode a value",
scheme="https",
host="httpbin.org",
port=443,
path="/base64/{value}",
method="GET",
json_schema=base64_schema
)

async def main():
# Create an assistant with the base64 tool
model = OpenAIChatCompletionClient(model="gpt-4")
assistant = AssistantAgent(
"base64_assistant",
model_client=model,
tools=[base64_tool]
)

# The assistant can now use the base64 tool to decode the string
response = await assistant.on_messages([
TextMessage(content="Can you base64 decode the value 'YWJjZGU=', please?", source="user")
], CancellationToken())
print(response.chat_message.content)

asyncio.run(main())
"""

component_type = "tool"
component_provider_override = "autogen_ext.tools.http.HttpTool"
component_config_schema = HttpToolConfig

def __init__(
self,
name: str,
host: str,
port: int,
json_schema: dict[str, Any],
headers: Optional[dict[str, Any]] = None,
description: str = "HTTP tool",
path: str = "/",
scheme: Literal["http", "https"] = "http",
method: Literal["GET", "POST", "PUT", "DELETE", "PATCH"] = "POST",
) -> None:
self.server_params = HttpToolConfig(
name=name,
description=description,
host=host,
port=port,
path=path,
scheme=scheme,
method=method,
headers=headers,
json_schema=json_schema,
)

# Use a regex to find all path parameters; we will need them later to template the path
path_params = {match.group(1) for match in re.finditer(r"{([^}]*)}", path)}
self._path_params = path_params

# Create the pydantic input model from the provided JSON schema
input_model = create_model(json_schema)

# Use a permissive return type (object) since HTTP responses can vary
return_type: Type[Any] = object

super().__init__(input_model, return_type, name, description)

def _to_config(self) -> HttpToolConfig:
copied_config = self.server_params.model_copy()
return copied_config

@classmethod
def _from_config(cls, config: HttpToolConfig) -> "HttpTool":
return cls(**config.model_dump())

async def run(self, args: BaseModel, cancellation_token: CancellationToken) -> Any:
"""Execute the HTTP tool with the given arguments.

Args:
args: The validated input arguments
cancellation_token: Token for cancelling the operation

Returns:
The response body from the HTTP call in JSON format

Raises:
Exception: If tool execution fails
"""


model_dump = args.model_dump()
path_params = {k: v for k, v in model_dump.items() if k in self._path_params}
# Remove path params from the model dump
for k in self._path_params:
model_dump.pop(k)

path = self.server_params.path.format(**path_params)

url = httpx.URL(
scheme=self.server_params.scheme,
host=self.server_params.host,
port=self.server_params.port,
path=path,
)
async with httpx.AsyncClient() as client:
match self.server_params.method:
case "GET":
response = await client.get(url, params=model_dump)
case "PUT":
response = await client.put(url, json=model_dump)
case "DELETE":
response = await client.delete(url, params=model_dump)
case "PATCH":
response = await client.patch(url, json=model_dump)
case _: # Default case POST
response = await client.post(url, json=model_dump)

# TODO: (EItanya): Think about adding the ability to parse the response as JSON, or check a schema
return response.text
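
To see the pieces above working together, here is a short end-to-end sketch: it calls the tool directly via run_json and round-trips the declarative config through dump_component / load_component. It is not part of the diff; it assumes autogen_core's BaseTool.run_json and Component serialization helpers behave as in current autogen-core, and it needs network access to httpbin.org.

import asyncio

from autogen_core import CancellationToken
from autogen_ext.tools.http import HttpTool


async def main() -> None:
    tool = HttpTool(
        name="base64_decode",
        description="base64 decode a value",
        scheme="https",
        host="httpbin.org",
        port=443,
        path="/base64/{value}",
        method="GET",
        json_schema={
            "type": "object",
            "properties": {"value": {"type": "string", "description": "The base64 value to decode"}},
            "required": ["value"],
        },
    )

    # run_json validates the args against the generated pydantic model,
    # templates {value} into the path, and returns the response body text.
    result = await tool.run_json({"value": "YWJjZGU="}, CancellationToken())
    print(result)  # expected: abcde

    # Round-trip the declarative config through the component machinery.
    component_model = tool.dump_component()
    restored = HttpTool.load_component(component_model)
    assert restored.name == tool.name


asyncio.run(main())
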
103 changes: 103 additions & 0 deletions python/packages/autogen-ext/tests/tools/http/conftest.py
@@ -0,0 +1,103 @@
import asyncio
from typing import AsyncGenerator

import pytest
import pytest_asyncio
import uvicorn
from autogen_core import CancellationToken, ComponentModel
from autogen_ext.tools.http import HttpTool
from fastapi import Body, FastAPI
from pydantic import BaseModel, Field


class TestArgs(BaseModel):
query: str = Field(description="The test query")
value: int = Field(description="A test value")


class TestResponse(BaseModel):
result: str = Field(description="The test result")


# Create a test FastAPI app
app = FastAPI()


@app.post("/test")
async def test_endpoint(body: TestArgs = Body(...)) -> TestResponse:
return TestResponse(result=f"Received: {body.query} with value {body.value}")

@app.post("/test/{query}/{value}")
async def test_path_params_endpoint(query: str, value: int) -> TestResponse:
return TestResponse(result=f"Received: {query} with value {value}")

@app.put("/test/{query}/{value}")
async def test_path_params_and_body_endpoint(
query: str,
value: int,
body: dict = Body(...)
) -> TestResponse:
return TestResponse(result=f"Received: {query} with value {value} and extra {body.get("extra")}")

@app.get("/test")
async def test_get_endpoint(query: str, value: int) -> TestResponse:
return TestResponse(result=f"Received: {query} with value {value}")


@app.put("/test")
async def test_put_endpoint(body: TestArgs = Body(...)) -> TestResponse:
return TestResponse(result=f"Received: {body.query} with value {body.value}")


@app.delete("/test")
async def test_delete_endpoint(query: str, value: int) -> TestResponse:
return TestResponse(result=f"Received: {query} with value {value}")


@app.patch("/test")
async def test_patch_endpoint(body: TestArgs = Body(...)) -> TestResponse:
return TestResponse(result=f"Received: {body.query} with value {body.value}")


@pytest.fixture
def test_config() -> ComponentModel:
return ComponentModel(
provider="autogen_ext.tools.http.HttpTool",
config={
"name": "TestHttpTool",
"description": "A test HTTP tool",
"scheme": "http",
"path": "/test",
"host": "localhost",
"port": 8000,
"method": "POST",
"headers": {"Content-Type": "application/json"},
"json_schema": {
"type": "object",
"properties": {
"query": {"type": "string", "description": "The test query"},
"value": {"type": "integer", "description": "A test value"},
},
"required": ["query", "value"],
},
},
)


@pytest_asyncio.fixture
async def test_server() -> AsyncGenerator[None, None]:
# Start the test server
config = uvicorn.Config(app, host="127.0.0.1", port=8000, log_level="error")
server = uvicorn.Server(config)

# Create a task for the server
server_task = asyncio.create_task(server.serve())

# Wait a bit for server to start
await asyncio.sleep(0.5) # Increased sleep time to ensure server is ready

yield

# Cleanup
server.should_exit = True
await server_task
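
These fixtures suggest tests along the following lines; the actual test module is not shown in this hunk, so this is only a sketch (it assumes pytest-asyncio in marker mode and the load_component / run_json APIs used above):

import pytest
from autogen_core import CancellationToken, ComponentModel
from autogen_ext.tools.http import HttpTool


@pytest.mark.asyncio
async def test_post_request(test_config: ComponentModel, test_server: None) -> None:
    # Rebuild the tool from its declarative config and call the local FastAPI app.
    tool = HttpTool.load_component(test_config)
    result = await tool.run_json({"query": "hello", "value": 42}, CancellationToken())
    assert "hello" in result
    assert "42" in result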