Skip to content
This repository has been archived by the owner on Jan 5, 2025. It is now read-only.

Commit

Permalink
Adding custom query planners
Browse files Browse the repository at this point in the history
  • Loading branch information
codebanesr committed Oct 6, 2023
1 parent 45fffd4 commit caa37a8
Show file tree
Hide file tree
Showing 7 changed files with 46 additions and 37 deletions.
2 changes: 2 additions & 0 deletions llm-server/readme.md
Original file line number Diff line number Diff line change
Expand Up @@ -63,6 +63,8 @@ To install Mypy, which is a static type checker for Python, follow these steps:
MONGODB_URL=mongodb://localhost:27017/opencopilot
QDRANT_URL=http://localhost:6333
STORE=QDRANT
QDRANT_API_KEY= # When using cloud hosted version
SCORE_THRESHOLD=0.95 # When using pre-defined workflows, the confidence score at which OpenCopilot should select your workflow. If the score falls below this, the planner will design its own workflow
```

Ensure you replace the placeholders with your actual API keys and configuration settings.
Expand Down
2 changes: 1 addition & 1 deletion llm-server/routes/workflow/extractors/extract_param.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ def gen_params_from_schema(
HumanMessage(content="User input: {}".format(text)),
HumanMessage(content="prev api responses: {}".format(prev_resp)),
HumanMessage(
content="Given the provided information, generate the appropriate JSON payload to use as parameters for the API request"
content="Based on the information provided, construct a valid parameter object to be used with python requests library. In cases where user input doesnot contain information for a query, DO NOT add that specific query parameter to the output. "
),
]
result = chat(messages)
Expand Down
23 changes: 6 additions & 17 deletions llm-server/routes/workflow/extractors/transform_api_response.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,5 @@
import os, logging
from langchain.chat_models import ChatOpenAI
from custom_types.t_json import JsonData
from typing import Optional
from dotenv import load_dotenv
from langchain.schema import HumanMessage, SystemMessage
from typing import Any
Expand All @@ -12,9 +10,7 @@
openai_api_key = os.getenv("OPENAI_API_KEY")


def transform_api_response_from_schema(
server_url: str, api_response: str
) -> Optional[JsonData]:
def transform_api_response_from_schema(server_url: str, api_response: str) -> str:
chat = ChatOpenAI(
openai_api_key=os.getenv("OPENAI_API_KEY"),
model="gpt-3.5-turbo-16k",
Expand All @@ -23,26 +19,19 @@ def transform_api_response_from_schema(

messages = [
SystemMessage(
content="You are an intelligent AI assistant that can identify important fields from a REST API response."
content="You are a bot capable of comprehending API responses."
),
HumanMessage(
content="Here is the response from a REST API call: {} for endpoint: {}".format(
content="Here is the response from current REST API: {} for endpoint: {}".format(
api_response, server_url
)
),
HumanMessage(
content="Please examine the given API response and return only the fields that are important when making API calls. Ignore any unimportant fields. Structure your response as a JSON object with self-descriptive keys mapped to the corresponding values from the API response."
content="Analyze the provided API responses and extract only the essential fields required for subsequent API interactions. Disregard any non-essential attributes such as CSS or color-related data. If there are generic fields like 'id,' provide them with more descriptive names in your response. Format your response as a JSON object with clear and meaningful keys that map to their respective values from the API response."
),
]

result = chat(messages)
logging.info("[OpenCopilot] LLM Body Response: {}".format(result.content))
logging.info("[OpenCopilot] Transformed Response: {}".format(result.content))

d = extract_json_payload(result.content)
logging.info(
"[OpenCopilot] Parsed the json payload: {}, context: {}".format(
d, "gen_body_from_schema"
)
)

return d
return result.content
9 changes: 0 additions & 9 deletions llm-server/routes/workflow/generate_openapi_payload.py
Original file line number Diff line number Diff line change
@@ -1,9 +1,6 @@
import re
import os
import json
from routes.workflow.extractors.transform_api_response import (
transform_api_response_from_schema,
)
from utils.get_llm import get_llm
from dotenv import load_dotenv
from .extractors.example_generator import gen_ex_from_schema
Expand Down Expand Up @@ -113,13 +110,7 @@ def generate_openapi_payload(
api_info.body_schema = gen_body_from_schema(
json.dumps(api_info.body_schema), text, prev_api_response, example
)
# when you come back, clear the trello board and
# extract api info and set it up for next call
transformed_response = transform_api_response_from_schema(
api_info.endpoint or "", api_info.body_schema
)

prev_api_response = prev_api_response + json.loads(transformed_response)
else:
api_info.body_schema = {}

Expand Down
2 changes: 1 addition & 1 deletion llm-server/routes/workflow/hierarchical_planner.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@


def create_and_run_openapi_agent(
swagger_json: Any, user_query: str, headers: Dict[str, str] = {}
swagger_json: Any, user_query: str, headers: Dict[str, str] = {}
) -> Any:
# Load OpenAPI spec
# raw_spec = json.loads(swagger_json)
Expand Down
17 changes: 12 additions & 5 deletions llm-server/routes/workflow/utils/run_openapi_ops.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,9 @@
import traceback
import logging
from typing import Any
from routes.workflow.extractors.transform_api_response import (
transform_api_response_from_schema,
)


def run_openapi_operations(
Expand All @@ -13,10 +16,9 @@ def run_openapi_operations(
headers: Any,
server_base_url: str,
) -> str:
prev_api_response = ""
record_info = {"Workflow Name": record.get("name")}
for flow in record.get("flows", []):
prev_api_response = ""

for step in flow.get("steps"):
try:
operation_id = step.get("open_api_operation_id")
Expand All @@ -25,8 +27,13 @@ def run_openapi_operations(
)

api_response = make_api_request(headers=headers, **api_payload.__dict__)

transformed_response = transform_api_response_from_schema(
api_payload.endpoint or "", api_response.text
)

prev_api_response = prev_api_response + transformed_response
record_info[operation_id] = json.loads(api_response.text)
prev_api_response = api_response.text

except Exception as e:
logging.error("Error making API call", exc_info=True)
Expand All @@ -36,8 +43,8 @@ def run_openapi_operations(
"error": str(e),
"traceback": traceback.format_exc(),
}

record_info[operation_id] = error_info

prev_api_response = ""
# At this point we will retry the operation with hierarchical planner
raise e
return json.dumps(record_info)
28 changes: 24 additions & 4 deletions llm-server/routes/workflow/utils/run_workflow.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,8 @@
from routes.workflow.typings.run_workflow_input import WorkflowData
from routes.workflow.utils.run_openapi_ops import run_openapi_operations
from opencopilot_types.workflow_type import WorkflowDataType
from routes.workflow.hierarchical_planner import create_and_run_openapi_agent
import logging, json


def run_workflow(
Expand All @@ -10,8 +12,26 @@ def run_workflow(
headers = data.headers or {}
server_base_url = data.server_base_url

result = run_openapi_operations(
workflow_doc, swagger_json, data.text, headers, server_base_url
)
result = ""
error = None

return {"response": result}
try:
result = run_openapi_operations(
workflow_doc, swagger_json, data.text, headers, server_base_url
)
except Exception as e:
logging.error("[OpenCopilot] Custom planner failed: %s", e)
error = str(e)

try:
result = create_and_run_openapi_agent(swagger_json, data.text, headers)
except Exception as e:
logging.error("[OpenCopilot] Hierarchical planner failed: %s", e)
error = str(e)
raise

output = {"response": result if not error else "", "error": error}

logging.info("[OpenCopilot] Workflow output %s", json.dumps(output))

return output

0 comments on commit caa37a8

Please sign in to comment.