From caa37a87c443b2d2bbf3fd3efc626727318386dc Mon Sep 17 00:00:00 2001
From: codebane
Date: Fri, 6 Oct 2023 05:13:30 +0300
Subject: [PATCH] Add custom query planners

---
 llm-server/readme.md                          |  2 ++
 .../workflow/extractors/extract_param.py      |  2 +-
 .../extractors/transform_api_response.py      | 23 ++++-----------
 .../workflow/generate_openapi_payload.py      |  9 ------
 .../routes/workflow/hierarchical_planner.py   |  2 +-
 .../routes/workflow/utils/run_openapi_ops.py  | 17 +++++++----
 .../routes/workflow/utils/run_workflow.py     | 30 +++++++++++++++----
 7 files changed, 48 insertions(+), 37 deletions(-)

diff --git a/llm-server/readme.md b/llm-server/readme.md
index 51d348d92..6c3354474 100644
--- a/llm-server/readme.md
+++ b/llm-server/readme.md
@@ -63,6 +63,8 @@ To install Mypy, which is a static type checker for Python, follow these steps:
    MONGODB_URL=mongodb://localhost:27017/opencopilot
    QDRANT_URL=http://localhost:6333
    STORE=QDRANT
+   QDRANT_API_KEY= # Required only when using the cloud-hosted version
+   SCORE_THRESHOLD=0.95 # When using pre-defined workflows, the confidence score at which OpenCopilot selects your workflow. If the score falls below this threshold, the planner designs its own workflow instead.
    ```
 
 Ensure you replace the placeholders with your actual API keys and configuration settings.
diff --git a/llm-server/routes/workflow/extractors/extract_param.py b/llm-server/routes/workflow/extractors/extract_param.py
index fb65ce8c5..414fe17fb 100644
--- a/llm-server/routes/workflow/extractors/extract_param.py
+++ b/llm-server/routes/workflow/extractors/extract_param.py
@@ -28,7 +28,7 @@ def gen_params_from_schema(
         HumanMessage(content="User input: {}".format(text)),
         HumanMessage(content="prev api responses: {}".format(prev_resp)),
         HumanMessage(
-            content="Given the provided information, generate the appropriate JSON payload to use as parameters for the API request"
+            content="Based on the information provided, construct a valid parameter object to be used with the Python requests library. If the user input does not contain a value for a query parameter, DO NOT add that parameter to the output."
         ),
     ]
     result = chat(messages)
diff --git a/llm-server/routes/workflow/extractors/transform_api_response.py b/llm-server/routes/workflow/extractors/transform_api_response.py
index 3fcc05eea..9d8791550 100644
--- a/llm-server/routes/workflow/extractors/transform_api_response.py
+++ b/llm-server/routes/workflow/extractors/transform_api_response.py
@@ -1,7 +1,5 @@
 import os, logging
 from langchain.chat_models import ChatOpenAI
-from custom_types.t_json import JsonData
-from typing import Optional
 from dotenv import load_dotenv
 from langchain.schema import HumanMessage, SystemMessage
 from typing import Any
@@ -12,9 +10,7 @@
 openai_api_key = os.getenv("OPENAI_API_KEY")
 
 
-def transform_api_response_from_schema(
-    server_url: str, api_response: str
-) -> Optional[JsonData]:
+def transform_api_response_from_schema(server_url: str, api_response: str) -> str:
     chat = ChatOpenAI(
         openai_api_key=os.getenv("OPENAI_API_KEY"),
         model="gpt-3.5-turbo-16k",
@@ -23,26 +19,19 @@ def transform_api_response_from_schema(server_url: str, api_response: str) -> s
 
     messages = [
         SystemMessage(
-            content="You are an intelligent AI assistant that can identify important fields from a REST API response."
+            content="You are a bot capable of comprehending API responses."
         ),
         HumanMessage(
-            content="Here is the response from a REST API call: {} for endpoint: {}".format(
+            content="Here is the response from the current REST API: {} for endpoint: {}".format(
                 api_response, server_url
             )
         ),
         HumanMessage(
-            content="Please examine the given API response and return only the fields that are important when making API calls. Ignore any unimportant fields. Structure your response as a JSON object with self-descriptive keys mapped to the corresponding values from the API response."
+            content="Analyze the provided API response and extract only the essential fields required for subsequent API interactions. Disregard non-essential attributes such as CSS or color-related data. If there are generic fields like 'id', give them more descriptive names in your response. Format your response as a JSON object with clear and meaningful keys that map to their respective values from the API response."
         ),
     ]
 
     result = chat(messages)
-    logging.info("[OpenCopilot] LLM Body Response: {}".format(result.content))
+    logging.info("[OpenCopilot] Transformed Response: {}".format(result.content))
 
-    d = extract_json_payload(result.content)
-    logging.info(
-        "[OpenCopilot] Parsed the json payload: {}, context: {}".format(
-            d, "gen_body_from_schema"
-        )
-    )
-
-    return d
+    return result.content
diff --git a/llm-server/routes/workflow/generate_openapi_payload.py b/llm-server/routes/workflow/generate_openapi_payload.py
index d284cfd4a..91bc0f6f6 100644
--- a/llm-server/routes/workflow/generate_openapi_payload.py
+++ b/llm-server/routes/workflow/generate_openapi_payload.py
@@ -1,9 +1,6 @@
 import re
 import os
 import json
-from routes.workflow.extractors.transform_api_response import (
-    transform_api_response_from_schema,
-)
 from utils.get_llm import get_llm
 from dotenv import load_dotenv
 from .extractors.example_generator import gen_ex_from_schema
@@ -113,13 +110,7 @@ def generate_openapi_payload(
         api_info.body_schema = gen_body_from_schema(
             json.dumps(api_info.body_schema), text, prev_api_response, example
         )
-        # when you come back, clear the trello board and
-        # extract api info and set it up for next call
-        transformed_response = transform_api_response_from_schema(
-            api_info.endpoint or "", api_info.body_schema
-        )
-        prev_api_response = prev_api_response + json.loads(transformed_response)
 
     else:
         api_info.body_schema = {}
 
diff --git a/llm-server/routes/workflow/hierarchical_planner.py b/llm-server/routes/workflow/hierarchical_planner.py
index 85cd23844..7da8a8b84 100644
--- a/llm-server/routes/workflow/hierarchical_planner.py
+++ b/llm-server/routes/workflow/hierarchical_planner.py
@@ -11,7 +11,7 @@
 
 
 def create_and_run_openapi_agent(
-        swagger_json: Any, user_query: str, headers: Dict[str, str] = {}
+    swagger_json: Any, user_query: str, headers: Dict[str, str] = {}
 ) -> Any:
     # Load OpenAPI spec
     # raw_spec = json.loads(swagger_json)
diff --git a/llm-server/routes/workflow/utils/run_openapi_ops.py b/llm-server/routes/workflow/utils/run_openapi_ops.py
index 89bce4067..d3d3120d4 100644
--- a/llm-server/routes/workflow/utils/run_openapi_ops.py
+++ b/llm-server/routes/workflow/utils/run_openapi_ops.py
@@ -4,6 +4,9 @@
 import traceback
 import logging
 from typing import Any
+from routes.workflow.extractors.transform_api_response import (
+    transform_api_response_from_schema,
+)
 
 
 def run_openapi_operations(
@@ -13,10 +16,9 @@ def run_openapi_operations(
     headers: Any,
     server_base_url: str,
 ) -> str:
+    prev_api_response = ""
     record_info = {"Workflow Name": record.get("name")}
     for flow in record.get("flows", []):
-        prev_api_response = ""
-
         for step in flow.get("steps"):
             try:
                 operation_id = step.get("open_api_operation_id")
@@ -25,8 +27,13 @@ def run_openapi_operations(
                 )
 
                 api_response = make_api_request(headers=headers, **api_payload.__dict__)
+
+                transformed_response = transform_api_response_from_schema(
+                    api_payload.endpoint or "", api_response.text
+                )
+
+                prev_api_response = prev_api_response + transformed_response
                 record_info[operation_id] = json.loads(api_response.text)
-                prev_api_response = api_response.text
 
             except Exception as e:
                 logging.error("Error making API call", exc_info=True)
@@ -36,8 +43,8 @@ def run_openapi_operations(
                 error_info = {
                     "error": str(e),
                     "traceback": traceback.format_exc(),
                 }
-                record_info[operation_id] = error_info
-                prev_api_response = ""
+                # The caller retries the operation with the hierarchical planner
+                raise e
 
     return json.dumps(record_info)
diff --git a/llm-server/routes/workflow/utils/run_workflow.py b/llm-server/routes/workflow/utils/run_workflow.py
index 164c91e54..a3d6a301b 100644
--- a/llm-server/routes/workflow/utils/run_workflow.py
+++ b/llm-server/routes/workflow/utils/run_workflow.py
@@ -2,6 +2,8 @@
 from routes.workflow.typings.run_workflow_input import WorkflowData
 from routes.workflow.utils.run_openapi_ops import run_openapi_operations
 from opencopilot_types.workflow_type import WorkflowDataType
+from routes.workflow.hierarchical_planner import create_and_run_openapi_agent
+import logging, json
 
 
 def run_workflow(
@@ -10,8 +12,28 @@ def run_workflow(
     headers = data.headers or {}
     server_base_url = data.server_base_url
 
-    result = run_openapi_operations(
-        workflow_doc, swagger_json, data.text, headers, server_base_url
-    )
+    result = ""
+    error = None
 
-    return {"response": result}
+    try:
+        result = run_openapi_operations(
+            workflow_doc, swagger_json, data.text, headers, server_base_url
+        )
+    except Exception as e:
+        logging.error("[OpenCopilot] Custom planner failed: %s", e)
+        error = str(e)
+
+        # Fall back to the hierarchical planner only when the custom planner fails
+        try:
+            result = create_and_run_openapi_agent(swagger_json, data.text, headers)
+            error = None
+        except Exception as e:
+            logging.error("[OpenCopilot] Hierarchical planner failed: %s", e)
+            error = str(e)
+            raise
+
+    output = {"response": result if not error else "", "error": error}
+
+    logging.info("[OpenCopilot] Workflow output %s", json.dumps(output))
+
+    return output
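
The two new readme settings are consumed at startup. QDRANT_API_KEY can stay empty for a local Qdrant instance and is only needed for the cloud-hosted version. A minimal sketch of a client wired from these variables, assuming the qdrant-client package; the exact wiring inside llm-server may differ:

```python
import os

from qdrant_client import QdrantClient

# Local instances need no key; Qdrant Cloud requires one.
client = QdrantClient(
    url=os.getenv("QDRANT_URL", "http://localhost:6333"),
    api_key=os.getenv("QDRANT_API_KEY") or None,
)
```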
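SCORE_THRESHOLD is the gate between the two planners: a stored workflow is executed only when its retrieval score clears the threshold, otherwise the planner designs its own. A minimal sketch of that decision, assuming the vector store returns (workflow, score) hits sorted best-first; the helper name and hit shape are illustrative, not part of this patch:

```python
import os
from typing import Any, List, Optional, Tuple

SCORE_THRESHOLD = float(os.getenv("SCORE_THRESHOLD", "0.95"))


def select_workflow(hits: List[Tuple[Any, float]]) -> Optional[Any]:
    # Trust the best stored workflow only above the threshold;
    # None tells the caller to fall back to a planner-designed flow.
    if hits and hits[0][1] >= SCORE_THRESHOLD:
        return hits[0][0]
    return None
```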
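run_openapi_operations now threads a single prev_api_response string through every step: each raw response is condensed by transform_api_response_from_schema and appended, so payload generation for later steps can draw on earlier results. The same contract reduced to a self-contained sketch, where the step tuples and callables stand in for the real request objects:

```python
from typing import Callable, Dict, List, Tuple


def run_steps(
    steps: List[Tuple[str, str]],          # (operation_id, endpoint)
    call_api: Callable[[str, str], str],   # (endpoint, context) -> response text
    condense: Callable[[str, str], str],   # (endpoint, response) -> summary
) -> Dict[str, str]:
    prev_api_response = ""  # shared across all flows, as in this patch
    record_info: Dict[str, str] = {}
    for operation_id, endpoint in steps:
        response_text = call_api(endpoint, prev_api_response)
        # Append the condensed response so later steps can reference it
        prev_api_response += condense(endpoint, response_text)
        record_info[operation_id] = response_text
    return record_info
```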