From f5ca789baaaa54f79a346a5be3dd55abbec1f630 Mon Sep 17 00:00:00 2001
From: Li Yin
Date: Wed, 3 Jul 2024 23:36:54 -0700
Subject: [PATCH] update root readme
---
README.md | 54 ++++++++++++++++++++++++-------
developer_notes/generator_note.py | 25 +++++++-------
lightrag/pyproject.toml | 2 +-
3 files changed, 58 insertions(+), 23 deletions(-)
diff --git a/README.md b/README.md
index 895eee7c..bc8067ad 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,6 @@
![LightRAG Logo](https://raw.githubusercontent.com/SylphAI-Inc/LightRAG/main/docs/source/_static/images/LightRAG-logo-doc.jpeg)
-### ⚡⚡⚡ The PyTorch Library for Large language Model (LLM) Applications ⚡⚡⚡
+### ⚡ The PyTorch Library for Large Language Model (LLM) Applications ⚡
*LightRAG* helps developers with both building and optimizing *Retriever-Agent-Generator (RAG)* pipelines.
It is *light*, *modular*, and *robust*.
@@ -34,15 +34,14 @@ class Net(nn.Module):
## LightRAG Task Pipeline
-We will ask the model to response with ``explaination`` and ``example`` of a concept. And we built the pipeline to get the structured output as ``QAOutput``.
+We will ask the model to respond with an ``explaination`` and an ``example`` of a concept, and we will build a pipeline to get the structured output as ``QAOutput``.
```python
from dataclasses import dataclass, field
-from lightrag.core import Component, Generator, fun_to_component
+from lightrag.core import Component, Generator, DataClass, fun_to_component, Sequential
from lightrag.components.model_client import GroqAPIClient
-from lightrag.core import DataClass, fun_to_component, Sequential
from lightrag.components.output_parsers import JsonOutputParser
@dataclass
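The hunk cuts off before the `QAOutput` definition itself. Judging from the field names and descriptions that appear in the printed format instructions further down, it presumably looks roughly like the sketch below; the `field(metadata={"desc": ...})` convention is an assumption, not something this hunk shows:

```python
# Hypothetical reconstruction of the elided QAOutput dataclass; the field
# names and descriptions are taken from the printed schema later in this README.
from dataclasses import dataclass, field
from lightrag.core import DataClass

@dataclass
class QAOutput(DataClass):
    explaination: str = field(
        metadata={"desc": "A brief explaination of the concept in one sentence."}
    )
    example: str = field(
        metadata={"desc": "An example of the concept in a sentence."}
    )
```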
@@ -62,13 +61,13 @@ class QA(Component):
def __init__(self):
super().__init__()
template = r"""
- You are a helpful assistant.
-
- {{output_format_str}}
-
-
- User: {{input_str}}
- You:
+You are a helpful assistant.
+
+{{output_format_str}}
+
+
+User: {{input_str}}
+You:
"""
parser = JsonOutputParser(data_class=QAOutput)
self.generator = Generator(
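The constructor call is truncated by the hunk boundary. Based on the standalone example in developer_notes/generator_note.py later in this patch, the wiring is presumably along these lines; the model choice and keyword names are assumptions rather than something this hunk confirms:

```python
# Sketch of the likely Generator wiring; the model and parameter names are
# borrowed from the generator_note.py example in this same patch.
self.generator = Generator(
    model_client=GroqAPIClient(),
    model_kwargs={"model": "llama3-8b-8192"},
    template=template,
    prompt_kwargs={"output_format_str": parser.format_instructions()},
    output_processors=parser,
)
```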
@@ -152,6 +151,39 @@ Here is what we get from ``print(output)``:
```
GeneratorOutput(data=QAOutput(explaination='LLM stands for Large Language Model, which refers to a type of artificial intelligence designed to process and generate human-like language.', example='For example, a LLM can be trained to generate news articles, conversations, or even entire books, and can be used for a variety of applications such as language translation, text summarization, and chatbots.'), error=None, usage=None, raw_response='```\n{\n "explaination": "LLM stands for Large Language Model, which refers to a type of artificial intelligence designed to process and generate human-like language.",\n "example": "For example, a LLM can be trained to generate news articles, conversations, or even entire books, and can be used for a variety of applications such as language translation, text summarization, and chatbots."\n}', metadata=None)
```
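Since `data` carries the parsed `QAOutput`, callers can check `error` and then read the structured fields directly. A small usage sketch, assuming the component is invoked the same way as `qa2("What is LLM?")` later in this patch:

```python
qa = QA()
output = qa("What is LLM?")  # returns a GeneratorOutput
if output.error is None:
    print(output.data.explaination)  # the one-sentence explanation
    print(output.data.example)       # the example sentence
```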
+**See the prompt**
+
+Use the following code:
+
+```python
+
+qa2.generator.print_prompt(
+ output_format_str=qa2.generator.output_processors[0].format_instructions(),
+ input_str="What is LLM?",
+)
+```
+
+
+The output will be:
+
+````
+You are a helpful assistant.
+
+Your output should be formatted as a standard JSON instance with the following schema:
+```
+{
+    "explaination": "A brief explaination of the concept in one sentence. (str) (required)",
+    "example": "An example of the concept in a sentence. (str) (required)"
+}
+```
+-Make sure to always enclose the JSON output in triple backticks (```). Please do not add anything other than valid JSON output!
+-Use double quotes for the keys and string values.
+-Follow the JSON formatting conventions.
+
+
+User: What is LLM?
+You:
+````
## Quick Install
diff --git a/developer_notes/generator_note.py b/developer_notes/generator_note.py
index 5cc28339..f7a57f2e 100644
--- a/developer_notes/generator_note.py
+++ b/developer_notes/generator_note.py
@@ -1,8 +1,7 @@
from dataclasses import dataclass, field
-from lightrag.core import Component, Generator
+from lightrag.core import Component, Generator, DataClass, fun_to_component, Sequential
from lightrag.components.model_client import GroqAPIClient
-from lightrag.core import DataClass, fun_to_component, Sequential
from lightrag.components.output_parsers import JsonOutputParser
from lightrag.utils import setup_env
@@ -48,13 +47,13 @@ class QA(Component):
def __init__(self):
super().__init__()
template = r"""
- You are a helpful assistant.
-
- {{output_format_str}}
-
-
- User: {{input_str}}
- You:
+You are a helpful assistant.
+
+{{output_format_str}}
+
+
+User: {{input_str}}
+You:
"""
parser = JsonOutputParser(data_class=QAOutput)
self.generator = Generator(
@@ -123,8 +122,8 @@ def use_its_own_template():
from lightrag.components.model_client import GroqAPIClient
template = r"""{{task_desc_str}}
- User: {{input_str}}
- You:"""
+User: {{input_str}}
+You:"""
generator = Generator(
model_client=GroqAPIClient(),
model_kwargs={"model": "llama3-8b-8192"},
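A generator built this way would then be called with the template variables supplied via `prompt_kwargs`. A minimal invocation sketch, assuming the keys must match the `{{task_desc_str}}` and `{{input_str}}` placeholders one-to-one:

```python
# Minimal invocation sketch; the prompt_kwargs keys are assumed to map
# directly onto the template placeholders above.
output = generator(
    prompt_kwargs={
        "task_desc_str": "You are a helpful assistant.",
        "input_str": "What is LLM?",
    }
)
print(output.data)
```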
@@ -225,6 +224,10 @@ def create_purely_from_config_2():
answer = qa2("What is LLM?")
print(qa2)
print(answer)
+ qa2.generator.print_prompt(
+ output_format_str=qa2.generator.output_processors[0].format_instructions(),
+ input_str="What is LLM?",
+ )
# minimum_generator()
# use_a_json_parser()
diff --git a/lightrag/pyproject.toml b/lightrag/pyproject.toml
index 6b1c608f..6226b0e4 100644
--- a/lightrag/pyproject.toml
+++ b/lightrag/pyproject.toml
@@ -1,7 +1,7 @@
[tool.poetry]
name = "lightrag"
-version = "0.0.0-alpha.10"
+version = "0.0.0-alpha.11"
description = "The 'PyTorch' library for LLM applications. RAG=Retriever-Agent-Generator."
authors = ["Li Yin "]
readme = "README.md"