
tweaks
emrgnt-cmplxty committed Oct 30, 2023
1 parent cd9208c commit 5e1d114
Showing 3 changed files with 14 additions and 13 deletions.
12 changes: 6 additions & 6 deletions README.md
@@ -31,8 +31,6 @@ pip install 'sciphi[all_with_extras]'
- **All (no vLLM)**: `'sciphi[all]'`
- **Anthropic**: `'sciphi[anthropic_support]'`
- **HF (includes Torch)**: `'sciphi[hf_support]'`
- **Llama-CPP**: `'sciphi[llama_cpp_support]'`
- **Llama-Index**: `'sciphi[llama_index_support]'`
- **VLLM (includes Torch)**: `'sciphi[vllm_support]'`

### **Setup Your Environment**
@@ -41,15 +39,17 @@ Navigate to your working directory and use a text editor to adjust the `.env` file

```bash
# Proprietary Providers
OPENAI_API_KEY=your_openai_key
ANTHROPIC_API_KEY=your_anthropic_key
OPENAI_API_KEY=your_openai_api_key
ANTHROPIC_API_KEY=your_anthropic_api_key
# Open Source Providers
HF_TOKEN=your_huggingface_token
# vLLM
VLLM_API_KEY=your_vllm_token
VLLM_API_KEY=your_vllm_api_key # for remote vLLM use.
# SciPhi
SCIPHI_API_KEY=your_sciphi_api_key # for remote vLLM use.
# RAG Provider Settings
RAG_API_KEY=your_rag_server_api_key
RAG_API_BASE=your_rag_server_base_url
RAG_API_KEY=your_rag_server_key
```

After entering your settings, ensure you save and exit the file.
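If it is unclear whether these settings are being picked up, a minimal sanity check is to read them back with `os.getenv`. This is only a sketch: it assumes the variables above have been exported to the process (or loaded with a tool such as python-dotenv) before SciPhi runs.

```python
import os

# Check that the values from the `.env` file are visible to the process.
# The variable names are the ones shown in the example above.
for var in (
    "OPENAI_API_KEY",
    "ANTHROPIC_API_KEY",
    "HF_TOKEN",
    "VLLM_API_KEY",
    "SCIPHI_API_KEY",
    "RAG_API_BASE",
    "RAG_API_KEY",
):
    print(f"{var}: {'set' if os.getenv(var) else 'missing'}")
```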
10 changes: 5 additions & 5 deletions sciphi/llm/models/sciphi_llm.py
@@ -1,15 +1,15 @@
"""A module for managing local vLLM models."""

import os
import logging
from dataclasses import dataclass
from enum import Enum
from typing import Optional

from sciphi.core import LLMProviderName
from sciphi.llm import LLM, GenerationConfig, LLMConfig
from sciphi.llm.config_manager import model_config

logging.basicConfig(level=logging.INFO)
from enum import Enum


class SciPhiProviderMode(Enum):
@@ -48,20 +48,20 @@ def __init__(
SciPhiProviderMode.REMOTE,
SciPhiProviderMode.LOCAL_VLLM,
]:
# Remtoe and local vLLM are both powered by vLLM
# Remote and local vLLM are both powered by vLLM
assert self.config.sub_provider_name == LLMProviderName.VLLM
from sciphi.llm.models.vllm_llm import (
vLLM,
vLLMConfig,
vLLMProviderMode,
vLLM,
)

if self.config.mode == SciPhiProviderMode.REMOTE:
self.model = vLLM(
vLLMConfig(
provider_name=config.provider_name,
server_base=config.server_base,
api_key=config.api_key,
api_key=config.api_key or os.getenv("SCIPHI_API_KEY"),
mode=vLLMProviderMode.REMOTE,
),
)
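
The `api_key=config.api_key or os.getenv("SCIPHI_API_KEY")` change above adds an environment-variable fallback: an explicit key on the config wins, otherwise the key is read from the environment. A minimal standalone sketch of the same pattern, with `ExampleConfig` and `resolve_api_key` as hypothetical names used only for illustration:

```python
import os
from dataclasses import dataclass
from typing import Optional


@dataclass
class ExampleConfig:
    api_key: Optional[str] = None


def resolve_api_key(config: ExampleConfig) -> Optional[str]:
    # An explicit key on the config takes precedence; otherwise fall back to
    # the SCIPHI_API_KEY environment variable, mirroring the hunk above.
    return config.api_key or os.getenv("SCIPHI_API_KEY")


# With SCIPHI_API_KEY exported, resolve_api_key(ExampleConfig()) returns it;
# resolve_api_key(ExampleConfig(api_key="explicit")) returns "explicit".
```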
5 changes: 3 additions & 2 deletions sciphi/llm/models/vllm_llm.py
@@ -1,5 +1,6 @@
"""A module for managing local vLLM models."""

import os
import logging
from dataclasses import dataclass
from typing import Optional
@@ -42,7 +43,7 @@ def __init__(
) -> None:
self.config = config
if config.mode == vLLMProviderMode.REMOTE:
from sciphi.llm.models.openai_llm import OpenAILLM, OpenAIConfig
from sciphi.llm.models.openai_llm import OpenAIConfig, OpenAILLM

self.model = OpenAILLM(OpenAIConfig(config.provider_name))
else:
@@ -66,7 +67,7 @@ def get_instruct_completion(

if self.config.server_base:
openai.api_base = self.config.server_base
openai.api_key = self.config.api_key
openai.api_key = self.config.api_key or os.getenv("VLLM_API_KEY")
return self.model.get_instruct_completion(
prompt, generation_config
)
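
The same fallback is applied to the remote vLLM path, which reaches the server through the OpenAI-compatible API by setting `openai.api_base` and `openai.api_key`. A rough sketch of what such a remote call looks like in isolation, assuming a pre-1.0 `openai` client (to match the module-level attributes used above), a locally running vLLM server, and a placeholder model name:

```python
import os

import openai  # pre-1.0 client, matching the module-level attributes above

# Point the OpenAI-compatible client at a remote vLLM server.
# The URL and model name are placeholders, not values from this commit.
openai.api_base = "http://localhost:8000/v1"
openai.api_key = os.getenv("VLLM_API_KEY", "EMPTY")

response = openai.ChatCompletion.create(
    model="your-served-model",
    messages=[{"role": "user", "content": "Hello from a remote vLLM server"}],
)
print(response.choices[0].message["content"])
```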
