forked from abetlen/llama-cpp-python
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path agent.py
More file actions
76 lines (68 loc) · 2.3 KB
/
agent.py
File metadata and controls
76 lines (68 loc) · 2.3 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
#!/usr/bin/env python
"""Example LangChain server exposes a conversational retrieval chain."""
import os

from langchain.agents import AgentExecutor, tool
from langchain.agents.format_scratchpad import format_to_openai_functions
from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain.pydantic_v1 import BaseModel
from langchain.tools.render import format_tool_to_openai_function
from langchain.vectorstores.chroma import Chroma

from llama_cpp.server.plugins import ChainPlugin
from llama_cpp.server.storage import chroma_client
# Connection settings for the local llama-cpp-python OpenAI-compatible
# server. Overridable via environment variables; the defaults reproduce
# the original hard-coded values, so existing deployments are unchanged.
base_url = os.environ.get('LLAMA_BASE_URL', 'http://127.0.0.1:8000/v1')
model = os.environ.get('LLAMA_MODEL', 'mistral-7b-instruct-v0.1.Q4_0')
# The local server does not validate the key, but the OpenAI client
# requires one to be set.
api_key = os.environ.get('LLAMA_API_KEY', 'xxx')

# Embeddings client pointed at the local server.
embeddings = OpenAIEmbeddings(
    base_url=base_url,
    model=model,
    api_key=api_key,
)
# Chat model served by the same local server.
llm = ChatOpenAI(
    base_url=base_url,
    model=model,
    api_key=api_key,
)
# Vector store backed by the server-provided chroma client; uses the
# client's default collection.
vectorstore = Chroma(
    client=chroma_client,
    embedding_function=embeddings,
)
retriever = vectorstore.as_retriever()
@tool
def get_eugene_thoughts(query: str) -> list:
    """Returns Eugene's thoughts on a topic."""
    # NOTE: in LangChain the docstring above doubles as the tool's
    # description shown to the model, so it is kept verbatim.
    matching_docs = retriever.get_relevant_documents(query)
    return matching_docs
tools = [get_eugene_thoughts]

# Prompt layout: fixed system instruction, the user's message, then the
# scratchpad holding the agent's intermediate tool calls/observations.
prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "You are a helpful assistant."),
        ("user", "{input}"),
        MessagesPlaceholder(variable_name="agent_scratchpad"),
    ]
)

# Advertise each tool to the model as an OpenAI-style function schema.
openai_functions = [format_tool_to_openai_function(t) for t in tools]
llm_with_tools = llm.bind(functions=openai_functions)


def _format_scratchpad(payload):
    # Render intermediate (action, observation) steps as chat messages.
    return format_to_openai_functions(payload["intermediate_steps"])


# Runnable pipeline: map the incoming dict into prompt variables, render
# the prompt, invoke the tool-aware model, and parse its function call.
agent = (
    {
        "input": lambda payload: payload["input"],
        "agent_scratchpad": _format_scratchpad,
    }
    | prompt
    | llm_with_tools
    | OpenAIFunctionsAgentOutputParser()
)
agent_executor = AgentExecutor(agent=agent, tools=tools)
# We need to add these input/output schemas because the current AgentExecutor
# is lacking in schemas.
class Input(BaseModel):
    """Request schema for the served chain: the user's input text."""
    input: str
class Output(BaseModel):
    """Response schema for the served chain: the agent's final answer text."""
    output: str
# Registers the agent under this plugin's path with the standard
# /invoke, /batch and /stream endpoints.
class ExampleChainPlugin(ChainPlugin):
    """Serves the agent executor with explicit input/output schemas."""

    runnable = agent_executor.with_types(input_type=Input, output_type=Output)
    # Route prefix taken from the last segment of this module's name
    # (presumably "/agent" for agent.py).
    path = '/' + __name__.rsplit('.', 1)[-1]