-
Notifications
You must be signed in to change notification settings - Fork 25
Expand file tree
/
Copy pathlangchain-example.py
More file actions
285 lines (218 loc) · 8.42 KB
/
langchain-example.py
File metadata and controls
285 lines (218 loc) · 8.42 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
#!/usr/bin/env python3
"""
LangChain Framework Example
Demonstrates using LangChain for building LLM applications.
Requirements:
pip install langchain langchain-openai langchain-community python-dotenv
Setup:
1. Create a .env file in the project root
2. Add your OpenAI API key: OPENAI_API_KEY=sk-...
"""
import os
import sys
from typing import List

from dotenv import load_dotenv
from langchain.chains import LLMChain
from langchain.memory import ConversationBufferMemory
from langchain.output_parsers import PydanticOutputParser
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain.schema import AIMessage, HumanMessage, SystemMessage
from langchain_openai import ChatOpenAI
from pydantic import BaseModel, Field
# Load environment variables from a .env file in the working directory
# (e.g. OPENAI_API_KEY) into os.environ before any example runs.
load_dotenv()
def basic_llm_call():
    """Make a single chat-completion call and print the model's reply.

    Sends one system + one human message to gpt-4o-mini via ChatOpenAI
    and prints the response content. Requires OPENAI_API_KEY in the
    environment; performs network I/O and returns nothing.
    """
    print("=" * 60)
    print("1. Basic LLM Call")
    print("=" * 60)

    llm = ChatOpenAI(
        model="gpt-4o-mini",
        temperature=0.7,
        api_key=os.getenv("OPENAI_API_KEY"),
    )

    messages = [
        SystemMessage(content="You are a helpful AI assistant."),
        HumanMessage(content="What is LangChain?"),
    ]

    response = llm.invoke(messages)
    # Plain string literal: the original used an f-string with no
    # placeholders (ruff F541). Output is byte-identical.
    print("User: What is LangChain?")
    print(f"Assistant: {response.content}\n")
def prompt_template_example():
    """Build a reusable chat prompt template and run it through the model.

    Fills the {expertise} and {question} slots, pipes the rendered prompt
    into gpt-4o-mini, and prints the answer. Performs network I/O.
    """
    print("=" * 60)
    print("2. Prompt Templates")
    print("=" * 60)

    chat_model = ChatOpenAI(model="gpt-4o-mini", api_key=os.getenv("OPENAI_API_KEY"))

    prompt = ChatPromptTemplate.from_messages([
        ("system", "You are a {expertise} expert."),
        ("human", "{question}"),
    ])

    # LCEL pipe operator composes prompt -> model into one runnable.
    pipeline = prompt | chat_model
    inputs = {
        "expertise": "Python programming",
        "question": "What is a decorator?",
    }
    answer = pipeline.invoke(inputs)

    print("Template: You are a {expertise} expert.")
    print("Question: What is a decorator?")
    print(f"Response: {answer.content}\n")
def conversation_with_memory():
    """Run a multi-turn conversation, persisting history in buffer memory.

    Each turn loads the accumulated history, injects it into the prompt
    via MessagesPlaceholder, invokes the model, and saves the exchange
    back to memory so the next turn can see it. Performs network I/O.
    """
    print("=" * 60)
    print("3. Conversation Memory")
    print("=" * 60)

    llm = ChatOpenAI(model="gpt-4o-mini", api_key=os.getenv("OPENAI_API_KEY"))
    memory = ConversationBufferMemory(return_messages=True)

    prompt = ChatPromptTemplate.from_messages([
        ("system", "You are a helpful coding assistant."),
        MessagesPlaceholder(variable_name="history"),
        ("human", "{input}"),
    ])

    # The chain is loop-invariant; build it once instead of once per turn
    # (the original rebuilt `prompt | llm` inside the loop).
    chain = prompt | llm

    conversation = [
        "What is a list comprehension in Python?",
        "Show me an example",
        "How is it different from a map function?"
    ]

    for user_input in conversation:
        # Pull the accumulated chat history for this turn.
        history = memory.load_memory_variables({})
        response = chain.invoke({
            "history": history.get("history", []),
            "input": user_input
        })
        # Persist the exchange so subsequent turns include it.
        memory.save_context({"input": user_input}, {"output": response.content})

        print(f"User: {user_input}")
        print(f"Assistant: {response.content[:200]}...\n")
def structured_output_parsing():
    """Coerce the model's reply into a typed Pydantic object.

    Defines a Recipe schema, appends the parser's format instructions to
    the prompt, and runs prompt -> model -> parser so the result is a
    Recipe instance rather than raw text. Performs network I/O.
    """
    print("=" * 60)
    print("4. Structured Output Parsing")
    print("=" * 60)

    class Recipe(BaseModel):
        """Schema the LLM response is parsed into."""
        name: str = Field(description="Name of the dish")
        ingredients: List[str] = Field(description="List of ingredients")
        steps: List[str] = Field(description="Cooking steps")
        prep_time: int = Field(description="Preparation time in minutes")

    recipe_parser = PydanticOutputParser(pydantic_object=Recipe)
    model = ChatOpenAI(model="gpt-4o-mini", temperature=0, api_key=os.getenv("OPENAI_API_KEY"))

    cooking_prompt = ChatPromptTemplate.from_messages([
        ("system", "You are a helpful cooking assistant."),
        ("human", "{query}\n\n{format_instructions}"),
    ])

    # The trailing parser turns the model's text into a Recipe instance.
    pipeline = cooking_prompt | model | recipe_parser
    recipe = pipeline.invoke({
        "query": "Give me a simple pasta recipe",
        "format_instructions": recipe_parser.get_format_instructions(),
    })

    print("Query: Give me a simple pasta recipe\n")
    print(f"Recipe Name: {recipe.name}")
    print(f"Prep Time: {recipe.prep_time} minutes")
    print(f"Ingredients: {', '.join(recipe.ingredients[:3])}...")
    print(f"Steps: {len(recipe.steps)} steps\n")
def simple_rag_example():
    """Answer a question from an in-memory "retrieved" document set.

    The retrieval step is simulated with a hard-coded document list; the
    documents are joined into one context string and stuffed into the
    system prompt. Performs network I/O and returns nothing.
    """
    print("=" * 60)
    print("5. Simple RAG Pattern")
    print("=" * 60)

    # Stand-in for a real retriever / vector store.
    documents = [
        "LangChain is a framework for developing applications powered by language models.",
        "It enables applications that are context-aware and can reason based on provided context.",
        "LangChain provides components for working with LLMs, prompts, memory, and agents."
    ]

    llm = ChatOpenAI(model="gpt-4o-mini", api_key=os.getenv("OPENAI_API_KEY"))

    rag_prompt = ChatPromptTemplate.from_messages([
        ("system", "Answer the question based on the following context:\n\n{context}"),
        ("human", "{question}")
    ])
    chain = rag_prompt | llm

    question = "What does LangChain enable?"
    context = "\n".join(documents)
    response = chain.invoke({
        "context": context,
        "question": question
    })

    # Plain string literal: the original used an f-string with no
    # placeholders (ruff F541). Output is byte-identical.
    print("Context: [3 documents about LangChain]")
    print(f"Question: {question}")
    print(f"Answer: {response.content}\n")
def few_shot_prompting():
    """Classify sentiment using in-context (few-shot) examples.

    Three worked human/ai pairs teach the label format before the real
    input is appended; three test strings are then classified.
    Performs network I/O and returns nothing.
    """
    print("=" * 60)
    print("6. Few-Shot Prompting")
    print("=" * 60)

    classifier_llm = ChatOpenAI(model="gpt-4o-mini", temperature=0, api_key=os.getenv("OPENAI_API_KEY"))

    # Worked examples demonstrating the expected one-word labels.
    examples = [
        ("human", "I love this product!"),
        ("ai", "positive"),
        ("human", "This is terrible."),
        ("ai", "negative"),
        ("human", "It's okay, nothing special."),
        ("ai", "neutral"),
    ]
    few_shot_prompt = ChatPromptTemplate.from_messages(
        [("system", "You are a sentiment classifier. Respond with only: positive, negative, or neutral.")]
        + examples
        + [("human", "{text}")]
    )
    classifier = few_shot_prompt | classifier_llm

    print("Few-shot sentiment classification:\n")
    for sample in ["This is amazing!", "I'm disappointed.", "It works as expected."]:
        verdict = classifier.invoke({"text": sample})
        print(f"Text: '{sample}'")
        print(f"Sentiment: {verdict.content}\n")
def chain_multiple_calls():
    """Feed the output of one LLM chain into a second one.

    Stage 1 generates a one-word topic; stage 2 explains whatever stage 1
    produced. Performs network I/O and returns nothing.
    """
    print("=" * 60)
    print("7. Sequential Chains")
    print("=" * 60)

    model = ChatOpenAI(model="gpt-4o-mini", api_key=os.getenv("OPENAI_API_KEY"))

    # Stage 1: produce a single-word topic (prompt has no variables).
    topic_chain = ChatPromptTemplate.from_messages([
        ("system", "Generate a random technical topic in one word."),
        ("human", "Give me a topic")
    ]) | model

    # Stage 2: explain the topic from stage 1.
    explain_chain = ChatPromptTemplate.from_messages([
        ("system", "Explain the following topic in one sentence."),
        ("human", "{topic}")
    ]) | model

    generated_topic = topic_chain.invoke({}).content.strip()
    explanation = explain_chain.invoke({"topic": generated_topic})

    print(f"Generated Topic: {generated_topic}")
    print(f"Explanation: {explanation.content}\n")
if __name__ == "__main__":
# Check if API key is set
if not os.getenv("OPENAI_API_KEY"):
print("Error: OPENAI_API_KEY not found in environment variables.")
print("Please create a .env file with your API key.")
exit(1)
print("\n" + "=" * 60)
print("LangChain Examples")
print("=" * 60 + "\n")
try:
basic_llm_call()
prompt_template_example()
conversation_with_memory()
structured_output_parsing()
simple_rag_example()
few_shot_prompting()
chain_multiple_calls()
print("=" * 60)
print("All examples completed successfully!")
print("=" * 60)
except Exception as e:
print(f"\n✗ Error running examples: {e}")
print("Make sure your OPENAI_API_KEY is valid.")