
LangChain Cheatsheet

A complete reference guide to the LangChain framework: LLMs, chains, agents, memory, and retrieval patterns

DevBuild Studio · Updated February 2024

Quick Reference: Master LangChain for building LLM-powered applications. Includes chains, agents, memory, retrieval, and production patterns!

Getting Started

LangChain is a framework for developing applications powered by language models, enabling context-aware and reasoning applications.

Installation

bash
# Core LangChain
pip install langchain

# With OpenAI
pip install langchain openai

# With all integrations
pip install langchain-openai langchain-community

# Optional: Document loaders and vector stores
pip install langchain-community chromadb pypdf

Basic Setup

python
import os
from langchain_openai import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from langchain.schema.output_parser import StrOutputParser

# Set API key (prefer loading from the environment in real projects)
os.environ["OPENAI_API_KEY"] = "your-api-key"

# Initialize LLM
llm = ChatOpenAI(model="gpt-4", temperature=0.7)

# Simple invocation
response = llm.invoke("What is LangChain?")
print(response.content)

Core Concepts

LLMs and Chat Models

python
from langchain_openai import OpenAI, ChatOpenAI
from langchain.schema import HumanMessage, SystemMessage

# Standard LLM (completion)
llm = OpenAI(temperature=0.9)
result = llm.invoke("Tell me a joke")

# Chat Model
chat = ChatOpenAI(model="gpt-4")
messages = [
    SystemMessage(content="You are a helpful assistant"),
    HumanMessage(content="What is AI?")
]
response = chat.invoke(messages)

Prompts and Templates

python
from langchain.prompts import (
    ChatPromptTemplate,
    PromptTemplate,
    MessagesPlaceholder,
    HumanMessagePromptTemplate,
    SystemMessagePromptTemplate
)

# Simple Prompt Template
template = PromptTemplate.from_template(
    "Tell me a {adjective} joke about {topic}"
)
prompt = template.invoke({"adjective": "funny", "topic": "programming"})

# Chat Prompt Template
chat_template = ChatPromptTemplate.from_messages([
    ("system", "You are a {role}"),
    ("human", "{user_input}"),
])

# Few-shot prompting
from langchain.prompts.few_shot import FewShotPromptTemplate

examples = [
    {"input": "happy", "output": "sad"},
    {"input": "tall", "output": "short"},
]

example_template = PromptTemplate(
    input_variables=["input", "output"],
    template="Input: {input}\nOutput: {output}"
)

few_shot_prompt = FewShotPromptTemplate(
    examples=examples,
    example_prompt=example_template,
    prefix="Give the antonym of every input",
    suffix="Input: {adjective}\nOutput:",
    input_variables=["adjective"],
)
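
# Render the assembled prompt to inspect it: prints the prefix, both
# examples, then "Input: big\nOutput:"
print(few_shot_prompt.format(adjective="big"))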

Chains

LLMChain (Basic)

python
from langchain.chains import LLMChain
from langchain_openai import ChatOpenAI
from langchain.prompts import PromptTemplate

llm = ChatOpenAI(temperature=0.7)
prompt = PromptTemplate.from_template(
    "Write a {length} {style} about {topic}"
)

# LLMChain is the classic interface; prefer LCEL (next section) for new code
chain = LLMChain(llm=llm, prompt=prompt)
result = chain.invoke({
    "length": "short",
    "style": "poem",
    "topic": "AI"
})

LCEL (LangChain Expression Language)

python
from langchain_openai import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from langchain.schema.output_parser import StrOutputParser

# Modern chain syntax
prompt = ChatPromptTemplate.from_template("Tell me about {topic}")
llm = ChatOpenAI()
output_parser = StrOutputParser()

# Chain with | operator
chain = prompt | llm | output_parser

# Invoke
result = chain.invoke({"topic": "LangChain"})

# Streaming
for chunk in chain.stream({"topic": "AI"}):
    print(chunk, end="", flush=True)

Sequential Chains

python
from langchain.chains import SequentialChain, LLMChain

# First chain: Generate synopsis
synopsis_chain = LLMChain(
    llm=llm,
    prompt=PromptTemplate.from_template(
        "Write a synopsis for {title}"
    ),
    output_key="synopsis"
)

# Second chain: Generate review
review_chain = LLMChain(
    llm=llm,
    prompt=PromptTemplate.from_template(
        "Write a review based on this synopsis:\n{synopsis}"
    ),
    output_key="review"
)

# Combine chains
overall_chain = SequentialChain(
    chains=[synopsis_chain, review_chain],
    input_variables=["title"],
    output_variables=["synopsis", "review"]
)

result = overall_chain.invoke({"title": "AI Revolution"})
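
The same pipeline can also be written in LCEL; a sketch of the equivalent (note it returns only the final review, not both intermediate outputs):

python
from langchain.schema.output_parser import StrOutputParser

synopsis_prompt = PromptTemplate.from_template("Write a synopsis for {title}")
review_prompt = PromptTemplate.from_template(
    "Write a review based on this synopsis:\n{synopsis}"
)

# Plain functions are coerced to runnables, so a lambda can reshape one
# step's output into the next step's input
lcel_chain = (
    synopsis_prompt
    | llm
    | StrOutputParser()
    | (lambda synopsis: {"synopsis": synopsis})
    | review_prompt
    | llm
    | StrOutputParser()
)
result = lcel_chain.invoke({"title": "AI Revolution"})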

Router Chains

python
from langchain.chains.router import MultiPromptChain

# Define prompts for different domains
physics_template = """You are a physics expert. {input}"""
math_template = """You are a math expert. {input}"""

prompt_infos = [
    {
        "name": "physics",
        "description": "Good for physics questions",
        "prompt_template": physics_template
    },
    {
        "name": "math",
        "description": "Good for math questions",
        "prompt_template": math_template
    }
]

# Create router chain
chain = MultiPromptChain.from_prompts(
    llm=llm,
    prompt_infos=prompt_infos
)

chain.invoke("What is Newton's second law?")  # Routes to physics
chain.invoke("What is calculus?")             # Routes to math

Memory

Conversation Buffer Memory

python
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationChain

memory = ConversationBufferMemory()
conversation = ConversationChain(
    llm=llm,
    memory=memory,
    verbose=True
)

conversation.invoke("Hi, I'm John")
conversation.invoke("What's my name?")  # Remembers "John"

# Access memory
print(memory.load_memory_variables({}))

Conversation Summary Memory

python
from langchain.memory import ConversationSummaryMemory

memory = ConversationSummaryMemory(llm=llm)
conversation = ConversationChain(
    llm=llm,
    memory=memory
)

# Automatically summarizes long conversations
conversation.invoke("Tell me about AI")
conversation.invoke("What are neural networks?")

Window Memory

python
from langchain.memory import ConversationBufferWindowMemory

# Keep only last k interactions
memory = ConversationBufferWindowMemory(k=2)
conversation = ConversationChain(llm=llm, memory=memory)

conversation.invoke("Hi")
conversation.invoke("How are you?")
conversation.invoke("What's 2+2?")
# Only remembers last 2 interactions

Agents

OpenAI Functions Agent

python
from langchain.agents import AgentExecutor, create_openai_functions_agent
from langchain_openai import ChatOpenAI
from langchain.tools import Tool
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder

# Define tools
def get_word_length(word: str) -> int:
    """Returns the length of a word."""
    return len(word)

tools = [
    Tool(
        name="GetWordLength",
        func=get_word_length,
        description="Returns the length of a word"
    )
]

# Create agent
llm = ChatOpenAI(model="gpt-4", temperature=0)

prompt = ChatPromptTemplate.from_messages([
    ("system", "You are a helpful assistant"),
    ("human", "{input}"),
    MessagesPlaceholder(variable_name="agent_scratchpad"),
])

agent = create_openai_functions_agent(llm, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)

# Run agent
result = agent_executor.invoke({
    "input": "How many letters are in the word 'LangChain'?"
})

ReAct Agent

python
from langchain.agents import create_react_agent, AgentExecutor
from langchain import hub

# Pull ReAct prompt from hub
prompt = hub.pull("hwchase17/react")

# Create ReAct agent
agent = create_react_agent(llm, tools, prompt)
agent_executor = AgentExecutor(
    agent=agent,
    tools=tools,
    verbose=True,
    handle_parsing_errors=True
)

agent_executor.invoke({"input": "What is LangChain?"})

Custom Tools

python
from langchain.tools import BaseTool
from typing import Type
from pydantic import BaseModel, Field

class CalculatorInput(BaseModel):
    a: int = Field(description="First number")
    b: int = Field(description="Second number")

class CalculatorTool(BaseTool):
    name: str = "calculator"
    description: str = "Useful for math calculations"
    args_schema: Type[BaseModel] = CalculatorInput

    def _run(self, a: int, b: int) -> int:
        return a + b

    async def _arun(self, a: int, b: int) -> int:
        return self._run(a, b)

# Use custom tool
tools = [CalculatorTool()]
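
# Quick sanity check of the tool on its own; tools are Runnables, so
# .invoke takes the arguments as a dict (prints 5)
print(CalculatorTool().invoke({"a": 2, "b": 3}))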

Retrieval (RAG)

Document Loaders

python
from langchain_community.document_loaders import (
    TextLoader,
    PyPDFLoader,
    WebBaseLoader,
    DirectoryLoader
)

# Text file
loader = TextLoader("file.txt")
docs = loader.load()

# PDF
loader = PyPDFLoader("document.pdf")
pages = loader.load_and_split()

# Web page
loader = WebBaseLoader("https://example.com")
docs = loader.load()

# Directory
loader = DirectoryLoader("./docs", glob="**/*.txt")
docs = loader.load()

Text Splitters

python
from langchain.text_splitter import (
    RecursiveCharacterTextSplitter,
    CharacterTextSplitter,
    TokenTextSplitter
)

# Recursive character splitter (recommended)
splitter = RecursiveCharacterTextSplitter(
    chunk_size=1000,
    chunk_overlap=200,
    separators=["\n\n", "\n", " ", ""]
)
chunks = splitter.split_documents(docs)

# Token-based splitter
token_splitter = TokenTextSplitter(
    chunk_size=100,
    chunk_overlap=20
)
token_chunks = token_splitter.split_documents(docs)

Vector Stores

python
from langchain_openai import OpenAIEmbeddings
from langchain_community.vectorstores import Chroma, FAISS

embeddings = OpenAIEmbeddings()

# Chroma
vectorstore = Chroma.from_documents(
    documents=chunks,
    embedding=embeddings,
    persist_directory="./chroma_db"
)

# FAISS
vectorstore = FAISS.from_documents(chunks, embeddings)

# Similarity search
results = vectorstore.similarity_search("query", k=3)

# With scores
results = vectorstore.similarity_search_with_score("query")
Retrieval QA Chain

python
from langchain.chains import RetrievalQA
from langchain_openai import ChatOpenAI

qa_chain = RetrievalQA.from_chain_type(
    llm=ChatOpenAI(model="gpt-4"),
    chain_type="stuff",
    retriever=vectorstore.as_retriever()
)

response = qa_chain.invoke({"query": "What is LangChain?"})
print(response["result"])
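
# To also get the retrieved chunks back, pass return_source_documents=True
qa_chain = RetrievalQA.from_chain_type(
    llm=ChatOpenAI(model="gpt-4"),
    chain_type="stuff",
    retriever=vectorstore.as_retriever(),
    return_source_documents=True
)
print(qa_chain.invoke({"query": "What is LangChain?"})["source_documents"])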

Conversational Retrieval

python
from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory

memory = ConversationBufferMemory(
    memory_key="chat_history",
    return_messages=True
)

qa = ConversationalRetrievalChain.from_llm(
    llm=ChatOpenAI(model="gpt-4"),
    retriever=vectorstore.as_retriever(),
    memory=memory
)

# First question
qa.invoke({"question": "What is LangChain?"})

# Follow-up (uses context)
qa.invoke({"question": "Tell me more about it"})

Advanced Patterns

Streaming

python
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler

# Stream tokens as they're generated
llm = ChatOpenAI(
    streaming=True,
    callbacks=[StreamingStdOutCallbackHandler()]
)

chain = prompt | llm | output_parser

for chunk in chain.stream({"topic": "AI"}):
    print(chunk, end="", flush=True)

Caching

python
from langchain.cache import InMemoryCache, SQLiteCache
from langchain.globals import set_llm_cache

# In-memory cache
set_llm_cache(InMemoryCache())

# SQLite cache (persistent)
set_llm_cache(SQLiteCache(database_path=".langchain.db"))

# Cached calls return instantly
llm.invoke("What is AI?")  # Slow first time
llm.invoke("What is AI?")  # Instant (cached)

Callbacks

python
from langchain.callbacks.base import BaseCallbackHandler

class CustomCallbackHandler(BaseCallbackHandler):
    def on_llm_start(self, serialized, prompts, **kwargs):
        print(f"LLM started with prompts: {prompts}")

    def on_llm_end(self, response, **kwargs):
        print(f"LLM finished: {response}")

# Use callback
llm = ChatOpenAI(callbacks=[CustomCallbackHandler()])

Output Parsers

python
from langchain.output_parsers import (
    PydanticOutputParser,
    CommaSeparatedListOutputParser,
    StructuredOutputParser
)
from pydantic import BaseModel, Field

# Pydantic parser
class Person(BaseModel):
    name: str = Field(description="Person's name")
    age: int = Field(description="Person's age")

parser = PydanticOutputParser(pydantic_object=Person)

prompt = PromptTemplate(
    template="Extract person info.\n{format_instructions}\n{query}",
    input_variables=["query"],
    partial_variables={"format_instructions": parser.get_format_instructions()}
)

chain = prompt | llm | parser
result = chain.invoke({"query": "John is 30 years old"})
# Returns a Person object

# List parser: needs its own chain so the model is asked for
# comma-separated output
list_parser = CommaSeparatedListOutputParser()
list_chain = llm | list_parser
result = list_chain.invoke("List 3 colors, comma-separated")
# e.g. ['red', 'blue', 'green']

Best Practices

Error Handling

python
from langchain.schema import OutputParserException

try:
    result = chain.invoke({"input": "query"})
except OutputParserException as e:
    print(f"Parsing error: {e}")
    # Fallback logic
except Exception as e:
    print(f"Error: {e}")
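
# LCEL chains also support declarative fallbacks: if the primary chain
# raises, the fallback runs instead (here a cheaper model, as an example)
backup_chain = prompt | ChatOpenAI(model="gpt-3.5-turbo") | output_parser
safe_chain = chain.with_fallbacks([backup_chain])
result = safe_chain.invoke({"input": "query"})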

Rate Limiting

python
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(
    request_timeout=60,
    max_retries=3,
    model_kwargs={
        "frequency_penalty": 0.0,
        "presence_penalty": 0.0
    }
)
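
max_retries backs off after failures but does not cap request volume. A minimal client-side throttle sketch (the one-request-per-second pacing below is an assumption; tune MIN_INTERVAL to your actual quota):

python
import time

MIN_INTERVAL = 1.0  # assumed quota: at most one request per second
_last_call = 0.0

def throttled_invoke(prompt_text: str):
    """Sleep until MIN_INTERVAL has elapsed since the last call, then invoke."""
    global _last_call
    wait = MIN_INTERVAL - (time.time() - _last_call)
    if wait > 0:
        time.sleep(wait)
    _last_call = time.time()
    return llm.invoke(prompt_text)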

Production Tips

python
import asyncio

# Use async for better performance
async def process_query(query: str):
    result = await chain.ainvoke({"input": query})
    return result

# e.g. asyncio.run(process_query("query"))

# Batch processing
batch_results = chain.batch([
    {"input": "query1"},
    {"input": "query2"}
])

# Keep secrets in environment variables rather than hardcoding them
import os
assert os.getenv("OPENAI_API_KEY"), "Set OPENAI_API_KEY in your environment"
os.environ["LANGCHAIN_TRACING_V2"] = "true"  # Enable LangSmith tracing

Common Patterns

ReAct Pattern

python
# Reasoning + Acting loop (this is what the ReAct agent above implements):
# 1. Thought: What do I need to do?
# 2. Action: Which tool should I use?
# 3. Observation: What did the tool return?
# 4. Repeat until done

Chain of Thought

python
prompt = ChatPromptTemplate.from_template(
    "Let's think step by step.\n\n{question}"
)
chain = prompt | llm | output_parser

Self-Ask

python
# Agent asks follow-up questions to itself
from langchain.agents import create_self_ask_with_search_agent
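
# A minimal usage sketch: the self-ask agent takes exactly one tool, which
# must be named "Intermediate Answer" (search_tool below is assumed to be
# such a tool, e.g. wrapping a web-search API)
from langchain import hub
from langchain.agents import AgentExecutor

prompt = hub.pull("hwchase17/self-ask-with-search")
agent = create_self_ask_with_search_agent(llm, [search_tool], prompt)
agent_executor = AgentExecutor(agent=agent, tools=[search_tool], verbose=True)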

Debugging

Verbose Mode

python
chain = ConversationChain(llm=llm, verbose=True)

LangSmith Tracing

python
import os
os.environ["LANGCHAIN_TRACING_V2"] = "true"
os.environ["LANGCHAIN_API_KEY"] = "your-api-key"
os.environ["LANGCHAIN_PROJECT"] = "my-project"

Debug Callbacks

python
from langchain.callbacks import get_openai_callback

with get_openai_callback() as cb:
    result = chain.invoke({"input": "query"})
    print(f"Tokens used: {cb.total_tokens}")
    print(f"Cost: ${cb.total_cost}")

Resources

  • Documentation: python.langchain.com
  • LangSmith: Platform for debugging and monitoring
  • LangServe: Deploy LangChain apps as APIs
  • Community: Discord, GitHub discussions