LangChain Integration
Automatic instrumentation for LangChain chains, agents, and tools with zero code changes.
Requires: langchain >= 0.1.0 · Supports: Chains, Agents, Tools
Installation
Terminal
pip install turingpulse_sdk turingpulse_sdk_langchain langchain langchain-openai

Quick Start
1. Initialize & Auto-Instrument
setup.py
from turingpulse_sdk import init, TuringPulseConfig
from turingpulse_sdk_langchain import instrument_langchain
# Initialize TuringPulse
init(TuringPulseConfig(
api_key="sk_live_your_api_key",
workflow_name="my-project",
))
# Enable auto-instrumentation for all LangChain components
instrument_langchain()

2. Use LangChain Normally
chain.py
from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
# Create a chain - it's automatically traced
llm = ChatOpenAI(model="gpt-4")
prompt = ChatPromptTemplate.from_template("Tell me a joke about {topic}")
chain = prompt | llm | StrOutputParser()
# Run the chain - traces are captured automatically
result = chain.invoke({"topic": "programming"})
print(result)

ℹ️
Zero Code Changes
Once auto-instrumentation is enabled, all LangChain components are automatically traced. No decorators or wrappers needed.
What Gets Traced
- LLM Calls — Model, tokens, latency, cost
- Chain Executions — Full chain with all steps
- Agent Loops — Reasoning steps and tool calls
- Tool Invocations — Input, output, timing
- Retrievers — Documents retrieved
- Memory Operations — Chat history access
- Embeddings — Batch sizes, dimensions
- Errors — Exceptions with full context
Manual Instrumentation
manual.py
from turingpulse_sdk_langchain import instrument_langchain
# Create your chain
chain = prompt | llm | parser
# Wrap with instrumentation
instrumented_chain = instrument_langchain(
chain,
name="my-chain",
labels={"team": "support", "version": "v2"},
)
# Use the instrumented chain
result = instrumented_chain.invoke({"input": "Hello"})

With Governance
governance.py
from turingpulse_sdk import GovernanceDirective
from turingpulse_sdk_langchain import instrument_langchain
instrumented_agent = instrument_langchain(
agent_executor,
name="financial-agent",
governance=GovernanceDirective(
hitl=True,
hatl=True,
reviewers=["compliance@company.com"],
escalation_channels=["pagerduty://critical"],
severity="high",
),
)

With KPIs
kpis.py
from turingpulse_sdk import KPIConfig
from turingpulse_sdk_langchain import instrument_langchain
instrumented_chain = instrument_langchain(
chain,
name="document-qa",
kpis=[
KPIConfig(
kpi_id="latency_ms",
use_duration=True,
alert_threshold=8000,
comparator="gt",
),
KPIConfig(
kpi_id="total_tokens",
value=lambda ctx: ctx.metadata.get("total_tokens", 0),
alert_threshold=10000,
comparator="gt",
),
KPIConfig(
kpi_id="cost_usd",
value=lambda ctx: ctx.metadata.get("total_cost", 0),
alert_threshold=0.25,
comparator="gt",
),
],
)

Streaming Support
streaming.py
# Streaming is fully supported
for chunk in instrumented_chain.stream({"input": "Hello"}):
print(chunk, end="", flush=True)
# Async streaming
async for chunk in instrumented_chain.astream({"input": "Hello"}):
    print(chunk, end="", flush=True)

Full Configuration
full-config.py
from turingpulse_sdk import GovernanceDirective, KPIConfig
from turingpulse_sdk_langchain import instrument_langchain
instrumented = instrument_langchain(
chain,
name="my-chain",
model="gpt-4o",
provider="openai",
governance=GovernanceDirective(
hatl=True,
reviewers=["qa@company.com"],
),
kpis=[
KPIConfig(kpi_id="latency_ms", use_duration=True, alert_threshold=5000),
],
metadata={"version": "v1"},
)
result = instrumented.invoke({"input": "Hello"})