Implement low-latency governance in LangChain pipelines across industries.
pip install aisentinel langchain
# Wire AISentinel governance into a LangChain agent: wrap_tool() interposes
# the guard in front of every tool invocation before it executes.
from langchain.agents import initialize_agent, Tool
from langchain.chat_models import ChatOpenAI
from aisentinel.integrations.langchain import SentinelGuard

llm = ChatOpenAI(model_name="gpt-4o")
# from_env() presumably reads credentials/policy source from environment
# variables — confirm against AISentinel docs.
sentinel_guard = SentinelGuard.from_env()

# Governed tool surface: a single web-search tool routed through the guard.
governed_search = Tool(
    name="web_search",
    func=sentinel_guard.wrap_tool(web_search_tool),
    description="Search the web under AISentinel governance",
)
tools = [governed_search]

agent = initialize_agent(tools, llm, agent="conversational-react-description")
response = agent.run("Find executive contact info")
# Load a domain-specific rulepack and enforce it on this agent's tools.
from aisentinel.rulepacks import load_rulepack

rulepack = load_rulepack("customer_service.yml")
guard = SentinelGuard(rulepack=rulepack)

# BUG FIX: wrap the tool with the rulepack-scoped `guard`, not the
# env-configured `sentinel_guard` — the original wrapped with the wrong
# guard, so the rulepack loaded above was never applied.
highresearch_agent = initialize_agent(
    tools=[guard.wrap_tool(pdf_reader_tool)],
    llm=llm,
)
# FIX: stray filename text ("research_compliance.yml") was fused onto the
# import line by extraction garbling — removed so the import parses.
from aisentinel.testing import benchmark

# Replay recorded cases against the governed agent and report tail latency.
results = benchmark(agent, cases="./benchmarks/langchain.jsonl")
print(results.latency_p95)  # 95th-percentile latency across the benchmark cases
Expect <60 ms preflight overhead with local cache enabled.
# Multi-tenant guard pool: each tenant carries its own rulepack so policy
# enforcement stays isolated per tenant.
tenant_configs = [
    {"tenant_id": "healthcare", "rulepack": "rulepacks/hipaa.yml"},
    {"tenant_id": "finance", "rulepack": "rulepacks/finra.yml"},
]
sentinel_guard = SentinelGuard.from_pool(tenant_configs)

# Bind the tool to the finance tenant's policy at wrap time.
finance_search = sentinel_guard.wrap_tool(web_search_tool, tenant="finance")
agent = initialize_agent(tools=[finance_search], llm=llm)
Switch tenants per conversation to isolate policy enforcement.