Use LangChainChatAgent for conversational agents with streaming support.
// Conversational chat agent with streaming support, served via the Reminix runtime.
import { ChatOpenAI } from "@langchain/openai";
import { LangChainChatAgent } from "@reminix/langchain";
import { serve } from "@reminix/runtime";

// LangChain chat model used as the underlying LLM.
const model = new ChatOpenAI({ model: "gpt-4o" });

// Wrap the model in a chat agent with a name and system instructions.
const chatbot = new LangChainChatAgent(model, {
  name: "chatbot",
  instructions: "You are a helpful assistant.",
});

// Register the agent with the runtime server.
serve({ agents: [chatbot] });
// Single-shot task agent that analyzes sentiment, served via the Reminix runtime.
import { ChatOpenAI } from "@langchain/openai";
import { LangChainTaskAgent } from "@reminix/langchain";
import { serve } from "@reminix/runtime";

// LangChain chat model used as the underlying LLM.
const model = new ChatOpenAI({ model: "gpt-4o" });

// Wrap the model in a task agent with a name and task instructions.
const analyzer = new LangChainTaskAgent(model, {
  name: "analyzer",
  instructions: "Analyze the sentiment of the given text.",
});

// Register the agent with the runtime server.
serve({ agents: [analyzer] });
Use LangChainThreadAgent for agents that manage full message history.
// Thread agent that manages full message history, served via the Reminix runtime.
import { ChatOpenAI } from "@langchain/openai";
import { LangChainThreadAgent } from "@reminix/langchain";
import { serve } from "@reminix/runtime";

// LangChain chat model used as the underlying LLM.
const model = new ChatOpenAI({ model: "gpt-4o" });

// Wrap the model in a thread agent with a name and system instructions.
const assistant = new LangChainThreadAgent(model, {
  name: "assistant",
  instructions: "You are a helpful assistant that remembers context.",
});

// Register the agent with the runtime server.
serve({ agents: [assistant] });
Thread agents return the complete message array, including the assistant’s response appended to the input messages.