Optional
config: TestingAgentConfig — Optional configuration for the agent.
General configuration for a testing agent.
Optional
maxTokens?: number — The maximum number of tokens to generate.
Optional
model?: LanguageModelV1 — The language model to use for generating responses. If not provided, a default model will be used.
Optional
name?: string — The name of the agent.
Optional
systemPrompt?: string — System prompt to use for the agent.
Useful in more complex scenarios where you want to set the system prompt for the agent directly. If left blank, this will be automatically generated from the scenario description.
Optional
temperature?: number — The temperature for the language model. Defaults to 0.
import { openai } from '@ai-sdk/openai';

import { run, userSimulatorAgent, AgentRole, user, agent, AgentAdapter } from '@langwatch/scenario';
// Minimal agent under test: echoes back the content of the most recent message.
const myAgent: AgentAdapter = {
  role: AgentRole.AGENT,
  async call(context) {
    const { messages } = context;
    const latest = messages[messages.length - 1];
    return `The user said: ${latest?.content}`;
  },
};
/**
 * Demonstrates three ways to drive a scenario with `userSimulatorAgent`:
 *   1. default simulator behavior,
 *   2. a simulator with an explicit model, temperature, and system prompt,
 *   3. a simulator given only a persona system prompt.
 * Each scenario pairs the agent under test with a simulated user and scripts
 * one user turn followed by one agent turn.
 */
async function main() {
  // 1. Basic user simulator with default behavior.
  const basicResult = await run({
    name: "User Simulator Test",
    description: "A simple test to see if the user simulator works.",
    agents: [myAgent, userSimulatorAgent()],
    script: [
      user(),
      agent(),
    ],
  });

  // 2. Customized user simulator: explicit model, low temperature for more
  // deterministic output, and a custom system prompt.
  // `openai` comes from "@ai-sdk/openai" (see imports).
  const customResult = await run({
    name: "Expert User Test",
    description: "User seeks help with TypeScript programming",
    agents: [
      myAgent,
      userSimulatorAgent({
        model: openai("gpt-4"),
        temperature: 0.3,
        systemPrompt: "You are a technical user who asks detailed questions",
      }),
    ],
    script: [
      user(),
      agent(),
    ],
  });

  // 3. User simulator with a custom persona. Per the option docs, a supplied
  // systemPrompt replaces the one auto-generated from the scenario description.
  const expertResult = await run({
    name: "Expert Developer Test",
    description: "Testing with a technical expert user persona.",
    agents: [
      myAgent,
      userSimulatorAgent({
        systemPrompt: `
You are an expert software developer testing an AI coding assistant.
Ask challenging, technical questions and be demanding about code quality.
Use technical jargon and expect detailed, accurate responses.
`,
      }),
    ],
    script: [
      user(),
      agent(),
    ],
  });
}

// Surface failures instead of leaving an unhandled promise rejection behind.
main().catch(console.error);
Agent that simulates realistic user behavior in scenario conversations.
This agent generates user messages that are appropriate for the given scenario context, simulating how a real human user would interact with the agent under test. It uses an LLM to generate natural, contextually relevant user inputs that help drive the conversation forward according to the scenario description.