extensions/cli/src/e2e/spec.md
This document describes the principles, practices, and patterns for writing end-to-end (E2E) tests for the Continue CLI.
End-to-end tests verify that the CLI functions correctly as a whole by executing the actual CLI binary and validating its behavior from the user's perspective. These tests help ensure that all components work together properly in a real-world environment.
A typical E2E test follows this structure:
describe("E2E: Feature", () => {
let context: any;
beforeEach(async () => {
context = await createTestContext();
});
afterEach(async () => {
await cleanupTestContext(context);
});
it("should do something", async () => {
const result = await runCLI(context, {
args: ["command", "--flag"],
});
expect(result.exitCode).toBe(0);
expect(result.stdout).toContain("Expected output");
});
});
The test context (`createTestContext()`) provides:

- `context.testDir` - the temporary directory the test runs in
- `context.cliPath` - the path to the CLI binary under test
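For orientation, here is a rough sketch of the context object's shape, inferred from the examples in this document; the real helper may expose additional fields, and `configPath` only exists once a test config has been created.

```typescript
// Sketch only: field names are inferred from the examples in this document,
// not taken from the actual helper's type definitions.
interface TestContext {
  testDir: string; // temporary directory the test runs in
  cliPath: string; // path to the CLI entry point under test
  configPath?: string; // populated when a test config is created
}
```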
The `runCLI()` helper executes the binary with the given arguments and returns its output and exit code:

```typescript
// Run CLI with arguments
const result = await runCLI(context, {
  args: ["--help"],
});

// Verify output and exit code
expect(result.exitCode).toBe(0);
expect(result.stdout).toContain("Continue CLI");
```
```typescript
// Run CLI with input
const result = await runCLI(context, {
  args: ["-p", "Prompt with input"],
  input: "User input\n",
});
```
```typescript
// For complex interactive flows
const result = await withInteractiveInput(
  context,
  ["chat"],
  ["First input", "Second input"],
);
```
```typescript
// Test error scenarios
const result = await runCLI(context, {
  args: ["--invalid-flag"],
  expectError: true,
});

expect(result.exitCode).not.toBe(0);
expect(result.stderr).toContain("error");
```
For tests involving AI models, we use a mock LLM server:
```typescript
let mockServer: MockLLMServer;

beforeEach(async () => {
  context = await createTestContext();
  mockServer = await setupMockLLMTest(context, {
    response: "Hello World!",
  });
});

afterEach(async () => {
  await cleanupMockLLMServer(mockServer);
  await cleanupTestContext(context);
});
```
it("should get response from mock LLM", async () => {
const result = await runCLI(context, {
args: ["-p", "--config", context.configPath, "Hi"],
});
expect(result.stdout).toContain("Hello World!");
});
Dynamic responses based on prompts:
```typescript
mockServer = await setupMockLLMTest(context, {
  response: (prompt) =>
    prompt.includes("weather") ? "It's sunny!" : "I don't know",
});
```
Non-streaming responses:
```typescript
mockServer = await setupMockLLMTest(context, {
  response: "All at once response",
  streaming: false,
});
```
Request tracking:
```typescript
expect(mockServer.requests).toHaveLength(1);
expect(mockServer.requests[0].body.messages[0].content).toBe("User prompt");
```
To point the CLI at a specific configuration, create a config file inside the test directory with `createTestConfig()`:

```typescript
const configPath = await createTestConfig(context, {
  // Config object
});
```
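The returned path can then be passed to the CLI, mirroring the mock LLM example above:

```typescript
// Point the CLI at the generated config file (same --config pattern as the
// mock LLM example earlier in this document).
const result = await runCLI(context, {
  args: ["-p", "--config", configPath, "Hi"],
});
```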
Session helpers can seed and read back chat history:

```typescript
// Create a mock session
await createMockSession(context, [
  { role: "user", content: "Hello" },
  { role: "assistant", content: "Hi there!" },
]);

// Read the session
const session = await readSession(context);
```
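A test would then assert against what was read back. The exact shape returned by `readSession()` is not shown in this document, so treat the assertions below as a sketch to adapt:

```typescript
// Assumption: readSession() returns the session seeded above; the checks here
// avoid depending on its exact structure.
expect(session).toBeDefined();
expect(JSON.stringify(session)).toContain("Hi there!");
```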
Headless (non-interactive) runs pass a prompt with the `-p` flag:

```typescript
const result = await runCLI(context, {
  args: ["-p", "Prompt in headless mode"],
});
```
Interactive runs are driven with scripted inputs, ending with `/exit`:

```typescript
const result = await withInteractiveInput(
  context,
  ["chat"],
  ["User input", "/exit"],
);
```
Subcommands can be exercised the same way, for example checking `login --help`:

```typescript
const result = await runCLI(context, {
  args: ["login", "--help"],
});
```
Slow tests can pass an explicit timeout as the last argument to `it` (for example `}, 30000);` for 30 seconds). When a test fails, log `result.stdout` and `result.stderr` to see what the CLI printed, and log `context.testDir` to locate the temporary directory the test ran in.
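Putting those two together, a long-running test with debug output might look like this (the test name, prompt, and assertion are placeholders):

```typescript
it("should handle a slow command", async () => {
  const result = await runCLI(context, {
    args: ["-p", "Prompt for a long-running command"], // placeholder prompt
  });

  // Debugging aids: print the CLI output and the temp directory when
  // investigating a failure.
  console.log(result.stdout, result.stderr);
  console.log(context.testDir);

  expect(result.exitCode).toBe(0);
}, 30000); // 30 seconds
```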