HTTP and Server-Sent Events
Agents can handle HTTP requests and stream responses using Server-Sent Events (SSE). This page covers the onRequest method and SSE patterns.
Define the onRequest method to handle HTTP requests to your agent:
import { Agent } from "agents";
export class APIAgent extends Agent { async onRequest(request) { const url = new URL(request.url);
// Route based on path if (url.pathname.endsWith("/status")) { return Response.json({ status: "ok", state: this.state }); }
if (url.pathname.endsWith("/action")) { if (request.method !== "POST") { return new Response("Method not allowed", { status: 405 }); } const data = await request.json(); await this.processAction(data.action); return Response.json({ success: true }); }
return new Response("Not found", { status: 404 }); }
async processAction(action) { // Handle the action }}import { Agent } from "agents";
export class APIAgent extends Agent<Env> { async onRequest(request: Request): Promise<Response> { const url = new URL(request.url);
// Route based on path if (url.pathname.endsWith("/status")) { return Response.json({ status: "ok", state: this.state }); }
if (url.pathname.endsWith("/action")) { if (request.method !== "POST") { return new Response("Method not allowed", { status: 405 }); } const data = await request.json<{ action: string }>(); await this.processAction(data.action); return Response.json({ success: true }); }
return new Response("Not found", { status: 404 }); }
async processAction(action: string) { // Handle the action }}SSE allows you to stream data to clients over a long-running HTTP connection. This is ideal for AI model responses that generate tokens incrementally.
Create an SSE stream manually using ReadableStream:
// JavaScript
export class StreamAgent extends Agent {
  async onRequest(request) {
    const encoder = new TextEncoder();

    const stream = new ReadableStream({
      async start(controller) {
        // Send events
        controller.enqueue(encoder.encode("data: Starting...\n\n"));

        for (let i = 1; i <= 5; i++) {
          await new Promise((r) => setTimeout(r, 500));
          controller.enqueue(encoder.encode(`data: Step ${i} complete\n\n`));
        }

        controller.enqueue(encoder.encode("data: Done!\n\n"));
        controller.close();
      },
    });

    return new Response(stream, {
      headers: {
        "Content-Type": "text/event-stream",
        "Cache-Control": "no-cache",
        Connection: "keep-alive",
      },
    });
  }
}

// TypeScript
export class StreamAgent extends Agent<Env> {
  async onRequest(request: Request): Promise<Response> {
    const encoder = new TextEncoder();

    const stream = new ReadableStream({
      async start(controller) {
        // Send events
        controller.enqueue(encoder.encode("data: Starting...\n\n"));

        for (let i = 1; i <= 5; i++) {
          await new Promise((r) => setTimeout(r, 500));
          controller.enqueue(encoder.encode(`data: Step ${i} complete\n\n`));
        }

        controller.enqueue(encoder.encode("data: Done!\n\n"));
        controller.close();
      },
    });

    return new Response(stream, {
      headers: {
        "Content-Type": "text/event-stream",
        "Cache-Control": "no-cache",
        Connection: "keep-alive",
      },
    });
  }
}

SSE messages follow a specific format:
data: your message here\n\n

You can also include event types and IDs:
event: update\nid: 123\ndata: {"count": 42}\n\n

The AI SDK ↗ provides built-in SSE streaming:
import { Agent } from "agents";
import { streamText } from "ai";
import { createOpenAI } from "@ai-sdk/openai";

// JavaScript
export class ChatAgent extends Agent {
  async onRequest(request) {
    const { prompt } = await request.json();

    const openai = createOpenAI({
      apiKey: this.env.OPENAI_API_KEY,
    });

    const result = streamText({
      model: openai("gpt-4o"),
      prompt: prompt,
    });

    return result.toTextStreamResponse();
  }
}

import { Agent } from "agents";
import { streamText } from "ai";
import { createOpenAI } from "@ai-sdk/openai";

// TypeScript
export class ChatAgent extends Agent<Env> {
  async onRequest(request: Request): Promise<Response> {
    const { prompt } = await request.json<{ prompt: string }>();

    const openai = createOpenAI({
      apiKey: this.env.OPENAI_API_KEY,
    });

    const result = streamText({
      model: openai("gpt-4o"),
      prompt: prompt,
    });

    return result.toTextStreamResponse();
  }
}

SSE connections can be long-lived. Handle client disconnects gracefully:
- Persist progress — Write to agent state so clients can resume
- Use agent routing — Clients can reconnect to the same agent instance without session stores
- No timeout limits — Cloudflare Workers have no effective limit on SSE response duration
export class ResumeAgent extends Agent { async onRequest(request) { const url = new URL(request.url); const lastEventId = request.headers.get("Last-Event-ID");
if (lastEventId) { // Client is resuming - send events after lastEventId return this.resumeStream(lastEventId); }
return this.startStream(); }
async startStream() { // Start new stream, saving progress to this.state }
async resumeStream(fromId) { // Resume from saved state }}export class ResumeAgent extends Agent<Env> { async onRequest(request: Request): Promise<Response> { const url = new URL(request.url); const lastEventId = request.headers.get("Last-Event-ID");
if (lastEventId) { // Client is resuming - send events after lastEventId return this.resumeStream(lastEventId); }
return this.startStream(); }
async startStream(): Promise<Response> { // Start new stream, saving progress to this.state }
async resumeStream(fromId: string): Promise<Response> { // Resume from saved state }}| Feature | WebSockets | SSE |
|---|---|---|
| Direction | Bi-directional | Server → Client only |
| Protocol | ws:// / wss:// | HTTP |
| Binary data | Yes | No (text only) |
| Reconnection | Manual | Automatic (browser) |
| Best for | Interactive apps, chat | Streaming responses, notifications |
Recommendation: Use WebSockets for interactive applications. Use SSE for streaming AI responses or server-push notifications.
Refer to WebSockets for WebSocket documentation.
Was this helpful?
- Resources
- API
- New to Cloudflare?
- Directory
- Sponsorships
- Open Source
- Support
- Help Center
- System Status
- Compliance
- GDPR
- Company
- cloudflare.com
- Our team
- Careers
- © 2026 Cloudflare, Inc.
- Privacy Policy
- Terms of Use
- Report Security Issues
- Trademark
-