---
title: Getting Started
description: Get assistant-ui running in 5 minutes with npm and your first chat component.
---

import { InstallCommand } from "@/components/docs/fumadocs/install/install-command";

## Quick Start

The fastest way to get started with assistant-ui.

![animated gif showing the steps to create a new project](../../../../../.github/assets/assistant-ui-starter.gif)

### Initialize assistant-ui

**Create a new project:**

```sh
npx assistant-ui@latest create
```

Or choose a template:

```sh
# Assistant Cloud - with persistence and thread management
npx assistant-ui@latest create -t cloud

# LangGraph
npx assistant-ui@latest create -t langgraph

# MCP support
npx assistant-ui@latest create -t mcp
```

**Add to an existing project:**

```sh
npx assistant-ui@latest init
```

### Add API key

Create a `.env` file with your API key:

```
OPENAI_API_KEY="sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
```

### Start the app

```sh
npm run dev
```

## Add to Existing Project

If you prefer not to use the CLI, you can install components manually.
### Add assistant-ui ### Setup Backend Endpoint Install provider SDK: Add an API endpoint: ```ts title="/app/api/chat/route.ts" tab="OpenAI" import { openai } from "@ai-sdk/openai"; import { convertToModelMessages, streamText } from "ai"; export const maxDuration = 30; export async function POST(req: Request) { const { messages } = await req.json(); const result = streamText({ model: openai("gpt-4o-mini"), messages: convertToModelMessages(messages), }); return result.toUIMessageStreamResponse(); } ``` ```ts title="/app/api/chat/route.ts" tab="Anthropic" import { anthropic } from "@ai-sdk/anthropic"; import { convertToModelMessages, streamText } from "ai"; export const maxDuration = 30; export async function POST(req: Request) { const { messages } = await req.json(); const result = streamText({ model: anthropic("claude-3-5-sonnet-20240620"), messages: convertToModelMessages(messages), }); return result.toUIMessageStreamResponse(); } ``` ```ts title="/app/api/chat/route.ts" tab="Azure" import { azure } from "@ai-sdk/azure"; import { convertToModelMessages, streamText } from "ai"; export const maxDuration = 30; export async function POST(req: Request) { const { messages } = await req.json(); const result = streamText({ model: azure("your-deployment-name"), messages: convertToModelMessages(messages), }); return result.toUIMessageStreamResponse(); } ``` ```ts title="/app/api/chat/route.ts" tab="AWS" import { bedrock } from "@ai-sdk/amazon-bedrock"; import { convertToModelMessages, streamText } from "ai"; export const maxDuration = 30; export async function POST(req: Request) { const { messages } = await req.json(); const result = streamText({ model: bedrock("anthropic.claude-3-5-sonnet-20240620-v1:0"), messages: convertToModelMessages(messages), }); return result.toUIMessageStreamResponse(); } ``` ```ts title="/app/api/chat/route.ts" tab="Gemini" import { google } from "@ai-sdk/google"; import { convertToModelMessages, streamText } from "ai"; export const maxDuration = 
30; export async function POST(req: Request) { const { messages } = await req.json(); const result = streamText({ model: google("gemini-2.0-flash"), messages: convertToModelMessages(messages), }); return result.toUIMessageStreamResponse(); } ``` ```ts title="/app/api/chat/route.ts" tab="GCP" import { vertex } from "@ai-sdk/google-vertex"; import { convertToModelMessages, streamText } from "ai"; export const maxDuration = 30; export async function POST(req: Request) { const { messages } = await req.json(); const result = streamText({ model: vertex("gemini-1.5-pro"), messages: convertToModelMessages(messages), }); return result.toUIMessageStreamResponse(); } ``` ```ts title="/app/api/chat/route.ts" tab="Groq" import { createOpenAI } from "@ai-sdk/openai"; import { convertToModelMessages, streamText } from "ai"; export const maxDuration = 30; const groq = createOpenAI({ apiKey: process.env.GROQ_API_KEY ?? "", baseURL: "https://api.groq.com/openai/v1", }); export async function POST(req: Request) { const { messages } = await req.json(); const result = streamText({ model: groq("llama3-70b-8192"), messages: convertToModelMessages(messages), }); return result.toUIMessageStreamResponse(); } ``` ```ts title="/app/api/chat/route.ts" tab="Fireworks" import { createOpenAI } from "@ai-sdk/openai"; import { convertToModelMessages, streamText } from "ai"; export const maxDuration = 30; const fireworks = createOpenAI({ apiKey: process.env.FIREWORKS_API_KEY ?? 
"", baseURL: "https://api.fireworks.ai/inference/v1", }); export async function POST(req: Request) { const { messages } = await req.json(); const result = streamText({ model: fireworks("accounts/fireworks/models/firefunction-v2"), messages: convertToModelMessages(messages), }); return result.toUIMessageStreamResponse(); } ``` ```ts title="/app/api/chat/route.ts" tab="Cohere" import { cohere } from "@ai-sdk/cohere"; import { convertToModelMessages, streamText } from "ai"; export const maxDuration = 30; export async function POST(req: Request) { const { messages } = await req.json(); const result = streamText({ model: cohere("command-r-plus"), messages: convertToModelMessages(messages), }); return result.toUIMessageStreamResponse(); } ``` ```ts title="/app/api/chat/route.ts" tab="Ollama" import { ollama } from "ollama-ai-provider-v2"; import { convertToModelMessages, streamText } from "ai"; export const maxDuration = 30; export async function POST(req: Request) { const { messages } = await req.json(); const result = streamText({ model: ollama("llama3"), messages: convertToModelMessages(messages), }); return result.toUIMessageStreamResponse(); } ``` ```ts title="/app/api/chat/route.ts" tab="Chrome AI" import { chromeai } from "chrome-ai"; import { convertToModelMessages, streamText } from "ai"; export const maxDuration = 30; export async function POST(req: Request) { const { messages } = await req.json(); const result = streamText({ model: chromeai(), messages: convertToModelMessages(messages), }); return result.toUIMessageStreamResponse(); } ``` Define environment variables: ```sh title="/.env.local" tab="OpenAI" OPENAI_API_KEY="sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" ``` ```sh title="/.env.local" tab="Anthropic" ANTHROPIC_API_KEY="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" ``` ```sh title="/.env.local" tab="Azure" AZURE_RESOURCE_NAME="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" AZURE_API_KEY="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" ``` ```sh title="/.env.local" 
tab="AWS" AWS_ACCESS_KEY_ID="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" AWS_SECRET_ACCESS_KEY="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" AWS_REGION="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" ``` ```sh title="/.env.local" tab="Gemini" GOOGLE_GENERATIVE_AI_API_KEY="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" ``` ```sh title="/.env.local" tab="GCP" GOOGLE_VERTEX_PROJECT="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" GOOGLE_VERTEX_LOCATION="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" GOOGLE_APPLICATION_CREDENTIALS="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" ``` ```sh title="/.env.local" tab="Groq" GROQ_API_KEY="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" ``` ```sh title="/.env.local" tab="Fireworks" FIREWORKS_API_KEY="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" ``` ```sh title="/.env.local" tab="Cohere" COHERE_API_KEY="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" ``` ```sh tab="Ollama" ``` ```sh tab="Chrome AI" ``` If you aren't using Next.js, you can also deploy this endpoint to Cloudflare Workers, or any other serverless platform. ### Use it in your app ```tsx title="/app/page.tsx" tab="Thread" import { AssistantRuntimeProvider } from "@assistant-ui/react"; import { useChatRuntime, AssistantChatTransport } from "@assistant-ui/react-ai-sdk"; import { ThreadList } from "@/components/assistant-ui/thread-list"; import { Thread } from "@/components/assistant-ui/thread"; const MyApp = () => { const runtime = useChatRuntime({ transport: new AssistantChatTransport({ api: "/api/chat", }), }); return (
    <AssistantRuntimeProvider runtime={runtime}>
      <div className="grid h-dvh grid-cols-[200px_1fr] gap-x-2 px-4 py-4">
        <ThreadList />
        <Thread />
      </div>
    </AssistantRuntimeProvider>
  );
};
```

```tsx title="/app/page.tsx" tab="AssistantModal"
// run `npx shadcn@latest add @assistant-ui/assistant-modal`
import { AssistantRuntimeProvider } from "@assistant-ui/react";
import { useChatRuntime, AssistantChatTransport } from "@assistant-ui/react-ai-sdk";
import { AssistantModal } from "@/components/assistant-ui/assistant-modal";

const MyApp = () => {
  const runtime = useChatRuntime({
    transport: new AssistantChatTransport({
      api: "/api/chat",
    }),
  });

  return (
    <AssistantRuntimeProvider runtime={runtime}>
      <AssistantModal />
    </AssistantRuntimeProvider>
  );
};
```
## What's Next?