ElevenLabs UI
Introduction
ElevenLabs UI is a component library and custom registry built on top of shadcn/ui to help you build multimodal agentic experiences faster. It provides pre-built components for agents, transcription, audio & more.
Components are installed via the @elevenlabs/cli:
pnpm dlx @elevenlabs/cli@latest components add <component>
For example, to install the Orb component, you can run:
pnpm dlx @elevenlabs/cli@latest components add orb
Here are some basic examples of what you can achieve using components from ElevenLabs UI.
Customer Support
An interactive chat widget: type a message or tap the voice button to talk to a customer-support agent.
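The demo identifies its agent through an environment variable. Assuming a Next.js app, expose your agent ID in .env.local (placeholder value shown; create an agent in the ElevenLabs dashboard to get a real one):

# .env.local (placeholder, replace with your own agent ID)
NEXT_PUBLIC_ELEVENLABS_AGENT_ID=your_agent_id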
"use client"
import { useCallback, useEffect, useRef, useState } from "react"
import type { ComponentProps } from "react"
import { useConversation } from "@elevenlabs/react"
import {
AudioLinesIcon,
CheckIcon,
CopyIcon,
PhoneOffIcon,
SendIcon,
} from "lucide-react"
import { cn } from "@/lib/utils"
import { Button } from "@/components/ui/button"
import {
Card,
CardContent,
CardFooter,
CardHeader,
} from "@/components/ui/card"
import {
Conversation,
ConversationContent,
ConversationEmptyState,
ConversationScrollButton,
} from "@/components/ui/conversation"
import { Input } from "@/components/ui/input"
import { Message, MessageContent } from "@/components/ui/message"
import { Orb } from "@/components/ui/orb"
import { Response } from "@/components/ui/response"
import { ShimmeringText } from "@/components/ui/shimmering-text"
import {
Tooltip,
TooltipContent,
TooltipProvider,
TooltipTrigger,
} from "@/components/ui/tooltip"
type SystemMessageType = "initial" | "connecting" | "connected" | "error"
interface ChatMessage {
role: "user" | "assistant"
content: string
timestamp?: Date
type?: SystemMessageType
}
const DEFAULT_AGENT = {
agentId: process.env.NEXT_PUBLIC_ELEVENLABS_AGENT_ID!,
name: "Customer Support",
description: "AI Voice Assistant",
}
type ChatActionsProps = ComponentProps<"div">
const ChatActions = ({ className, children, ...props }: ChatActionsProps) => (
<div className={cn("flex items-center gap-1", className)} {...props}>
{children}
</div>
)
type ChatActionProps = ComponentProps<typeof Button> & {
tooltip?: string
label?: string
}
const ChatAction = ({
tooltip,
children,
label,
className,
variant = "ghost",
size = "sm",
...props
}: ChatActionProps) => {
const button = (
<Button
className={cn(
"text-muted-foreground hover:text-foreground relative size-9 p-1.5",
className
)}
size={size}
type="button"
variant={variant}
{...props}
>
{children}
<span className="sr-only">{label || tooltip}</span>
</Button>
)
if (tooltip) {
return (
<TooltipProvider>
<Tooltip>
<TooltipTrigger asChild>{button}</TooltipTrigger>
<TooltipContent>
<p>{tooltip}</p>
</TooltipContent>
</Tooltip>
</TooltipProvider>
)
}
return button
}
export default function Page() {
const [messages, setMessages] = useState<ChatMessage[]>([])
const [agentState, setAgentState] = useState<
"disconnected" | "connecting" | "connected" | "disconnecting" | null
>("disconnected")
const [textInput, setTextInput] = useState("")
const [copiedIndex, setCopiedIndex] = useState<number | null>(null)
const [errorMessage, setErrorMessage] = useState<string | null>(null)
const mediaStreamRef = useRef<MediaStream | null>(null)
const isTextOnlyModeRef = useRef<boolean>(true)
const conversation = useConversation({
onConnect: () => {
// Only clear messages for voice mode
if (!isTextOnlyModeRef.current) {
setMessages([])
}
},
onDisconnect: () => {
// Only clear messages for voice mode
if (!isTextOnlyModeRef.current) {
setMessages([])
}
},
onMessage: (message) => {
if (message.message) {
const newMessage: ChatMessage = {
role: message.source === "user" ? "user" : "assistant",
content: message.message,
}
setMessages((prev) => [...prev, newMessage])
}
},
onError: (error) => {
console.error("Error:", error)
setAgentState("disconnected")
},
onDebug: (debug) => {
console.log("Debug:", debug)
},
})
const getMicStream = useCallback(async () => {
if (mediaStreamRef.current) return mediaStreamRef.current
try {
const stream = await navigator.mediaDevices.getUserMedia({ audio: true })
mediaStreamRef.current = stream
setErrorMessage(null)
return stream
} catch (error) {
if (error instanceof DOMException && error.name === "NotAllowedError") {
setErrorMessage("Please enable microphone permissions in your browser.")
}
throw error
}
}, [])
const startConversation = useCallback(
async (
textOnly: boolean = true,
skipConnectingMessage: boolean = false
) => {
try {
isTextOnlyModeRef.current = textOnly
if (!skipConnectingMessage) {
setMessages([])
}
if (!textOnly) {
await getMicStream()
}
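// Text-only chat runs over a WebSocket; voice needs WebRTC for low-latency audio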
await conversation.startSession({
agentId: DEFAULT_AGENT.agentId,
connectionType: textOnly ? "websocket" : "webrtc",
overrides: {
conversation: {
textOnly: textOnly,
},
agent: {
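// Override the first message to empty in text-only mode so the agent does not open with its spoken greeting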
firstMessage: textOnly ? "" : undefined,
},
},
onStatusChange: (status) => setAgentState(status.status),
})
} catch (error) {
console.error(error)
setAgentState("disconnected")
setMessages([])
}
},
[conversation, getMicStream]
)
const handleCall = useCallback(async () => {
if (agentState === "disconnected" || agentState === null) {
setAgentState("connecting")
try {
await startConversation(false)
} catch {
setAgentState("disconnected")
}
} else if (agentState === "connected") {
conversation.endSession()
setAgentState("disconnected")
if (mediaStreamRef.current) {
mediaStreamRef.current.getTracks().forEach((t) => t.stop())
mediaStreamRef.current = null
}
}
}, [agentState, conversation, startConversation])
const handleTextInputChange = useCallback(
(e: React.ChangeEvent<HTMLInputElement>) => {
setTextInput(e.target.value)
},
[]
)
const handleSendText = useCallback(async () => {
if (!textInput.trim()) return
const messageToSend = textInput
if (agentState === "disconnected" || agentState === null) {
const userMessage: ChatMessage = {
role: "user",
content: messageToSend,
}
setTextInput("")
setAgentState("connecting")
try {
await startConversation(true, true)
// Add the user message once the conversation has started
setMessages([userMessage])
// Send message after connection is established
conversation.sendUserMessage(messageToSend)
} catch (error) {
console.error("Failed to start conversation:", error)
}
} else if (agentState === "connected") {
const newMessage: ChatMessage = {
role: "user",
content: messageToSend,
}
setMessages((prev) => [...prev, newMessage])
setTextInput("")
conversation.sendUserMessage(messageToSend)
}
}, [textInput, agentState, conversation, startConversation])
const handleKeyDown = useCallback(
(e: React.KeyboardEvent<HTMLInputElement>) => {
if (e.key === "Enter" && !e.shiftKey) {
e.preventDefault()
handleSendText()
}
},
[handleSendText]
)
useEffect(() => {
return () => {
if (mediaStreamRef.current) {
mediaStreamRef.current.getTracks().forEach((t) => t.stop())
}
}
}, [])
const isCallActive = agentState === "connected"
const isTransitioning =
agentState === "connecting" || agentState === "disconnecting"
const getInputVolume = useCallback(() => {
const rawValue = conversation.getInputVolume?.() ?? 0
return Math.min(1.0, Math.pow(rawValue, 0.5) * 2.5)
}, [conversation])
const getOutputVolume = useCallback(() => {
const rawValue = conversation.getOutputVolume?.() ?? 0
return Math.min(1.0, Math.pow(rawValue, 0.5) * 2.5)
}, [conversation])
return (
<Card
className={cn(
"mx-auto flex h-[380px] w-full flex-col gap-0 overflow-hidden"
)}
>
<CardHeader className="flex shrink-0 flex-row items-center justify-between pb-4">
<div className="flex items-center gap-4">
<div className="ring-border relative size-10 overflow-hidden rounded-full ring-1">
<Orb
className="h-full w-full"
volumeMode="manual"
getInputVolume={getInputVolume}
getOutputVolume={getOutputVolume}
/>
</div>
<div className="flex flex-col gap-0.5">
<p className="text-sm leading-none font-medium">
{DEFAULT_AGENT.name}
</p>
<div className="flex items-center gap-2">
{errorMessage ? (
<p className="text-destructive text-xs">{errorMessage}</p>
) : agentState === "disconnected" || agentState === null ? (
<p className="text-muted-foreground text-xs">
Tap to start voice chat
</p>
) : agentState === "connected" ? (
<p className="text-xs text-green-600">Connected</p>
) : isTransitioning ? (
<ShimmeringText
text={agentState}
className="text-xs capitalize"
/>
) : null}
</div>
</div>
</div>
<div
className={cn(
"flex h-2 w-2 rounded-full transition-all duration-300",
agentState === "connected" &&
"bg-green-500 shadow-[0_0_8px_rgba(34,197,94,0.5)]",
isTransitioning && "animate-pulse bg-white/40"
)}
/>
</CardHeader>
<CardContent className="flex-1 overflow-hidden p-0">
<Conversation className="h-full">
<ConversationContent className="flex min-w-0 flex-col gap-2 p-6 pb-2">
{messages.length === 0 ? (
<ConversationEmptyState
icon={<Orb className="size-12" />}
title={
agentState === "connecting" ? (
<ShimmeringText text="Starting conversation" />
) : agentState === "connected" ? (
<ShimmeringText text="Start talking or type" />
) : (
"Start a conversation"
)
}
description={
agentState === "connecting"
? "Connecting..."
: agentState === "connected"
? "Ready to chat"
: "Type a message or tap the voice button"
}
/>
) : (
messages.map((message, index) => {
return (
<div key={index} className="flex w-full flex-col gap-1">
<Message from={message.role}>
<MessageContent className="max-w-full min-w-0">
<Response className="w-auto [overflow-wrap:anywhere] whitespace-pre-wrap">
{message.content}
</Response>
</MessageContent>
{message.role === "assistant" && (
<div className="ring-border size-6 flex-shrink-0 self-end overflow-hidden rounded-full ring-1">
<Orb
className="h-full w-full"
agentState={
isCallActive && index === messages.length - 1
? "talking"
: null
}
/>
</div>
)}
</Message>
{message.role === "assistant" && (
<ChatActions>
<ChatAction
size="sm"
tooltip={copiedIndex === index ? "Copied!" : "Copy"}
onClick={() => {
navigator.clipboard.writeText(message.content)
setCopiedIndex(index)
setTimeout(() => setCopiedIndex(null), 2000)
}}
>
{copiedIndex === index ? (
<CheckIcon className="size-4" />
) : (
<CopyIcon className="size-4" />
)}
</ChatAction>
</ChatActions>
)}
</div>
)
})
)}
</ConversationContent>
<ConversationScrollButton />
</Conversation>
</CardContent>
<CardFooter className="shrink-0 border-t">
<div className="flex w-full items-center gap-2">
<div className="flex flex-1 items-center gap-2">
<Input
value={textInput}
onChange={handleTextInputChange}
onKeyDown={handleKeyDown}
placeholder="Type a message..."
className="h-9 focus-visible:ring-0 focus-visible:ring-offset-0"
disabled={isTransitioning}
/>
<Button
onClick={handleSendText}
size="icon"
variant="ghost"
className="rounded-full"
disabled={!textInput.trim() || isTransitioning}
>
<SendIcon className="size-4" />
<span className="sr-only">Send message</span>
</Button>
{!isCallActive && (
<Button
onClick={handleCall}
size="icon"
variant="ghost"
className={cn("relative shrink-0 rounded-full transition-all")}
disabled={isTransitioning}
>
<AudioLinesIcon className="size-4" />
<span className="sr-only">Start voice call</span>
</Button>
)}
{isCallActive && (
<Button
onClick={handleCall}
size="icon"
variant="secondary"
className={cn("relative shrink-0 rounded-full transition-all")}
disabled={isTransitioning}
>
<PhoneOffIcon className="size-4" />
<span className="sr-only">End call</span>
</Button>
)}
</div>
</div>
</CardFooter>
</Card>
)
}
Transcriber
Record audio in the browser and hand it to a server action for transcription; the elapsed transcription time is displayed once the result arrives.
"use client"
import { Fragment, useCallback, useEffect, useRef, useState } from "react"
import { Copy } from "lucide-react"
import { Streamdown } from "streamdown"
import { cn } from "@/lib/utils"
import {
transcribeAudio,
type TranscriptionResult,
} from "@/app/transcriber-01/actions/transcribe"
import { Button } from "@/components/ui/button"
import { Card } from "@/components/ui/card"
import { LiveWaveform } from "@/components/ui/live-waveform"
import { ScrollArea } from "@/components/ui/scroll-area"
import { Separator } from "@/components/ui/separator"
interface RecordingState {
isRecording: boolean
isProcessing: boolean
transcript: string
error: string
transcriptionTime?: number
}
export default function Transcriber01() {
const [recording, setRecording] = useState<RecordingState>({
isRecording: false,
isProcessing: false,
transcript: "",
error: "",
})
const mediaRecorderRef = useRef<MediaRecorder | null>(null)
const audioChunksRef = useRef<Blob[]>([])
const streamRef = useRef<MediaStream | null>(null)
const updateRecording = useCallback((updates: Partial<RecordingState>) => {
setRecording((prev) => ({ ...prev, ...updates }))
}, [])
const cleanupStream = useCallback(() => {
if (streamRef.current) {
streamRef.current.getTracks().forEach((track) => track.stop())
streamRef.current = null
}
}, [])
const stopRecording = useCallback(() => {
if (mediaRecorderRef.current?.state !== "inactive") {
mediaRecorderRef.current?.stop()
}
cleanupStream()
updateRecording({ isRecording: false })
}, [cleanupStream, updateRecording])
const processAudio = useCallback(
async (audioBlob: Blob) => {
updateRecording({ isProcessing: true, error: "" })
try {
const result: TranscriptionResult = await transcribeAudio({
audio: new File([audioBlob], "recording.webm", {
type: "audio/webm",
}),
})
if (result.error) {
throw new Error(result.error)
}
updateRecording({
transcript: result.text || "",
transcriptionTime: result.transcriptionTime,
isProcessing: false,
})
} catch (err) {
console.error("Transcription error:", err)
updateRecording({
error:
err instanceof Error ? err.message : "Failed to transcribe audio",
isProcessing: false,
})
}
},
[updateRecording]
)
const startRecording = useCallback(async () => {
try {
updateRecording({
transcript: "",
error: "",
transcriptionTime: undefined,
})
audioChunksRef.current = []
const stream =
await navigator.mediaDevices.getUserMedia(AUDIO_CONSTRAINTS)
streamRef.current = stream
const mimeType = getMimeType()
const mediaRecorder = new MediaRecorder(stream, { mimeType })
mediaRecorderRef.current = mediaRecorder
mediaRecorder.ondataavailable = (event: BlobEvent) => {
if (event.data.size > 0) {
audioChunksRef.current.push(event.data)
}
}
mediaRecorder.onstop = () => {
const audioBlob = new Blob(audioChunksRef.current, { type: mimeType })
processAudio(audioBlob)
}
mediaRecorder.start()
updateRecording({ isRecording: true })
} catch (err) {
updateRecording({
error: "Microphone permission denied",
isRecording: false,
})
console.error("Microphone error:", err)
}
}, [processAudio, updateRecording])
const handleRecordToggle = useCallback(() => {
if (recording.isRecording) {
stopRecording()
} else {
startRecording()
}
}, [recording.isRecording, startRecording, stopRecording])
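// Global shortcut: Alt/Option + Space toggles recording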
useEffect(() => {
const handleKeyDown = (e: KeyboardEvent) => {
if (e.altKey && e.code === "Space") {
e.preventDefault()
handleRecordToggle()
}
}
window.addEventListener("keydown", handleKeyDown)
return () => window.removeEventListener("keydown", handleKeyDown)
}, [handleRecordToggle])
useEffect(() => {
return cleanupStream
}, [cleanupStream])
return (
<div className="mx-auto w-full">
<Card className="border-border relative m-0 gap-0 overflow-hidden p-0 shadow-2xl">
<div className="relative py-6">
<div className="flex h-32 items-center justify-center">
{recording.isProcessing && <TranscriberProcessing />}
{(Boolean(recording.transcript) || Boolean(recording.error)) && (
<TranscriberTranscript
transcript={recording.transcript}
error={recording.error}
/>
)}
{!recording.isProcessing &&
!Boolean(recording.transcript) &&
!Boolean(recording.error) && (
<LiveWaveform
active={recording.isRecording}
barWidth={5}
barGap={2}
barRadius={8}
barColor="#71717a"
fadeEdges
fadeWidth={48}
sensitivity={0.8}
smoothingTimeConstant={0.85}
className="w-full"
/>
)}
</div>
</div>
<Separator />
<div className="bg-card px-4 py-2">
<div className="flex items-center justify-between">
<div className="flex items-center gap-3">
<span
className={cn(
"text-muted-foreground/60 font-mono text-[10px] tracking-widest uppercase",
(recording.transcriptionTime &&
Boolean(recording.transcript)) ||
Boolean(recording.error)
? "animate-in fade-in duration-500"
: "opacity-0"
)}
>
{recording.error
? "Error"
: recording.transcriptionTime
? `${(recording.transcriptionTime / 1000).toFixed(2)}s`
: "0.00s"}
</span>
</div>
<div className="flex items-center gap-3">
<Button
variant="outline"
size="sm"
className="gap-2"
onClick={handleRecordToggle}
disabled={recording.isProcessing}
aria-label={
recording.isRecording ? "Stop recording" : "Start recording"
}
>
{recording.isRecording || recording.isProcessing
? "Stop"
: "Record"}
<kbd className="bg-muted text-muted-foreground pointer-events-none inline-flex h-5 items-center gap-1 rounded border px-1.5 font-mono text-[10px] font-medium select-none">
<span className="text-xs">⌥</span>Space
</kbd>
</Button>
</div>
</div>
</div>
</Card>
</div>
)
}
const TranscriberProcessing = () => {
return (
<LiveWaveform
active={false}
processing
barWidth={4}
barGap={1}
barRadius={8}
barColor="#71717a"
fadeEdges
fadeWidth={48}
className="w-full opacity-60"
/>
)
}
const TranscriberTranscript = ({
transcript,
error,
}: {
transcript: string
error: string
}) => {
const displayText = error || transcript
return (
<Fragment>
<div className="relative w-full max-w-2xl px-6">
<ScrollArea className="h-32 w-full">
<div
className={cn(
"text-foreground py-1 pr-8 text-left text-sm leading-relaxed",
error && "text-red-500"
)}
>
<Streamdown>{displayText}</Streamdown>
</div>
</ScrollArea>
{transcript && !error && (
<Button
variant="ghost"
size="icon"
className="absolute top-1 right-2 h-6 w-6 opacity-50 transition-opacity hover:opacity-100"
onClick={() => {
navigator.clipboard.writeText(transcript)
}}
aria-label="Copy transcript"
>
<Copy className="h-3.5 w-3.5" />
</Button>
)}
</div>
</Fragment>
)
}
const AUDIO_CONSTRAINTS: MediaStreamConstraints = {
audio: {
echoCancellation: true,
noiseSuppression: true,
autoGainControl: true,
},
}
const SUPPORTED_MIME_TYPES = ["audio/webm;codecs=opus", "audio/webm"] as const
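// Prefer Opus in WebM when the browser supports it; otherwise fall back to plain WebM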
function getMimeType(): string {
for (const type of SUPPORTED_MIME_TYPES) {
if (MediaRecorder.isTypeSupported(type)) {
return type
}
}
return "audio/webm"
}
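The transcribeAudio server action imported above is not part of the demo source. A minimal sketch of what it could look like, assuming the @elevenlabs/elevenlabs-js SDK and the Scribe speech-to-text model (the TranscriptionResult shape matches the demo's imports; the implementation details are illustrative):

"use server"

import { ElevenLabsClient } from "@elevenlabs/elevenlabs-js"

export interface TranscriptionResult {
  text?: string
  error?: string
  transcriptionTime?: number
}

// Hypothetical implementation of the action imported by the demo above
export async function transcribeAudio({
  audio,
}: {
  audio: File
}): Promise<TranscriptionResult> {
  try {
    const client = new ElevenLabsClient({
      apiKey: process.env.ELEVENLABS_API_KEY,
    })
    const start = Date.now()
    // Scribe speech-to-text; "scribe_v1" is the Scribe model ID
    const result = await client.speechToText.convert({
      file: audio,
      modelId: "scribe_v1",
    })
    return { text: result.text, transcriptionTime: Date.now() - start }
  } catch (err) {
    return {
      error: err instanceof Error ? err.message : "Transcription failed",
    }
  }
}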
Voice Fill
Fill out a form by voice, powered by ElevenLabs Scribe.
"use client"
import { useCallback, useEffect, useRef, useState } from "react"
import { zodResolver } from "@hookform/resolvers/zod"
import { useForm } from "react-hook-form"
import { cn } from "@/lib/utils"
import { voiceToFormAction } from "@/app/voice-form/actions/voice-to-form"
import {
exampleFormSchema,
ExampleFormValues,
} from "@/app/voice-form/schema"
import { Button } from "@/components/ui/button"
import {
Card,
CardContent,
CardDescription,
CardHeader,
CardTitle,
} from "@/components/ui/card"
import {
Form,
FormControl,
FormField,
FormItem,
FormLabel,
FormMessage,
} from "@/components/ui/form"
import { Input } from "@/components/ui/input"
import { VoiceButton } from "@/components/ui/voice-button"
const AUDIO_CONSTRAINTS: MediaStreamConstraints = {
audio: {
echoCancellation: true,
noiseSuppression: true,
autoGainControl: true,
},
}
const SUPPORTED_MIME_TYPES = ["audio/webm;codecs=opus", "audio/webm"] as const
function getMimeType(): string {
for (const type of SUPPORTED_MIME_TYPES) {
if (MediaRecorder.isTypeSupported(type)) {
return type
}
}
return "audio/webm"
}
export default function Page() {
const [isRecording, setIsRecording] = useState(false)
const [isProcessing, setIsProcessing] = useState(false)
const [error, setError] = useState("")
const [success, setSuccess] = useState(false)
const mediaRecorderRef = useRef<MediaRecorder | null>(null)
const audioChunksRef = useRef<Blob[]>([])
const streamRef = useRef<MediaStream | null>(null)
const form = useForm<ExampleFormValues>({
resolver: zodResolver(exampleFormSchema),
defaultValues: {
firstName: "",
lastName: "",
},
mode: "onChange",
})
const cleanupStream = useCallback(() => {
if (streamRef.current) {
streamRef.current.getTracks().forEach((track) => track.stop())
streamRef.current = null
}
}, [])
const processAudio = useCallback(
async (audioBlob: Blob) => {
setIsProcessing(true)
setError("")
setSuccess(false)
try {
const audioFile = new File([audioBlob], "audio.webm", {
type: audioBlob.type,
})
const result = await voiceToFormAction(audioFile)
if (result.data && Object.keys(result.data).length > 0) {
Object.entries(result.data).forEach(([key, value]) => {
if (value) {
form.setValue(key as keyof ExampleFormValues, value as string, {
shouldValidate: true,
})
}
})
setSuccess(true)
setTimeout(() => setSuccess(false), 2000)
}
} catch (err) {
console.error("Voice input error:", err)
setError(err instanceof Error ? err.message : "Failed to process audio")
} finally {
setIsProcessing(false)
}
},
[form]
)
const stopRecording = useCallback(() => {
if (mediaRecorderRef.current?.state !== "inactive") {
mediaRecorderRef.current?.stop()
}
cleanupStream()
setIsRecording(false)
}, [cleanupStream])
const startRecording = useCallback(async () => {
try {
setError("")
audioChunksRef.current = []
const stream =
await navigator.mediaDevices.getUserMedia(AUDIO_CONSTRAINTS)
streamRef.current = stream
const mimeType = getMimeType()
const mediaRecorder = new MediaRecorder(stream, { mimeType })
mediaRecorderRef.current = mediaRecorder
mediaRecorder.ondataavailable = (event: BlobEvent) => {
if (event.data.size > 0) {
audioChunksRef.current.push(event.data)
}
}
mediaRecorder.onstop = () => {
const audioBlob = new Blob(audioChunksRef.current, { type: mimeType })
processAudio(audioBlob)
}
mediaRecorder.start()
setIsRecording(true)
} catch (err) {
setError("Microphone permission denied")
console.error("Microphone error:", err)
}
}, [processAudio])
const handleVoiceToggle = useCallback(() => {
if (isRecording) {
stopRecording()
} else {
startRecording()
}
}, [isRecording, startRecording, stopRecording])
useEffect(() => {
return cleanupStream
}, [cleanupStream])
const onSubmit = (data: ExampleFormValues) => {
console.log("Form submitted:", data)
}
const voiceState = isProcessing
? "processing"
: isRecording
? "recording"
: success
? "success"
: error
? "error"
: "idle"
return (
<div className="mx-auto w-full">
<Card className="relative overflow-hidden">
<div className={cn("flex flex-col gap-2")}>
<CardHeader>
<div className="flex items-start justify-between">
<div className="space-y-1">
<CardTitle>Voice Fill</CardTitle>
<CardDescription>Powered by ElevenLabs Scribe</CardDescription>
</div>
<VoiceButton
state={voiceState}
onPress={handleVoiceToggle}
disabled={isProcessing}
trailing="Voice Fill"
/>
</div>
</CardHeader>
<CardContent>
<Form {...form}>
<form
onSubmit={form.handleSubmit(onSubmit)}
className="space-y-6"
>
<div className="grid grid-cols-1 gap-4 sm:grid-cols-2">
<FormField
control={form.control}
name="firstName"
render={({ field }) => (
<FormItem>
<FormLabel>First Name *</FormLabel>
<FormControl>
<Input placeholder="John" {...field} />
</FormControl>
<FormMessage />
</FormItem>
)}
/>
<FormField
control={form.control}
name="lastName"
render={({ field }) => (
<FormItem>
<FormLabel>Last Name *</FormLabel>
<FormControl>
<Input placeholder="Doe" {...field} />
</FormControl>
<FormMessage />
</FormItem>
)}
/>
</div>
</form>
</Form>
</CardContent>
</div>
</Card>
</div>
)
}
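The exampleFormSchema and ExampleFormValues imported above live in @/app/voice-form/schema, which is not shown. A minimal sketch inferred from the demo's defaultValues (the validation messages are assumptions):

// app/voice-form/schema.ts (hypothetical, inferred from the demo's defaultValues)
import { z } from "zod"

export const exampleFormSchema = z.object({
  firstName: z.string().min(1, "First name is required"),
  lastName: z.string().min(1, "Last name is required"),
})

export type ExampleFormValues = z.infer<typeof exampleFormSchema>

The voiceToFormAction server action would then transcribe the recording (as in the transcriber example) and map the transcript onto these fields, for example with a structured-output LLM call.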
You can explore more examples on the ElevenLabs UI site, or view the source code for all components on GitHub.
Deploy and Scale Agents with ElevenLabs
ElevenLabs delivers the infrastructure and developer experience you need to ship reliable audio & agent applications at scale.
Talk to an expert