Examples
A collection of building blocks for agents and audio that you can customize and extend.
Files
"use client"
import { useCallback, useEffect, useRef, useState } from "react"
import type { ComponentProps } from "react"
import { useConversation } from "@elevenlabs/react"
import {
AudioLinesIcon,
CheckIcon,
CopyIcon,
PhoneOffIcon,
SendIcon,
} from "lucide-react"
import { cn } from "@/lib/utils"
import { Button } from "@/components/ui/button"
import {
Card,
CardContent,
CardFooter,
CardHeader,
} from "@/components/ui/card"
import {
Conversation,
ConversationContent,
ConversationEmptyState,
ConversationScrollButton,
} from "@/components/ui/conversation"
import { Input } from "@/components/ui/input"
import { Message, MessageContent } from "@/components/ui/message"
import { Orb } from "@/components/ui/orb"
import { Response } from "@/components/ui/response"
import { ShimmeringText } from "@/components/ui/shimmering-text"
import {
Tooltip,
TooltipContent,
TooltipProvider,
TooltipTrigger,
} from "@/components/ui/tooltip"
// Kinds of non-chat status messages. Declared for the optional `type`
// field below; not referenced anywhere else in this file.
type SystemMessageType = "initial" | "connecting" | "connected" | "error"
// One entry in the rendered transcript.
interface ChatMessage {
  role: "user" | "assistant" // which side of the chat the bubble renders on
  content: string
  timestamp?: Date // not set by any code in this file
  type?: SystemMessageType // not set by any code in this file
}
// Agent identity shown in the card header. NOTE(review): the non-null
// assertion assumes NEXT_PUBLIC_ELEVENLABS_AGENT_ID is set at build time —
// confirm; otherwise startSession receives undefined as the agentId.
const DEFAULT_AGENT = {
  agentId: process.env.NEXT_PUBLIC_ELEVENLABS_AGENT_ID!,
  name: "Customer Support",
  description: "AI Voice Assistant",
}
type ChatActionsProps = ComponentProps<"div">
const ChatActions = ({ className, children, ...props }: ChatActionsProps) => (
<div className={cn("flex items-center gap-1", className)} {...props}>
{children}
</div>
)
type ChatActionProps = ComponentProps<typeof Button> & {
  tooltip?: string
  label?: string
}

// Icon button for message actions. Renders a plain button, wrapped in a
// tooltip when `tooltip` is provided; `label` (or `tooltip`) becomes the
// screen-reader-only accessible name.
const ChatAction = ({
  tooltip,
  children,
  label,
  className,
  variant = "ghost",
  size = "sm",
  ...props
}: ChatActionProps) => {
  const accessibleName = label || tooltip
  const trigger = (
    <Button
      type="button"
      size={size}
      variant={variant}
      className={cn(
        "text-muted-foreground hover:text-foreground relative size-9 p-1.5",
        className
      )}
      {...props}
    >
      {children}
      <span className="sr-only">{accessibleName}</span>
    </Button>
  )
  if (!tooltip) {
    return trigger
  }
  return (
    <TooltipProvider>
      <Tooltip>
        <TooltipTrigger asChild>{trigger}</TooltipTrigger>
        <TooltipContent>
          <p>{tooltip}</p>
        </TooltipContent>
      </Tooltip>
    </TooltipProvider>
  )
}
// Chat UI backed by an ElevenLabs conversational agent. Supports a
// text-only session (websocket), started implicitly by sending a message,
// and a voice session (webrtc + microphone) started via the call button.
export default function Page() {
  // Rendered transcript (user + assistant turns).
  const [messages, setMessages] = useState<ChatMessage[]>([])
  // Connection state, mirrored from the SDK via onStatusChange below.
  const [agentState, setAgentState] = useState<
    "disconnected" | "connecting" | "connected" | "disconnecting" | null
  >("disconnected")
  const [textInput, setTextInput] = useState("")
  // Index of the message whose "Copy" action currently shows a checkmark.
  const [copiedIndex, setCopiedIndex] = useState<number | null>(null)
  const [errorMessage, setErrorMessage] = useState<string | null>(null)
  // Cached microphone stream so its tracks can be stopped on hangup/unmount.
  const mediaStreamRef = useRef<MediaStream | null>(null)
  // Ref (not state) so the useConversation callbacks read the current mode
  // without needing to be recreated on mode changes.
  const isTextOnlyModeRef = useRef<boolean>(true)
  const conversation = useConversation({
    onConnect: () => {
      // Only clear messages for voice mode
      if (!isTextOnlyModeRef.current) {
        setMessages([])
      }
    },
    onDisconnect: () => {
      // Only clear messages for voice mode
      if (!isTextOnlyModeRef.current) {
        setMessages([])
      }
    },
    onMessage: (message) => {
      // Append both user (speech-recognized) and assistant turns.
      if (message.message) {
        const newMessage: ChatMessage = {
          role: message.source === "user" ? "user" : "assistant",
          content: message.message,
        }
        setMessages((prev) => [...prev, newMessage])
      }
    },
    onError: (error) => {
      console.error("Error:", error)
      setAgentState("disconnected")
    },
    onDebug: (debug) => {
      console.log("Debug:", debug)
    },
  })
  // Lazily acquire (and cache) the microphone stream for voice mode.
  // Sets a user-facing message on permission denial, then rethrows.
  const getMicStream = useCallback(async () => {
    if (mediaStreamRef.current) return mediaStreamRef.current
    try {
      const stream = await navigator.mediaDevices.getUserMedia({ audio: true })
      mediaStreamRef.current = stream
      setErrorMessage(null)
      return stream
    } catch (error) {
      if (error instanceof DOMException && error.name === "NotAllowedError") {
        setErrorMessage("Please enable microphone permissions in your browser.")
      }
      throw error
    }
  }, [])
  // Open a session with the agent. NOTE(review): failures are caught and
  // swallowed here, so this promise never rejects — the catch blocks in
  // handleCall/handleSendText are effectively unreachable, and
  // handleSendText will still call sendUserMessage after a failed start.
  const startConversation = useCallback(
    async (
      textOnly: boolean = true,
      skipConnectingMessage: boolean = false
    ) => {
      try {
        isTextOnlyModeRef.current = textOnly
        if (!skipConnectingMessage) {
          setMessages([])
        }
        if (!textOnly) {
          await getMicStream()
        }
        await conversation.startSession({
          agentId: DEFAULT_AGENT.agentId,
          connectionType: textOnly ? "websocket" : "webrtc",
          overrides: {
            conversation: {
              textOnly: textOnly,
            },
            agent: {
              // Suppress the agent's spoken greeting in text-only mode.
              firstMessage: textOnly ? "" : undefined,
            },
          },
          onStatusChange: (status) => setAgentState(status.status),
        })
      } catch (error) {
        console.error(error)
        setAgentState("disconnected")
        setMessages([])
      }
    },
    [conversation, getMicStream]
  )
  // Toggle the voice call: start a webrtc session when idle; otherwise end
  // the session and stop the cached microphone tracks.
  const handleCall = useCallback(async () => {
    if (agentState === "disconnected" || agentState === null) {
      setAgentState("connecting")
      try {
        await startConversation(false)
      } catch {
        setAgentState("disconnected")
      }
    } else if (agentState === "connected") {
      conversation.endSession()
      setAgentState("disconnected")
      if (mediaStreamRef.current) {
        mediaStreamRef.current.getTracks().forEach((t) => t.stop())
        mediaStreamRef.current = null
      }
    }
  }, [agentState, conversation, startConversation])
  const handleTextInputChange = useCallback(
    (e: React.ChangeEvent<HTMLInputElement>) => {
      setTextInput(e.target.value)
    },
    []
  )
  // Send the typed message; when no session is active, first start a
  // text-only session (keeping the optimistic user message on screen).
  const handleSendText = useCallback(async () => {
    if (!textInput.trim()) return
    const messageToSend = textInput
    if (agentState === "disconnected" || agentState === null) {
      const userMessage: ChatMessage = {
        role: "user",
        content: messageToSend,
      }
      setTextInput("")
      setAgentState("connecting")
      setMessages([userMessage])
      try {
        await startConversation(true, true)
        // Send message after connection is established
        conversation.sendUserMessage(messageToSend)
      } catch (error) {
        console.error("Failed to start conversation:", error)
      }
    } else if (agentState === "connected") {
      const newMessage: ChatMessage = {
        role: "user",
        content: messageToSend,
      }
      setMessages((prev) => [...prev, newMessage])
      setTextInput("")
      conversation.sendUserMessage(messageToSend)
    }
  }, [textInput, agentState, conversation, startConversation])
  // Enter (without Shift) submits the text input.
  const handleKeyDown = useCallback(
    (e: React.KeyboardEvent<HTMLInputElement>) => {
      if (e.key === "Enter" && !e.shiftKey) {
        e.preventDefault()
        handleSendText()
      }
    },
    [handleSendText]
  )
  // Release the microphone on unmount.
  useEffect(() => {
    return () => {
      if (mediaStreamRef.current) {
        mediaStreamRef.current.getTracks().forEach((t) => t.stop())
      }
    }
  }, [])
  const isCallActive = agentState === "connected"
  const isTransitioning =
    agentState === "connecting" || agentState === "disconnecting"
  // Volume getters for the Orb: square-root curve boosts quiet levels,
  // clamped to 1.0.
  const getInputVolume = useCallback(() => {
    const rawValue = conversation.getInputVolume?.() ?? 0
    return Math.min(1.0, Math.pow(rawValue, 0.5) * 2.5)
  }, [conversation])
  const getOutputVolume = useCallback(() => {
    const rawValue = conversation.getOutputVolume?.() ?? 0
    return Math.min(1.0, Math.pow(rawValue, 0.5) * 2.5)
  }, [conversation])
  return (
    <Card
      className={cn(
        "mx-auto flex h-[380px] w-full flex-col gap-0 overflow-hidden"
      )}
    >
      <CardHeader className="flex shrink-0 flex-row items-center justify-between pb-4">
        <div className="flex items-center gap-4">
          <div className="ring-border relative size-10 overflow-hidden rounded-full ring-1">
            <Orb
              className="h-full w-full"
              volumeMode="manual"
              getInputVolume={getInputVolume}
              getOutputVolume={getOutputVolume}
            />
          </div>
          <div className="flex flex-col gap-0.5">
            <p className="text-sm leading-none font-medium">
              {DEFAULT_AGENT.name}
            </p>
            <div className="flex items-center gap-2">
              {errorMessage ? (
                <p className="text-destructive text-xs">{errorMessage}</p>
              ) : agentState === "disconnected" || agentState === null ? (
                <p className="text-muted-foreground text-xs">
                  Tap to start voice chat
                </p>
              ) : agentState === "connected" ? (
                <p className="text-xs text-green-600">Connected</p>
              ) : isTransitioning ? (
                <ShimmeringText
                  text={agentState}
                  className="text-xs capitalize"
                />
              ) : null}
            </div>
          </div>
        </div>
        <div
          className={cn(
            "flex h-2 w-2 rounded-full transition-all duration-300",
            agentState === "connected" &&
              "bg-green-500 shadow-[0_0_8px_rgba(34,197,94,0.5)]",
            isTransitioning && "animate-pulse bg-white/40"
          )}
        />
      </CardHeader>
      <CardContent className="flex-1 overflow-hidden p-0">
        <Conversation className="h-full">
          <ConversationContent className="flex min-w-0 flex-col gap-2 p-6 pb-2">
            {messages.length === 0 ? (
              <ConversationEmptyState
                icon={<Orb className="size-12" />}
                title={
                  agentState === "connecting" ? (
                    <ShimmeringText text="Starting conversation" />
                  ) : agentState === "connected" ? (
                    <ShimmeringText text="Start talking or type" />
                  ) : (
                    "Start a conversation"
                  )
                }
                description={
                  agentState === "connecting"
                    ? "Connecting..."
                    : agentState === "connected"
                      ? "Ready to chat"
                      : "Type a message or tap the voice button"
                }
              />
            ) : (
              messages.map((message, index) => {
                return (
                  <div key={index} className="flex w-full flex-col gap-1">
                    <Message from={message.role}>
                      <MessageContent className="max-w-full min-w-0">
                        <Response className="w-auto [overflow-wrap:anywhere] whitespace-pre-wrap">
                          {message.content}
                        </Response>
                      </MessageContent>
                      {message.role === "assistant" && (
                        <div className="ring-border size-6 flex-shrink-0 self-end overflow-hidden rounded-full ring-1">
                          <Orb
                            className="h-full w-full"
                            agentState={
                              isCallActive && index === messages.length - 1
                                ? "talking"
                                : null
                            }
                          />
                        </div>
                      )}
                    </Message>
                    {message.role === "assistant" && (
                      <ChatActions>
                        <ChatAction
                          size="sm"
                          tooltip={copiedIndex === index ? "Copied!" : "Copy"}
                          onClick={() => {
                            navigator.clipboard.writeText(message.content)
                            setCopiedIndex(index)
                            setTimeout(() => setCopiedIndex(null), 2000)
                          }}
                        >
                          {copiedIndex === index ? (
                            <CheckIcon className="size-4" />
                          ) : (
                            <CopyIcon className="size-4" />
                          )}
                        </ChatAction>
                      </ChatActions>
                    )}
                  </div>
                )
              })
            )}
          </ConversationContent>
          <ConversationScrollButton />
        </Conversation>
      </CardContent>
      <CardFooter className="shrink-0 border-t">
        <div className="flex w-full items-center gap-2">
          <div className="flex flex-1 items-center gap-2">
            <Input
              value={textInput}
              onChange={handleTextInputChange}
              onKeyDown={handleKeyDown}
              placeholder="Type a message..."
              className="h-9 focus-visible:ring-0 focus-visible:ring-offset-0"
              disabled={isTransitioning}
            />
            <Button
              onClick={handleSendText}
              size="icon"
              variant="ghost"
              className="rounded-full"
              disabled={!textInput.trim() || isTransitioning}
            >
              <SendIcon className="size-4" />
              <span className="sr-only">Send message</span>
            </Button>
            {!isCallActive && (
              <Button
                onClick={handleCall}
                size="icon"
                variant="ghost"
                className={cn("relative shrink-0 rounded-full transition-all")}
                disabled={isTransitioning}
              >
                <AudioLinesIcon className="size-4" />
                <span className="sr-only">Start voice call</span>
              </Button>
            )}
            {isCallActive && (
              <Button
                onClick={handleCall}
                size="icon"
                variant="secondary"
                className={cn("relative shrink-0 rounded-full transition-all")}
                disabled={isTransitioning}
              >
                <PhoneOffIcon className="size-4" />
                <span className="sr-only">End call</span>
              </Button>
            )}
          </div>
        </div>
      </CardFooter>
    </Card>
  )
}
Voice chat 1
voice-chat-01
Customer Support
Tap to start voice chat
Start a conversation
Type a message or tap the voice button
"use client"
import { useCallback, useEffect, useRef, useState } from "react"
import type { ComponentProps } from "react"
import { useConversation } from "@elevenlabs/react"
import {
AudioLinesIcon,
CheckIcon,
CopyIcon,
PhoneOffIcon,
SendIcon,
} from "lucide-react"
import { cn } from "@/lib/utils"
import { Button } from "@/components/ui/button"
import {
Card,
CardContent,
CardFooter,
CardHeader,
} from "@/components/ui/card"
import {
Conversation,
ConversationContent,
ConversationEmptyState,
ConversationScrollButton,
} from "@/components/ui/conversation"
import { Input } from "@/components/ui/input"
import { Message, MessageContent } from "@/components/ui/message"
import { Orb } from "@/components/ui/orb"
import { Response } from "@/components/ui/response"
import { ShimmeringText } from "@/components/ui/shimmering-text"
import {
Tooltip,
TooltipContent,
TooltipProvider,
TooltipTrigger,
} from "@/components/ui/tooltip"
// Kinds of non-chat status messages. Declared for the optional `type`
// field below; not referenced anywhere else in this file.
type SystemMessageType = "initial" | "connecting" | "connected" | "error"
// One entry in the rendered transcript.
interface ChatMessage {
  role: "user" | "assistant" // which side of the chat the bubble renders on
  content: string
  timestamp?: Date // not set by any code in this file
  type?: SystemMessageType // not set by any code in this file
}
// Agent identity shown in the card header. NOTE(review): the non-null
// assertion assumes NEXT_PUBLIC_ELEVENLABS_AGENT_ID is set at build time —
// confirm; otherwise startSession receives undefined as the agentId.
const DEFAULT_AGENT = {
  agentId: process.env.NEXT_PUBLIC_ELEVENLABS_AGENT_ID!,
  name: "Customer Support",
  description: "AI Voice Assistant",
}
type ChatActionsProps = ComponentProps<"div">
const ChatActions = ({ className, children, ...props }: ChatActionsProps) => (
<div className={cn("flex items-center gap-1", className)} {...props}>
{children}
</div>
)
type ChatActionProps = ComponentProps<typeof Button> & {
  tooltip?: string
  label?: string
}

// Icon button for message actions. Renders a plain button, wrapped in a
// tooltip when `tooltip` is provided; `label` (or `tooltip`) becomes the
// screen-reader-only accessible name.
const ChatAction = ({
  tooltip,
  children,
  label,
  className,
  variant = "ghost",
  size = "sm",
  ...props
}: ChatActionProps) => {
  const accessibleName = label || tooltip
  const trigger = (
    <Button
      type="button"
      size={size}
      variant={variant}
      className={cn(
        "text-muted-foreground hover:text-foreground relative size-9 p-1.5",
        className
      )}
      {...props}
    >
      {children}
      <span className="sr-only">{accessibleName}</span>
    </Button>
  )
  if (!tooltip) {
    return trigger
  }
  return (
    <TooltipProvider>
      <Tooltip>
        <TooltipTrigger asChild>{trigger}</TooltipTrigger>
        <TooltipContent>
          <p>{tooltip}</p>
        </TooltipContent>
      </Tooltip>
    </TooltipProvider>
  )
}
// Chat UI backed by an ElevenLabs conversational agent. Supports a
// text-only session (websocket), started implicitly by sending a message,
// and a voice session (webrtc + microphone) started via the call button.
export default function Page() {
  // Rendered transcript (user + assistant turns).
  const [messages, setMessages] = useState<ChatMessage[]>([])
  // Connection state, mirrored from the SDK via onStatusChange below.
  const [agentState, setAgentState] = useState<
    "disconnected" | "connecting" | "connected" | "disconnecting" | null
  >("disconnected")
  const [textInput, setTextInput] = useState("")
  // Index of the message whose "Copy" action currently shows a checkmark.
  const [copiedIndex, setCopiedIndex] = useState<number | null>(null)
  const [errorMessage, setErrorMessage] = useState<string | null>(null)
  // Cached microphone stream so its tracks can be stopped on hangup/unmount.
  const mediaStreamRef = useRef<MediaStream | null>(null)
  // Ref (not state) so the useConversation callbacks read the current mode
  // without needing to be recreated on mode changes.
  const isTextOnlyModeRef = useRef<boolean>(true)
  const conversation = useConversation({
    onConnect: () => {
      // Only clear messages for voice mode
      if (!isTextOnlyModeRef.current) {
        setMessages([])
      }
    },
    onDisconnect: () => {
      // Only clear messages for voice mode
      if (!isTextOnlyModeRef.current) {
        setMessages([])
      }
    },
    onMessage: (message) => {
      // Append both user (speech-recognized) and assistant turns.
      if (message.message) {
        const newMessage: ChatMessage = {
          role: message.source === "user" ? "user" : "assistant",
          content: message.message,
        }
        setMessages((prev) => [...prev, newMessage])
      }
    },
    onError: (error) => {
      console.error("Error:", error)
      setAgentState("disconnected")
    },
    onDebug: (debug) => {
      console.log("Debug:", debug)
    },
  })
  // Lazily acquire (and cache) the microphone stream for voice mode.
  // Sets a user-facing message on permission denial, then rethrows.
  const getMicStream = useCallback(async () => {
    if (mediaStreamRef.current) return mediaStreamRef.current
    try {
      const stream = await navigator.mediaDevices.getUserMedia({ audio: true })
      mediaStreamRef.current = stream
      setErrorMessage(null)
      return stream
    } catch (error) {
      if (error instanceof DOMException && error.name === "NotAllowedError") {
        setErrorMessage("Please enable microphone permissions in your browser.")
      }
      throw error
    }
  }, [])
  // Open a session with the agent. NOTE(review): failures are caught and
  // swallowed here, so this promise never rejects — the catch blocks in
  // handleCall/handleSendText are effectively unreachable, and
  // handleSendText will still call sendUserMessage after a failed start.
  const startConversation = useCallback(
    async (
      textOnly: boolean = true,
      skipConnectingMessage: boolean = false
    ) => {
      try {
        isTextOnlyModeRef.current = textOnly
        if (!skipConnectingMessage) {
          setMessages([])
        }
        if (!textOnly) {
          await getMicStream()
        }
        await conversation.startSession({
          agentId: DEFAULT_AGENT.agentId,
          connectionType: textOnly ? "websocket" : "webrtc",
          overrides: {
            conversation: {
              textOnly: textOnly,
            },
            agent: {
              // Suppress the agent's spoken greeting in text-only mode.
              firstMessage: textOnly ? "" : undefined,
            },
          },
          onStatusChange: (status) => setAgentState(status.status),
        })
      } catch (error) {
        console.error(error)
        setAgentState("disconnected")
        setMessages([])
      }
    },
    [conversation, getMicStream]
  )
  // Toggle the voice call: start a webrtc session when idle; otherwise end
  // the session and stop the cached microphone tracks.
  const handleCall = useCallback(async () => {
    if (agentState === "disconnected" || agentState === null) {
      setAgentState("connecting")
      try {
        await startConversation(false)
      } catch {
        setAgentState("disconnected")
      }
    } else if (agentState === "connected") {
      conversation.endSession()
      setAgentState("disconnected")
      if (mediaStreamRef.current) {
        mediaStreamRef.current.getTracks().forEach((t) => t.stop())
        mediaStreamRef.current = null
      }
    }
  }, [agentState, conversation, startConversation])
  const handleTextInputChange = useCallback(
    (e: React.ChangeEvent<HTMLInputElement>) => {
      setTextInput(e.target.value)
    },
    []
  )
  // Send the typed message; when no session is active, first start a
  // text-only session (keeping the optimistic user message on screen).
  const handleSendText = useCallback(async () => {
    if (!textInput.trim()) return
    const messageToSend = textInput
    if (agentState === "disconnected" || agentState === null) {
      const userMessage: ChatMessage = {
        role: "user",
        content: messageToSend,
      }
      setTextInput("")
      setAgentState("connecting")
      setMessages([userMessage])
      try {
        await startConversation(true, true)
        // Send message after connection is established
        conversation.sendUserMessage(messageToSend)
      } catch (error) {
        console.error("Failed to start conversation:", error)
      }
    } else if (agentState === "connected") {
      const newMessage: ChatMessage = {
        role: "user",
        content: messageToSend,
      }
      setMessages((prev) => [...prev, newMessage])
      setTextInput("")
      conversation.sendUserMessage(messageToSend)
    }
  }, [textInput, agentState, conversation, startConversation])
  // Enter (without Shift) submits the text input.
  const handleKeyDown = useCallback(
    (e: React.KeyboardEvent<HTMLInputElement>) => {
      if (e.key === "Enter" && !e.shiftKey) {
        e.preventDefault()
        handleSendText()
      }
    },
    [handleSendText]
  )
  // Release the microphone on unmount.
  useEffect(() => {
    return () => {
      if (mediaStreamRef.current) {
        mediaStreamRef.current.getTracks().forEach((t) => t.stop())
      }
    }
  }, [])
  const isCallActive = agentState === "connected"
  const isTransitioning =
    agentState === "connecting" || agentState === "disconnecting"
  // Volume getters for the Orb: square-root curve boosts quiet levels,
  // clamped to 1.0.
  const getInputVolume = useCallback(() => {
    const rawValue = conversation.getInputVolume?.() ?? 0
    return Math.min(1.0, Math.pow(rawValue, 0.5) * 2.5)
  }, [conversation])
  const getOutputVolume = useCallback(() => {
    const rawValue = conversation.getOutputVolume?.() ?? 0
    return Math.min(1.0, Math.pow(rawValue, 0.5) * 2.5)
  }, [conversation])
  return (
    <Card
      className={cn(
        "mx-auto flex h-[380px] w-full flex-col gap-0 overflow-hidden"
      )}
    >
      <CardHeader className="flex shrink-0 flex-row items-center justify-between pb-4">
        <div className="flex items-center gap-4">
          <div className="ring-border relative size-10 overflow-hidden rounded-full ring-1">
            <Orb
              className="h-full w-full"
              volumeMode="manual"
              getInputVolume={getInputVolume}
              getOutputVolume={getOutputVolume}
            />
          </div>
          <div className="flex flex-col gap-0.5">
            <p className="text-sm leading-none font-medium">
              {DEFAULT_AGENT.name}
            </p>
            <div className="flex items-center gap-2">
              {errorMessage ? (
                <p className="text-destructive text-xs">{errorMessage}</p>
              ) : agentState === "disconnected" || agentState === null ? (
                <p className="text-muted-foreground text-xs">
                  Tap to start voice chat
                </p>
              ) : agentState === "connected" ? (
                <p className="text-xs text-green-600">Connected</p>
              ) : isTransitioning ? (
                <ShimmeringText
                  text={agentState}
                  className="text-xs capitalize"
                />
              ) : null}
            </div>
          </div>
        </div>
        <div
          className={cn(
            "flex h-2 w-2 rounded-full transition-all duration-300",
            agentState === "connected" &&
              "bg-green-500 shadow-[0_0_8px_rgba(34,197,94,0.5)]",
            isTransitioning && "animate-pulse bg-white/40"
          )}
        />
      </CardHeader>
      <CardContent className="flex-1 overflow-hidden p-0">
        <Conversation className="h-full">
          <ConversationContent className="flex min-w-0 flex-col gap-2 p-6 pb-2">
            {messages.length === 0 ? (
              <ConversationEmptyState
                icon={<Orb className="size-12" />}
                title={
                  agentState === "connecting" ? (
                    <ShimmeringText text="Starting conversation" />
                  ) : agentState === "connected" ? (
                    <ShimmeringText text="Start talking or type" />
                  ) : (
                    "Start a conversation"
                  )
                }
                description={
                  agentState === "connecting"
                    ? "Connecting..."
                    : agentState === "connected"
                      ? "Ready to chat"
                      : "Type a message or tap the voice button"
                }
              />
            ) : (
              messages.map((message, index) => {
                return (
                  <div key={index} className="flex w-full flex-col gap-1">
                    <Message from={message.role}>
                      <MessageContent className="max-w-full min-w-0">
                        <Response className="w-auto [overflow-wrap:anywhere] whitespace-pre-wrap">
                          {message.content}
                        </Response>
                      </MessageContent>
                      {message.role === "assistant" && (
                        <div className="ring-border size-6 flex-shrink-0 self-end overflow-hidden rounded-full ring-1">
                          <Orb
                            className="h-full w-full"
                            agentState={
                              isCallActive && index === messages.length - 1
                                ? "talking"
                                : null
                            }
                          />
                        </div>
                      )}
                    </Message>
                    {message.role === "assistant" && (
                      <ChatActions>
                        <ChatAction
                          size="sm"
                          tooltip={copiedIndex === index ? "Copied!" : "Copy"}
                          onClick={() => {
                            navigator.clipboard.writeText(message.content)
                            setCopiedIndex(index)
                            setTimeout(() => setCopiedIndex(null), 2000)
                          }}
                        >
                          {copiedIndex === index ? (
                            <CheckIcon className="size-4" />
                          ) : (
                            <CopyIcon className="size-4" />
                          )}
                        </ChatAction>
                      </ChatActions>
                    )}
                  </div>
                )
              })
            )}
          </ConversationContent>
          <ConversationScrollButton />
        </Conversation>
      </CardContent>
      <CardFooter className="shrink-0 border-t">
        <div className="flex w-full items-center gap-2">
          <div className="flex flex-1 items-center gap-2">
            <Input
              value={textInput}
              onChange={handleTextInputChange}
              onKeyDown={handleKeyDown}
              placeholder="Type a message..."
              className="h-9 focus-visible:ring-0 focus-visible:ring-offset-0"
              disabled={isTransitioning}
            />
            <Button
              onClick={handleSendText}
              size="icon"
              variant="ghost"
              className="rounded-full"
              disabled={!textInput.trim() || isTransitioning}
            >
              <SendIcon className="size-4" />
              <span className="sr-only">Send message</span>
            </Button>
            {!isCallActive && (
              <Button
                onClick={handleCall}
                size="icon"
                variant="ghost"
                className={cn("relative shrink-0 rounded-full transition-all")}
                disabled={isTransitioning}
              >
                <AudioLinesIcon className="size-4" />
                <span className="sr-only">Start voice call</span>
              </Button>
            )}
            {isCallActive && (
              <Button
                onClick={handleCall}
                size="icon"
                variant="secondary"
                className={cn("relative shrink-0 rounded-full transition-all")}
                disabled={isTransitioning}
              >
                <PhoneOffIcon className="size-4" />
                <span className="sr-only">End call</span>
              </Button>
            )}
          </div>
        </div>
      </CardFooter>
    </Card>
  )
}
Files
"use client"
import { Fragment, useCallback, useEffect, useRef, useState } from "react"
import { Copy } from "lucide-react"
import { Streamdown } from "streamdown"
import { cn } from "@/lib/utils"
import {
transcribeAudio,
type TranscriptionResult,
} from "@/app/transcriber-01/actions/transcribe"
import { Button } from "@/components/ui/button"
import { Card } from "@/components/ui/card"
import { LiveWaveform } from "@/components/ui/live-waveform"
import { ScrollArea } from "@/components/ui/scroll-area"
import { Separator } from "@/components/ui/separator"
// UI state for one record → transcribe cycle.
interface RecordingState {
  isRecording: boolean // microphone is live and MediaRecorder is running
  isProcessing: boolean // audio uploaded, waiting for the server action
  transcript: string // last successful transcription result
  error: string // user-facing error text ("" when none)
  transcriptionTime?: number // server-reported duration in milliseconds
}
// Push-to-talk transcriber: records microphone audio with MediaRecorder,
// then sends the finished blob to the transcribeAudio server action and
// renders the transcript. Alt/Option+Space toggles recording.
export default function Transcriber01() {
  const [recording, setRecording] = useState<RecordingState>({
    isRecording: false,
    isProcessing: false,
    transcript: "",
    error: "",
  })
  const mediaRecorderRef = useRef<MediaRecorder | null>(null)
  // Chunks delivered by ondataavailable; joined into one Blob on stop.
  const audioChunksRef = useRef<Blob[]>([])
  const streamRef = useRef<MediaStream | null>(null)
  // Shallow-merge a partial update into the recording state.
  const updateRecording = useCallback((updates: Partial<RecordingState>) => {
    setRecording((prev) => ({ ...prev, ...updates }))
  }, [])
  // Stop all microphone tracks and drop the stream reference.
  const cleanupStream = useCallback(() => {
    if (streamRef.current) {
      streamRef.current.getTracks().forEach((track) => track.stop())
      streamRef.current = null
    }
  }, [])
  // Stop the recorder (its onstop handler kicks off transcription) and
  // release the microphone.
  const stopRecording = useCallback(() => {
    if (mediaRecorderRef.current?.state !== "inactive") {
      mediaRecorderRef.current?.stop()
    }
    cleanupStream()
    updateRecording({ isRecording: false })
  }, [cleanupStream, updateRecording])
  // Upload the recorded audio and store the transcript (or the error).
  const processAudio = useCallback(
    async (audioBlob: Blob) => {
      updateRecording({ isProcessing: true, error: "" })
      try {
        const result: TranscriptionResult = await transcribeAudio({
          audio: new File([audioBlob], "recording.webm", {
            type: "audio/webm",
          }),
        })
        if (result.error) {
          throw new Error(result.error)
        }
        updateRecording({
          transcript: result.text || "",
          transcriptionTime: result.transcriptionTime,
          isProcessing: false,
        })
      } catch (err) {
        console.error("Transcription error:", err)
        updateRecording({
          error:
            err instanceof Error ? err.message : "Failed to transcribe audio",
          isProcessing: false,
        })
      }
    },
    [updateRecording]
  )
  // Ask for the microphone and start a fresh MediaRecorder session.
  // NOTE(review): any failure here surfaces as "Microphone permission
  // denied", even when getUserMedia fails for a different reason.
  const startRecording = useCallback(async () => {
    try {
      updateRecording({
        transcript: "",
        error: "",
        transcriptionTime: undefined,
      })
      audioChunksRef.current = []
      const stream =
        await navigator.mediaDevices.getUserMedia(AUDIO_CONSTRAINTS)
      streamRef.current = stream
      const mimeType = getMimeType()
      const mediaRecorder = new MediaRecorder(stream, { mimeType })
      mediaRecorderRef.current = mediaRecorder
      mediaRecorder.ondataavailable = (event: BlobEvent) => {
        if (event.data.size > 0) {
          audioChunksRef.current.push(event.data)
        }
      }
      mediaRecorder.onstop = () => {
        const audioBlob = new Blob(audioChunksRef.current, { type: mimeType })
        processAudio(audioBlob)
      }
      mediaRecorder.start()
      updateRecording({ isRecording: true })
    } catch (err) {
      updateRecording({
        error: "Microphone permission denied",
        isRecording: false,
      })
      console.error("Microphone error:", err)
    }
  }, [processAudio, updateRecording])
  const handleRecordToggle = useCallback(() => {
    if (recording.isRecording) {
      stopRecording()
    } else {
      startRecording()
    }
  }, [recording.isRecording, startRecording, stopRecording])
  // Global Alt/Option+Space shortcut toggles recording.
  useEffect(() => {
    const handleKeyDown = (e: KeyboardEvent) => {
      if (e.altKey && e.code === "Space") {
        e.preventDefault()
        handleRecordToggle()
      }
    }
    window.addEventListener("keydown", handleKeyDown)
    return () => window.removeEventListener("keydown", handleKeyDown)
  }, [handleRecordToggle])
  // Release the microphone on unmount.
  useEffect(() => {
    return cleanupStream
  }, [cleanupStream])
  return (
    <div className="mx-auto w-full">
      <Card className="border-border relative m-0 gap-0 overflow-hidden p-0 shadow-2xl">
        <div className="relative py-6">
          <div className="flex h-32 items-center justify-center">
            {recording.isProcessing && <TranscriberProcessing />}
            {(Boolean(recording.transcript) || Boolean(recording.error)) && (
              <TranscriberTranscript
                transcript={recording.transcript}
                error={recording.error}
              />
            )}
            {!recording.isProcessing &&
              !Boolean(recording.transcript) &&
              !Boolean(recording.error) && (
                <LiveWaveform
                  active={recording.isRecording}
                  barWidth={5}
                  barGap={2}
                  barRadius={8}
                  barColor="#71717a"
                  fadeEdges
                  fadeWidth={48}
                  sensitivity={0.8}
                  smoothingTimeConstant={0.85}
                  className="w-full"
                />
              )}
          </div>
        </div>
        <Separator />
        <div className="bg-card px-4 py-2">
          <div className="flex items-center justify-between">
            <div className="flex items-center gap-3">
              <span
                className={cn(
                  "text-muted-foreground/60 font-mono text-[10px] tracking-widest uppercase",
                  (recording.transcriptionTime &&
                    Boolean(recording.transcript)) ||
                    Boolean(recording.error)
                    ? "animate-in fade-in duration-500"
                    : "opacity-0"
                )}
              >
                {recording.error
                  ? "Error"
                  : recording.transcriptionTime
                    ? `${(recording.transcriptionTime / 1000).toFixed(2)}s`
                    : "0.00s"}
              </span>
            </div>
            <div className="flex items-center gap-3">
              <Button
                variant="outline"
                size="sm"
                className="gap-2"
                onClick={handleRecordToggle}
                disabled={recording.isProcessing}
                aria-label={
                  recording.isRecording ? "Stop recording" : "Start recording"
                }
              >
                {recording.isRecording || recording.isProcessing
                  ? "Stop"
                  : "Record"}
                <kbd className="bg-muted text-muted-foreground pointer-events-none inline-flex h-5 items-center gap-1 rounded border px-1.5 font-mono text-[10px] font-medium select-none">
                  <span className="text-xs">⌥</span>Space
                </kbd>
              </Button>
            </div>
          </div>
        </div>
      </Card>
    </div>
  )
}
// Dimmed, inactive waveform shown while a transcription request is in flight.
const TranscriberProcessing = () => (
  <LiveWaveform
    processing
    active={false}
    barColor="#71717a"
    barWidth={4}
    barGap={1}
    barRadius={8}
    fadeEdges
    fadeWidth={48}
    className="w-full opacity-60"
  />
)
// Scrollable transcript (or error) panel with a copy-to-clipboard button.
// Errors take precedence over the transcript and render in red; the copy
// button only appears for a successful, non-empty transcript.
const TranscriberTranscript = ({
  transcript,
  error,
}: {
  transcript: string
  error: string
}) => {
  const displayText = error || transcript
  const showCopy = Boolean(transcript) && !error
  return (
    <Fragment>
      <div className="relative w-full max-w-2xl px-6">
        <ScrollArea className="h-32 w-full">
          <div
            className={cn(
              "text-foreground py-1 pr-8 text-left text-sm leading-relaxed",
              error && "text-red-500"
            )}
          >
            <Streamdown>{displayText}</Streamdown>
          </div>
        </ScrollArea>
        {showCopy && (
          <Button
            variant="ghost"
            size="icon"
            aria-label="Copy transcript"
            className="absolute top-1 right-2 h-6 w-6 opacity-50 transition-opacity hover:opacity-100"
            onClick={() => navigator.clipboard.writeText(transcript)}
          >
            <Copy className="h-3.5 w-3.5" />
          </Button>
        )}
      </div>
    </Fragment>
  )
}
// getUserMedia constraints: voice-oriented processing enabled on the
// microphone track.
const AUDIO_CONSTRAINTS: MediaStreamConstraints = {
  audio: {
    echoCancellation: true,
    noiseSuppression: true,
    autoGainControl: true,
  },
}
// Recording formats to try, in preference order (see getMimeType below).
const SUPPORTED_MIME_TYPES = ["audio/webm;codecs=opus", "audio/webm"] as const
// Pick the first recording format the browser's MediaRecorder supports,
// falling back to plain "audio/webm" when none match.
function getMimeType(): string {
  const supported = SUPPORTED_MIME_TYPES.find((candidate) =>
    MediaRecorder.isTypeSupported(candidate)
  )
  return supported ?? "audio/webm"
}
Transcriber
transcriber-01
0.00s
"use client"
import { Fragment, useCallback, useEffect, useRef, useState } from "react"
import { Copy } from "lucide-react"
import { Streamdown } from "streamdown"
import { cn } from "@/lib/utils"
import {
transcribeAudio,
type TranscriptionResult,
} from "@/app/transcriber-01/actions/transcribe"
import { Button } from "@/components/ui/button"
import { Card } from "@/components/ui/card"
import { LiveWaveform } from "@/components/ui/live-waveform"
import { ScrollArea } from "@/components/ui/scroll-area"
import { Separator } from "@/components/ui/separator"
// UI state for one record → transcribe cycle.
interface RecordingState {
  isRecording: boolean // microphone is live and MediaRecorder is running
  isProcessing: boolean // audio uploaded, waiting for the server action
  transcript: string // last successful transcription result
  error: string // user-facing error text ("" when none)
  transcriptionTime?: number // server-reported duration in milliseconds
}
export default function Transcriber01() {
const [recording, setRecording] = useState<RecordingState>({
isRecording: false,
isProcessing: false,
transcript: "",
error: "",
})
const mediaRecorderRef = useRef<MediaRecorder | null>(null)
const audioChunksRef = useRef<Blob[]>([])
const streamRef = useRef<MediaStream | null>(null)
const updateRecording = useCallback((updates: Partial<RecordingState>) => {
setRecording((prev) => ({ ...prev, ...updates }))
}, [])
const cleanupStream = useCallback(() => {
if (streamRef.current) {
streamRef.current.getTracks().forEach((track) => track.stop())
streamRef.current = null
}
}, [])
const stopRecording = useCallback(() => {
if (mediaRecorderRef.current?.state !== "inactive") {
mediaRecorderRef.current?.stop()
}
cleanupStream()
updateRecording({ isRecording: false })
}, [cleanupStream, updateRecording])
const processAudio = useCallback(
async (audioBlob: Blob) => {
updateRecording({ isProcessing: true, error: "" })
try {
const result: TranscriptionResult = await transcribeAudio({
audio: new File([audioBlob], "recording.webm", {
type: "audio/webm",
}),
})
if (result.error) {
throw new Error(result.error)
}
updateRecording({
transcript: result.text || "",
transcriptionTime: result.transcriptionTime,
isProcessing: false,
})
} catch (err) {
console.error("Transcription error:", err)
updateRecording({
error:
err instanceof Error ? err.message : "Failed to transcribe audio",
isProcessing: false,
})
}
},
[updateRecording]
)
const startRecording = useCallback(async () => {
try {
updateRecording({
transcript: "",
error: "",
transcriptionTime: undefined,
})
audioChunksRef.current = []
const stream =
await navigator.mediaDevices.getUserMedia(AUDIO_CONSTRAINTS)
streamRef.current = stream
const mimeType = getMimeType()
const mediaRecorder = new MediaRecorder(stream, { mimeType })
mediaRecorderRef.current = mediaRecorder
mediaRecorder.ondataavailable = (event: BlobEvent) => {
if (event.data.size > 0) {
audioChunksRef.current.push(event.data)
}
}
mediaRecorder.onstop = () => {
const audioBlob = new Blob(audioChunksRef.current, { type: mimeType })
processAudio(audioBlob)
}
mediaRecorder.start()
updateRecording({ isRecording: true })
} catch (err) {
updateRecording({
error: "Microphone permission denied",
isRecording: false,
})
console.error("Microphone error:", err)
}
}, [processAudio, updateRecording])
const handleRecordToggle = useCallback(() => {
if (recording.isRecording) {
stopRecording()
} else {
startRecording()
}
}, [recording.isRecording, startRecording, stopRecording])
useEffect(() => {
const handleKeyDown = (e: KeyboardEvent) => {
if (e.altKey && e.code === "Space") {
e.preventDefault()
handleRecordToggle()
}
}
window.addEventListener("keydown", handleKeyDown)
return () => window.removeEventListener("keydown", handleKeyDown)
}, [handleRecordToggle])
useEffect(() => {
return cleanupStream
}, [cleanupStream])
return (
<div className="mx-auto w-full">
<Card className="border-border relative m-0 gap-0 overflow-hidden p-0 shadow-2xl">
<div className="relative py-6">
<div className="flex h-32 items-center justify-center">
{recording.isProcessing && <TranscriberProcessing />}
{(Boolean(recording.transcript) || Boolean(recording.error)) && (
<TranscriberTranscript
transcript={recording.transcript}
error={recording.error}
/>
)}
{!recording.isProcessing &&
!Boolean(recording.transcript) &&
!Boolean(recording.error) && (
<LiveWaveform
active={recording.isRecording}
barWidth={5}
barGap={2}
barRadius={8}
barColor="#71717a"
fadeEdges
fadeWidth={48}
sensitivity={0.8}
smoothingTimeConstant={0.85}
className="w-full"
/>
)}
</div>
</div>
<Separator />
<div className="bg-card px-4 py-2">
<div className="flex items-center justify-between">
<div className="flex items-center gap-3">
<span
className={cn(
"text-muted-foreground/60 font-mono text-[10px] tracking-widest uppercase",
(recording.transcriptionTime &&
Boolean(recording.transcript)) ||
Boolean(recording.error)
? "animate-in fade-in duration-500"
: "opacity-0"
)}
>
{recording.error
? "Error"
: recording.transcriptionTime
? `${(recording.transcriptionTime / 1000).toFixed(2)}s`
: "0.00s"}
</span>
</div>
<div className="flex items-center gap-3">
<Button
variant="outline"
size="sm"
className="gap-2"
onClick={handleRecordToggle}
disabled={recording.isProcessing}
aria-label={
recording.isRecording ? "Stop recording" : "Start recording"
}
>
{recording.isRecording || recording.isProcessing
? "Stop"
: "Record"}
<kbd className="bg-muted text-muted-foreground pointer-events-none inline-flex h-5 items-center gap-1 rounded border px-1.5 font-mono text-[10px] font-medium select-none">
<span className="text-xs">⌥</span>Space
</kbd>
</Button>
</div>
</div>
</div>
</Card>
</div>
)
}
// Transcription-in-progress indicator: a dimmed, inactive waveform rendered
// while the recorded audio is being transcribed.
const TranscriberProcessing = () => (
  <LiveWaveform
    active={false}
    processing
    barWidth={4}
    barGap={1}
    barRadius={8}
    barColor="#71717a"
    fadeEdges
    fadeWidth={48}
    className="w-full opacity-60"
  />
)
// Shows the finished transcript (or an error) inside a scroll area, plus a
// copy-to-clipboard button. When both are present, `error` wins over
// `transcript` for display.
const TranscriberTranscript = ({
  transcript,
  error,
}: {
  transcript: string
  error: string
}) => {
  const displayText = error || transcript
  return (
    // The Fragment wrapper was redundant (single child) and has been removed.
    <div className="relative w-full max-w-2xl px-6">
      <ScrollArea className="h-32 w-full">
        <div
          className={cn(
            "text-foreground py-1 pr-8 text-left text-sm leading-relaxed",
            error && "text-red-500"
          )}
        >
          <Streamdown>{displayText}</Streamdown>
        </div>
      </ScrollArea>
      {transcript && !error && (
        <Button
          variant="ghost"
          size="icon"
          className="absolute top-1 right-2 h-6 w-6 opacity-50 transition-opacity hover:opacity-100"
          onClick={() => {
            // Fire-and-forget: `void` marks the promise as intentionally
            // unawaited instead of leaving an unhandled rejection path.
            void navigator.clipboard.writeText(transcript)
          }}
          aria-label="Copy transcript"
        >
          <Copy className="h-3.5 w-3.5" />
        </Button>
      )}
    </div>
  )
}
// Microphone capture settings: enable the browser's built-in audio cleanup.
const AUDIO_CONSTRAINTS: MediaStreamConstraints = {
  audio: {
    echoCancellation: true,
    noiseSuppression: true,
    autoGainControl: true,
  },
}

// Preferred container/codec combinations, best first.
const SUPPORTED_MIME_TYPES = ["audio/webm;codecs=opus", "audio/webm"] as const

// Returns the first MIME type this browser's MediaRecorder reports support
// for, falling back to plain "audio/webm" when none match.
function getMimeType(): string {
  const supported = SUPPORTED_MIME_TYPES.find((candidate) =>
    MediaRecorder.isTypeSupported(candidate)
  )
  return supported ?? "audio/webm"
}
Files
import { Speaker } from "@/components/speaker"
// Demo page: renders the standalone Speaker example component.
export default function Page() {
return <Speaker />
}
EL-01 Speaker
speaker-01
import { Speaker } from "@/components/speaker"
// Demo page: renders the standalone Speaker example component.
export default function Page() {
return <Speaker />
}
Files
"use client"
import { useCallback, useEffect, useRef, useState } from "react"
import { zodResolver } from "@hookform/resolvers/zod"
import { useForm } from "react-hook-form"
import { cn } from "@/lib/utils"
import { voiceToFormAction } from "@/app/voice-form/actions/voice-to-form"
import {
exampleFormSchema,
ExampleFormValues,
} from "@/app/voice-form/schema"
import { Button } from "@/components/ui/button"
import {
Card,
CardContent,
CardDescription,
CardHeader,
CardTitle,
} from "@/components/ui/card"
import {
Form,
FormControl,
FormField,
FormItem,
FormLabel,
FormMessage,
} from "@/components/ui/form"
import { Input } from "@/components/ui/input"
import { VoiceButton } from "@/components/ui/voice-button"
// Microphone capture settings: enable the browser's built-in audio cleanup.
const AUDIO_CONSTRAINTS: MediaStreamConstraints = {
  audio: {
    echoCancellation: true,
    noiseSuppression: true,
    autoGainControl: true,
  },
}

// Preferred container/codec combinations, best first.
const SUPPORTED_MIME_TYPES = ["audio/webm;codecs=opus", "audio/webm"] as const

// Returns the first MIME type this browser's MediaRecorder reports support
// for, falling back to plain "audio/webm" when none match.
function getMimeType(): string {
  const supported = SUPPORTED_MIME_TYPES.find((candidate) =>
    MediaRecorder.isTypeSupported(candidate)
  )
  return supported ?? "audio/webm"
}
// Voice-to-form demo page: records microphone audio, sends it to a server
// action for transcription/field extraction, and fills the form with the
// returned values.
export default function Page() {
  const [isRecording, setIsRecording] = useState(false)
  const [isProcessing, setIsProcessing] = useState(false)
  const [error, setError] = useState("")
  const [success, setSuccess] = useState(false)
  // Recorder, chunk buffer and stream live in refs so re-renders never
  // interrupt an in-flight recording.
  const mediaRecorderRef = useRef<MediaRecorder | null>(null)
  const audioChunksRef = useRef<Blob[]>([])
  const streamRef = useRef<MediaStream | null>(null)
  // Pending "reset success flag" timer; cancelled on unmount so we never
  // call setState on an unmounted component.
  const successTimeoutRef = useRef<ReturnType<typeof setTimeout> | null>(null)
  const form = useForm<ExampleFormValues>({
    resolver: zodResolver(exampleFormSchema),
    defaultValues: {
      firstName: "",
      lastName: "",
    },
    mode: "onChange",
  })
  // Stops all microphone tracks and releases the captured stream.
  const cleanupStream = useCallback(() => {
    if (streamRef.current) {
      streamRef.current.getTracks().forEach((track) => track.stop())
      streamRef.current = null
    }
  }, [])
  // Sends the recorded blob to the server action and applies any returned
  // field values to the form (validating as they land).
  const processAudio = useCallback(
    async (audioBlob: Blob) => {
      setIsProcessing(true)
      setError("")
      setSuccess(false)
      try {
        const audioFile = new File([audioBlob], "audio.webm", {
          type: audioBlob.type,
        })
        const result = await voiceToFormAction(audioFile)
        if (result.data && Object.keys(result.data).length > 0) {
          Object.entries(result.data).forEach(([key, value]) => {
            if (value) {
              form.setValue(key as keyof ExampleFormValues, value as string, {
                shouldValidate: true,
              })
            }
          })
          setSuccess(true)
          // Track the timer so unmount can cancel it (fixes a potential
          // setState-after-unmount if the user navigates away within 2s).
          if (successTimeoutRef.current) {
            clearTimeout(successTimeoutRef.current)
          }
          successTimeoutRef.current = setTimeout(() => setSuccess(false), 2000)
        }
      } catch (err) {
        console.error("Voice input error:", err)
        setError(err instanceof Error ? err.message : "Failed to process audio")
      } finally {
        setIsProcessing(false)
      }
    },
    [form]
  )
  // Stops the recorder; its onstop handler then triggers processAudio.
  const stopRecording = useCallback(() => {
    if (mediaRecorderRef.current?.state !== "inactive") {
      mediaRecorderRef.current?.stop()
    }
    cleanupStream()
    setIsRecording(false)
  }, [cleanupStream])
  // Requests the microphone, wires up a MediaRecorder and starts capturing.
  const startRecording = useCallback(async () => {
    try {
      setError("")
      audioChunksRef.current = []
      const stream =
        await navigator.mediaDevices.getUserMedia(AUDIO_CONSTRAINTS)
      streamRef.current = stream
      const mimeType = getMimeType()
      const mediaRecorder = new MediaRecorder(stream, { mimeType })
      mediaRecorderRef.current = mediaRecorder
      mediaRecorder.ondataavailable = (event: BlobEvent) => {
        if (event.data.size > 0) {
          audioChunksRef.current.push(event.data)
        }
      }
      mediaRecorder.onstop = () => {
        const audioBlob = new Blob(audioChunksRef.current, { type: mimeType })
        processAudio(audioBlob)
      }
      mediaRecorder.start()
      setIsRecording(true)
    } catch (err) {
      // Release the mic if getUserMedia succeeded but recorder setup failed;
      // otherwise the track (and the browser's mic indicator) stays live.
      cleanupStream()
      setError("Microphone permission denied")
      console.error("Microphone error:", err)
    }
  }, [cleanupStream, processAudio])
  const handleVoiceToggle = useCallback(() => {
    if (isRecording) {
      stopRecording()
    } else {
      startRecording()
    }
  }, [isRecording, startRecording, stopRecording])
  // On unmount: release the stream and cancel the pending success-reset timer.
  useEffect(() => {
    return () => {
      cleanupStream()
      if (successTimeoutRef.current) {
        clearTimeout(successTimeoutRef.current)
      }
    }
  }, [cleanupStream])
  const onSubmit = (data: ExampleFormValues) => {
    console.log("Form submitted:", data)
  }
  // Single derived state for the VoiceButton; precedence matters:
  // processing > recording > success > error > idle.
  const voiceState = isProcessing
    ? "processing"
    : isRecording
      ? "recording"
      : success
        ? "success"
        : error
          ? "error"
          : "idle"
  return (
    <div className="mx-auto w-full">
      <Card className="relative overflow-hidden">
        <div className={cn("flex flex-col gap-2")}>
          <CardHeader>
            <div className="flex items-start justify-between">
              <div className="space-y-1">
                <CardTitle>Voice Fill</CardTitle>
                <CardDescription>Powered by ElevenLabs Scribe</CardDescription>
              </div>
              <VoiceButton
                state={voiceState}
                onPress={handleVoiceToggle}
                disabled={isProcessing}
                trailing="Voice Fill"
              />
            </div>
          </CardHeader>
          <CardContent>
            <Form {...form}>
              <form
                onSubmit={form.handleSubmit(onSubmit)}
                className="space-y-6"
              >
                <div className="grid grid-cols-1 gap-4 sm:grid-cols-2">
                  <FormField
                    control={form.control}
                    name="firstName"
                    render={({ field }) => (
                      <FormItem>
                        <FormLabel>First Name *</FormLabel>
                        <FormControl>
                          <Input placeholder="John" {...field} />
                        </FormControl>
                        <FormMessage />
                      </FormItem>
                    )}
                  />
                  <FormField
                    control={form.control}
                    name="lastName"
                    render={({ field }) => (
                      <FormItem>
                        <FormLabel>Last Name *</FormLabel>
                        <FormControl>
                          <Input placeholder="Doe" {...field} />
                        </FormControl>
                        <FormMessage />
                      </FormItem>
                    )}
                  />
                </div>
              </form>
            </Form>
          </CardContent>
        </div>
      </Card>
    </div>
  )
}
Voice-fill form
voice-form-01
Voice Fill
Powered by ElevenLabs Scribe
"use client"
import { useCallback, useEffect, useRef, useState } from "react"
import { zodResolver } from "@hookform/resolvers/zod"
import { useForm } from "react-hook-form"
import { cn } from "@/lib/utils"
import { voiceToFormAction } from "@/app/voice-form/actions/voice-to-form"
import {
exampleFormSchema,
ExampleFormValues,
} from "@/app/voice-form/schema"
import { Button } from "@/components/ui/button"
import {
Card,
CardContent,
CardDescription,
CardHeader,
CardTitle,
} from "@/components/ui/card"
import {
Form,
FormControl,
FormField,
FormItem,
FormLabel,
FormMessage,
} from "@/components/ui/form"
import { Input } from "@/components/ui/input"
import { VoiceButton } from "@/components/ui/voice-button"
// Microphone capture settings: enable the browser's built-in audio cleanup.
const AUDIO_CONSTRAINTS: MediaStreamConstraints = {
  audio: {
    echoCancellation: true,
    noiseSuppression: true,
    autoGainControl: true,
  },
}

// Preferred container/codec combinations, best first.
const SUPPORTED_MIME_TYPES = ["audio/webm;codecs=opus", "audio/webm"] as const

// Returns the first MIME type this browser's MediaRecorder reports support
// for, falling back to plain "audio/webm" when none match.
function getMimeType(): string {
  const supported = SUPPORTED_MIME_TYPES.find((candidate) =>
    MediaRecorder.isTypeSupported(candidate)
  )
  return supported ?? "audio/webm"
}
// Voice-to-form demo page: records microphone audio, sends it to a server
// action for transcription/field extraction, and fills the form with the
// returned values.
export default function Page() {
const [isRecording, setIsRecording] = useState(false)
const [isProcessing, setIsProcessing] = useState(false)
const [error, setError] = useState("")
const [success, setSuccess] = useState(false)
// Recorder, chunk buffer and stream live in refs so re-renders never
// interrupt an in-flight recording.
const mediaRecorderRef = useRef<MediaRecorder | null>(null)
const audioChunksRef = useRef<Blob[]>([])
const streamRef = useRef<MediaStream | null>(null)
const form = useForm<ExampleFormValues>({
resolver: zodResolver(exampleFormSchema),
defaultValues: {
firstName: "",
lastName: "",
},
mode: "onChange",
})
// Stops all microphone tracks and releases the captured stream.
const cleanupStream = useCallback(() => {
if (streamRef.current) {
streamRef.current.getTracks().forEach((track) => track.stop())
streamRef.current = null
}
}, [])
// Sends the recorded blob to the server action and applies any returned
// field values to the form (validating as they land).
const processAudio = useCallback(
async (audioBlob: Blob) => {
setIsProcessing(true)
setError("")
setSuccess(false)
try {
const audioFile = new File([audioBlob], "audio.webm", {
type: audioBlob.type,
})
const result = await voiceToFormAction(audioFile)
if (result.data && Object.keys(result.data).length > 0) {
Object.entries(result.data).forEach(([key, value]) => {
if (value) {
form.setValue(key as keyof ExampleFormValues, value as string, {
shouldValidate: true,
})
}
})
setSuccess(true)
// NOTE(review): this timeout is never cleared; it can call setState after
// unmount if the user navigates away within 2s — confirm intended.
setTimeout(() => setSuccess(false), 2000)
}
} catch (err) {
console.error("Voice input error:", err)
setError(err instanceof Error ? err.message : "Failed to process audio")
} finally {
setIsProcessing(false)
}
},
[form]
)
// Stops the recorder; its onstop handler then triggers processAudio.
const stopRecording = useCallback(() => {
if (mediaRecorderRef.current?.state !== "inactive") {
mediaRecorderRef.current?.stop()
}
cleanupStream()
setIsRecording(false)
}, [cleanupStream])
// Requests the microphone, wires up a MediaRecorder and starts capturing.
const startRecording = useCallback(async () => {
try {
setError("")
audioChunksRef.current = []
const stream =
await navigator.mediaDevices.getUserMedia(AUDIO_CONSTRAINTS)
streamRef.current = stream
const mimeType = getMimeType()
const mediaRecorder = new MediaRecorder(stream, { mimeType })
mediaRecorderRef.current = mediaRecorder
mediaRecorder.ondataavailable = (event: BlobEvent) => {
if (event.data.size > 0) {
audioChunksRef.current.push(event.data)
}
}
mediaRecorder.onstop = () => {
const audioBlob = new Blob(audioChunksRef.current, { type: mimeType })
processAudio(audioBlob)
}
mediaRecorder.start()
setIsRecording(true)
} catch (err) {
// NOTE(review): if getUserMedia succeeded but MediaRecorder setup threw,
// the acquired stream is not stopped here — the mic may stay held; confirm.
setError("Microphone permission denied")
console.error("Microphone error:", err)
}
}, [processAudio])
const handleVoiceToggle = useCallback(() => {
if (isRecording) {
stopRecording()
} else {
startRecording()
}
}, [isRecording, startRecording, stopRecording])
// Release the microphone when the component unmounts.
useEffect(() => {
return cleanupStream
}, [cleanupStream])
const onSubmit = (data: ExampleFormValues) => {
console.log("Form submitted:", data)
}
// Single derived state for the VoiceButton; precedence:
// processing > recording > success > error > idle.
const voiceState = isProcessing
? "processing"
: isRecording
? "recording"
: success
? "success"
: error
? "error"
: "idle"
return (
<div className="mx-auto w-full">
<Card className="relative overflow-hidden">
<div className={cn("flex flex-col gap-2")}>
<CardHeader>
<div className="flex items-start justify-between">
<div className="space-y-1">
<CardTitle>Voice Fill</CardTitle>
<CardDescription>Powered by ElevenLabs Scribe</CardDescription>
</div>
<VoiceButton
state={voiceState}
onPress={handleVoiceToggle}
disabled={isProcessing}
trailing="Voice Fill"
/>
</div>
</CardHeader>
<CardContent>
<Form {...form}>
<form
onSubmit={form.handleSubmit(onSubmit)}
className="space-y-6"
>
<div className="grid grid-cols-1 gap-4 sm:grid-cols-2">
<FormField
control={form.control}
name="firstName"
render={({ field }) => (
<FormItem>
<FormLabel>First Name *</FormLabel>
<FormControl>
<Input placeholder="John" {...field} />
</FormControl>
<FormMessage />
</FormItem>
)}
/>
<FormField
control={form.control}
name="lastName"
render={({ field }) => (
<FormItem>
<FormLabel>Last Name *</FormLabel>
<FormControl>
<Input placeholder="Doe" {...field} />
</FormControl>
<FormMessage />
</FormItem>
)}
/>
</div>
</form>
</Form>
</CardContent>
</div>
</Card>
</div>
)
}
Files
"use client"
import { PauseIcon, PlayIcon } from "lucide-react"
import { cn } from "@/lib/utils"
import {
AudioPlayerButton,
AudioPlayerDuration,
AudioPlayerProgress,
AudioPlayerProvider,
AudioPlayerTime,
exampleTracks,
useAudioPlayer,
} from "@/components/ui/audio-player"
import { Button } from "@/components/ui/button"
import { Card } from "@/components/ui/card"
import { ScrollArea } from "@/components/ui/scroll-area"
// A playable playlist entry as consumed by the audio-player provider.
interface Track {
// Stable identifier used for active-track comparisons.
id: string
// Display name shown in the playlist and transport pane.
name: string
// Audio source URL handed to player.play().
url: string
}
// Demo page: wraps the music player in a typed AudioPlayerProvider so child
// components share one playback context.
export default function Page() {
return (
<AudioPlayerProvider<Track>>
<MusicPlayer />
</AudioPlayerProvider>
)
}
// Two-pane layout: a scrollable track list on the left (top on mobile) and
// the transport/progress pane filling the remaining space.
const MusicPlayer = () => {
  return (
    <Card className="mx-auto w-full overflow-hidden p-0">
      <div className="flex flex-col lg:h-[180px] lg:flex-row">
        <div className="bg-muted/50 flex flex-col overflow-hidden lg:h-full lg:w-64">
          <ScrollArea className="h-48 w-full lg:h-full">
            <div className="space-y-1 p-3">
              {exampleTracks.map((track, i) => (
                <SongListItem key={track.id} song={track} trackNumber={i + 1} />
              ))}
            </div>
          </ScrollArea>
        </div>
        <Player />
      </div>
    </Card>
  )
}
// Transport pane: active track title plus play/pause button, elapsed time,
// seek bar and total duration.
const Player = () => {
  const player = useAudioPlayer<Track>()
  // Falls back to a placeholder until a track has been selected.
  const title = player.activeItem?.data?.name ?? "No track selected"
  return (
    <div className="flex flex-1 items-center p-4 sm:p-6">
      <div className="mx-auto w-full max-w-2xl">
        <div className="mb-4">
          <h3 className="text-base font-semibold sm:text-lg">{title}</h3>
        </div>
        <div className="flex items-center gap-3 sm:gap-4">
          <AudioPlayerButton
            variant="outline"
            size="default"
            className="h-12 w-12 shrink-0 sm:h-10 sm:w-10"
            disabled={!player.activeItem}
          />
          <div className="flex flex-1 items-center gap-2 sm:gap-3">
            <AudioPlayerTime className="text-xs tabular-nums" />
            <AudioPlayerProgress className="flex-1" />
            <AudioPlayerDuration className="text-xs tabular-nums" />
          </div>
        </div>
      </div>
    </div>
  )
}
// One playlist row: shows the track number (swapped for a play icon on
// hover), toggles play/pause for its track, and highlights when active.
const SongListItem = ({
  song,
  trackNumber,
}: {
  song: Track
  trackNumber: number
}) => {
  const player = useAudioPlayer<Track>()
  const isActive = player.isItemActive(song.id)
  const isCurrentlyPlaying = isActive && player.isPlaying
  // Pause when this row is already playing; otherwise (re)start its track.
  const handleClick = () => {
    if (isCurrentlyPlaying) {
      player.pause()
      return
    }
    player.play({ id: song.id, src: song.url, data: song })
  }
  return (
    <div className="group/song relative">
      <Button
        variant={isActive ? "secondary" : "ghost"}
        size="sm"
        className={cn(
          "h-10 w-full justify-start px-3 font-normal sm:h-9 sm:px-2",
          isActive && "bg-secondary"
        )}
        onClick={handleClick}
      >
        <div className="flex w-full items-center gap-3">
          <div className="flex w-5 shrink-0 items-center justify-center">
            {isCurrentlyPlaying ? (
              <PauseIcon className="h-4 w-4 sm:h-3.5 sm:w-3.5" />
            ) : (
              <>
                <span className="text-muted-foreground/60 text-sm tabular-nums group-hover/song:invisible">
                  {trackNumber}
                </span>
                <PlayIcon className="invisible absolute h-4 w-4 group-hover/song:visible sm:h-3.5 sm:w-3.5" />
              </>
            )}
          </div>
          <span className="truncate text-left text-sm">{song.name}</span>
        </div>
      </Button>
    </div>
  )
}
Music player with playlist
music-player-01
No track selected
0:00 / --:--
"use client"
import { PauseIcon, PlayIcon } from "lucide-react"
import { cn } from "@/lib/utils"
import {
AudioPlayerButton,
AudioPlayerDuration,
AudioPlayerProgress,
AudioPlayerProvider,
AudioPlayerTime,
exampleTracks,
useAudioPlayer,
} from "@/components/ui/audio-player"
import { Button } from "@/components/ui/button"
import { Card } from "@/components/ui/card"
import { ScrollArea } from "@/components/ui/scroll-area"
// A playable playlist entry as consumed by the audio-player provider.
interface Track {
// Stable identifier used for active-track comparisons.
id: string
// Display name shown in the playlist and transport pane.
name: string
// Audio source URL handed to player.play().
url: string
}
// Demo page: wraps the music player in a typed AudioPlayerProvider so child
// components share one playback context.
export default function Page() {
return (
<AudioPlayerProvider<Track>>
<MusicPlayer />
</AudioPlayerProvider>
)
}
// Two-pane layout: a scrollable track list on the left (top on mobile) and
// the transport/progress pane filling the remaining space.
const MusicPlayer = () => {
return (
<Card className="mx-auto w-full overflow-hidden p-0">
<div className="flex flex-col lg:h-[180px] lg:flex-row">
<div className="bg-muted/50 flex flex-col overflow-hidden lg:h-full lg:w-64">
<ScrollArea className="h-48 w-full lg:h-full">
<div className="space-y-1 p-3">
{exampleTracks.map((song, index) => (
<SongListItem
key={song.id}
song={song}
trackNumber={index + 1}
/>
))}
</div>
</ScrollArea>
</div>
<Player />
</div>
</Card>
)
}
// Transport pane: active track title plus play/pause button, elapsed time,
// seek bar and total duration. Shows a placeholder until a track is chosen.
const Player = () => {
const player = useAudioPlayer<Track>()
return (
<div className="flex flex-1 items-center p-4 sm:p-6">
<div className="mx-auto w-full max-w-2xl">
<div className="mb-4">
<h3 className="text-base font-semibold sm:text-lg">
{player.activeItem?.data?.name ?? "No track selected"}
</h3>
</div>
<div className="flex items-center gap-3 sm:gap-4">
<AudioPlayerButton
variant="outline"
size="default"
className="h-12 w-12 shrink-0 sm:h-10 sm:w-10"
disabled={!player.activeItem}
/>
<div className="flex flex-1 items-center gap-2 sm:gap-3">
<AudioPlayerTime className="text-xs tabular-nums" />
<AudioPlayerProgress className="flex-1" />
<AudioPlayerDuration className="text-xs tabular-nums" />
</div>
</div>
</div>
</div>
)
}
// One playlist row: shows the track number (swapped for a play icon on
// hover), toggles play/pause for its track, and highlights when active.
const SongListItem = ({
song,
trackNumber,
}: {
song: Track
trackNumber: number
}) => {
const player = useAudioPlayer<Track>()
const isActive = player.isItemActive(song.id)
// "Playing" only when this row's track is both active and not paused.
const isCurrentlyPlaying = isActive && player.isPlaying
return (
<div className="group/song relative">
<Button
variant={isActive ? "secondary" : "ghost"}
size="sm"
className={cn(
"h-10 w-full justify-start px-3 font-normal sm:h-9 sm:px-2",
isActive && "bg-secondary"
)}
onClick={() => {
if (isCurrentlyPlaying) {
player.pause()
} else {
player.play({
id: song.id,
src: song.url,
data: song,
})
}
}}
>
<div className="flex w-full items-center gap-3">
<div className="flex w-5 shrink-0 items-center justify-center">
{isCurrentlyPlaying ? (
<PauseIcon className="h-4 w-4 sm:h-3.5 sm:w-3.5" />
) : (
<>
<span className="text-muted-foreground/60 text-sm tabular-nums group-hover/song:invisible">
{trackNumber}
</span>
<PlayIcon className="invisible absolute h-4 w-4 group-hover/song:visible sm:h-3.5 sm:w-3.5" />
</>
)}
</div>
<span className="truncate text-left text-sm">{song.name}</span>
</div>
</Button>
</div>
)
}
Files
"use client"
import {
AudioPlayerButton,
AudioPlayerDuration,
AudioPlayerProgress,
AudioPlayerProvider,
AudioPlayerTime,
exampleTracks,
useAudioPlayer,
} from "@/components/ui/audio-player"
import { Card } from "@/components/ui/card"
// Demo page: single-track player wrapped in a shared playback context.
export default function Page() {
return (
<AudioPlayerProvider>
<MusicPlayerDemo />
</AudioPlayerProvider>
)
}
// Minimal single-track player: title, play/pause, elapsed time, seek bar
// and duration for one hard-coded example track.
const MusicPlayerDemo = () => {
const player = useAudioPlayer<{ name: string }>()
// NOTE(review): hard-codes the 10th example track; exampleTracks must have
// at least 10 entries or `track.name` below throws — confirm upstream.
const track = exampleTracks[9]
return (
<Card className="w-full overflow-hidden p-4">
<div className="space-y-4">
<div>
<h3 className="text-base font-semibold">
{player.activeItem?.data?.name || track.name}
</h3>
</div>
<div className="flex items-center gap-3">
<AudioPlayerButton
variant="outline"
size="default"
className="h-10 w-10 shrink-0"
item={{
id: track.id,
src: track.url,
data: track,
}}
/>
<div className="flex flex-1 items-center gap-2">
<AudioPlayerTime className="text-xs tabular-nums" />
<AudioPlayerProgress className="flex-1" />
<AudioPlayerDuration className="text-xs tabular-nums" />
</div>
</div>
</div>
</Card>
)
}
Simple music player
music-player-02
II - 09
0:00 / --:--
"use client"
import {
AudioPlayerButton,
AudioPlayerDuration,
AudioPlayerProgress,
AudioPlayerProvider,
AudioPlayerTime,
exampleTracks,
useAudioPlayer,
} from "@/components/ui/audio-player"
import { Card } from "@/components/ui/card"
// Demo page: single-track player wrapped in a shared playback context.
export default function Page() {
return (
<AudioPlayerProvider>
<MusicPlayerDemo />
</AudioPlayerProvider>
)
}
// Minimal single-track player: title, play/pause, elapsed time, seek bar
// and duration for one hard-coded example track.
const MusicPlayerDemo = () => {
const player = useAudioPlayer<{ name: string }>()
// NOTE(review): hard-codes the 10th example track; exampleTracks must have
// at least 10 entries or `track.name` below throws — confirm upstream.
const track = exampleTracks[9]
return (
<Card className="w-full overflow-hidden p-4">
<div className="space-y-4">
<div>
<h3 className="text-base font-semibold">
{player.activeItem?.data?.name || track.name}
</h3>
</div>
<div className="flex items-center gap-3">
<AudioPlayerButton
variant="outline"
size="default"
className="h-10 w-10 shrink-0"
item={{
id: track.id,
src: track.url,
data: track,
}}
/>
<div className="flex flex-1 items-center gap-2">
<AudioPlayerTime className="text-xs tabular-nums" />
<AudioPlayerProgress className="flex-1" />
<AudioPlayerDuration className="text-xs tabular-nums" />
</div>
</div>
</div>
</Card>
)
}
Files
"use client"
import { useCallback, useState } from "react"
import { useConversation } from "@elevenlabs/react"
import { AnimatePresence, motion } from "framer-motion"
import { Loader2Icon, PhoneIcon, PhoneOffIcon } from "lucide-react"
import { cn } from "@/lib/utils"
import { Button } from "@/components/ui/button"
import { Card } from "@/components/ui/card"
import { Orb } from "@/components/ui/orb"
import { ShimmeringText } from "@/components/ui/shimmering-text"
// Demo agent metadata; the agent id comes from a build-time env var.
// NOTE(review): the non-null assertion means a missing
// NEXT_PUBLIC_ELEVENLABS_AGENT_ID only fails at runtime — confirm it is set.
const DEFAULT_AGENT = {
agentId: process.env.NEXT_PUBLIC_ELEVENLABS_AGENT_ID!,
name: "Customer Support",
description: "Tap to start voice chat",
}
// Connection lifecycle used to drive the call button and status indicator.
type AgentState =
| "disconnected"
| "connecting"
| "connected"
| "disconnecting"
| null
// Voice chat demo: one call button that starts/stops an ElevenLabs agent
// session over WebRTC, with an Orb visualizer driven by live input/output
// volume from the conversation SDK.
export default function Page() {
  const [agentState, setAgentState] = useState<AgentState>("disconnected")
  const [errorMessage, setErrorMessage] = useState<string | null>(null)
  const conversation = useConversation({
    onConnect: () => console.log("Connected"),
    onDisconnect: () => console.log("Disconnected"),
    onMessage: (message) => console.log("Message:", message),
    onError: (error) => {
      console.error("Error:", error)
      setAgentState("disconnected")
    },
  })
  // Asks for mic permission, then opens a WebRTC session with the agent.
  const startConversation = useCallback(async () => {
    try {
      setErrorMessage(null)
      // Permission probe only: stop the probe tracks immediately so the mic
      // isn't held open in parallel with the SDK's own capture stream.
      const probeStream = await navigator.mediaDevices.getUserMedia({
        audio: true,
      })
      probeStream.getTracks().forEach((track) => track.stop())
      await conversation.startSession({
        agentId: DEFAULT_AGENT.agentId,
        connectionType: "webrtc",
        onStatusChange: (status) => setAgentState(status.status),
      })
    } catch (error) {
      console.error("Error starting conversation:", error)
      setAgentState("disconnected")
      if (error instanceof DOMException && error.name === "NotAllowedError") {
        setErrorMessage("Please enable microphone permissions in your browser.")
      }
    }
  }, [conversation])
  // Toggles the call: idle -> connect, connected -> hang up.
  const handleCall = useCallback(() => {
    if (agentState === "disconnected" || agentState === null) {
      setAgentState("connecting")
      startConversation()
    } else if (agentState === "connected") {
      // Fire-and-forget; `void` marks the promise as intentionally unawaited.
      void conversation.endSession()
      setAgentState("disconnected")
    }
  }, [agentState, conversation, startConversation])
  const isCallActive = agentState === "connected"
  const isTransitioning =
    agentState === "connecting" || agentState === "disconnecting"
  // Perceptual volume curve: sqrt compresses the dynamic range and the 2.5x
  // gain makes quiet input visible, clamped to the Orb's [0, 1] domain.
  const getInputVolume = useCallback(() => {
    const rawValue = conversation.getInputVolume?.() ?? 0
    return Math.min(1.0, Math.pow(rawValue, 0.5) * 2.5)
  }, [conversation])
  const getOutputVolume = useCallback(() => {
    const rawValue = conversation.getOutputVolume?.() ?? 0
    return Math.min(1.0, Math.pow(rawValue, 0.5) * 2.5)
  }, [conversation])
  return (
    <Card className="flex h-[400px] w-full flex-col items-center justify-center overflow-hidden p-6">
      <div className="flex flex-col items-center gap-6">
        <div className="relative size-32">
          <div className="bg-muted relative h-full w-full rounded-full p-1 shadow-[inset_0_2px_8px_rgba(0,0,0,0.1)] dark:shadow-[inset_0_2px_8px_rgba(0,0,0,0.5)]">
            <div className="bg-background h-full w-full overflow-hidden rounded-full shadow-[inset_0_0_12px_rgba(0,0,0,0.05)] dark:shadow-[inset_0_0_12px_rgba(0,0,0,0.3)]">
              <Orb
                className="h-full w-full"
                volumeMode="manual"
                getInputVolume={getInputVolume}
                getOutputVolume={getOutputVolume}
              />
            </div>
          </div>
        </div>
        <div className="flex flex-col items-center gap-2">
          <h2 className="text-xl font-semibold">{DEFAULT_AGENT.name}</h2>
          <AnimatePresence mode="wait">
            {errorMessage ? (
              <motion.p
                key="error"
                initial={{ opacity: 0, y: -10 }}
                animate={{ opacity: 1, y: 0 }}
                exit={{ opacity: 0, y: 10 }}
                className="text-destructive text-center text-sm"
              >
                {errorMessage}
              </motion.p>
            ) : agentState === "disconnected" || agentState === null ? (
              <motion.p
                key="disconnected"
                initial={{ opacity: 0, y: -10 }}
                animate={{ opacity: 1, y: 0 }}
                exit={{ opacity: 0, y: 10 }}
                className="text-muted-foreground text-sm"
              >
                {DEFAULT_AGENT.description}
              </motion.p>
            ) : (
              <motion.div
                key="status"
                initial={{ opacity: 0, y: -10 }}
                animate={{ opacity: 1, y: 0 }}
                exit={{ opacity: 0, y: 10 }}
                className="flex items-center gap-2"
              >
                <div
                  className={cn(
                    "h-2 w-2 rounded-full transition-all duration-300",
                    agentState === "connected" && "bg-green-500",
                    isTransitioning && "bg-primary/60 animate-pulse"
                  )}
                />
                <span className="text-sm capitalize">
                  {isTransitioning ? (
                    <ShimmeringText text={agentState} />
                  ) : (
                    <span className="text-green-600">Connected</span>
                  )}
                </span>
              </motion.div>
            )}
          </AnimatePresence>
        </div>
        <Button
          onClick={handleCall}
          disabled={isTransitioning}
          size="icon"
          variant={isCallActive ? "secondary" : "default"}
          className="h-12 w-12 rounded-full"
        >
          <AnimatePresence mode="wait">
            {isTransitioning ? (
              <motion.div
                key="loading"
                initial={{ opacity: 0, rotate: 0 }}
                animate={{ opacity: 1, rotate: 360 }}
                exit={{ opacity: 0 }}
                transition={{
                  rotate: { duration: 1, repeat: Infinity, ease: "linear" },
                }}
              >
                <Loader2Icon className="h-5 w-5" />
              </motion.div>
            ) : isCallActive ? (
              <motion.div
                key="end"
                initial={{ opacity: 0, scale: 0.5 }}
                animate={{ opacity: 1, scale: 1 }}
                exit={{ opacity: 0, scale: 0.5 }}
              >
                <PhoneOffIcon className="h-5 w-5" />
              </motion.div>
            ) : (
              <motion.div
                key="start"
                initial={{ opacity: 0, scale: 0.5 }}
                animate={{ opacity: 1, scale: 1 }}
                exit={{ opacity: 0, scale: 0.5 }}
              >
                <PhoneIcon className="h-5 w-5" />
              </motion.div>
            )}
          </AnimatePresence>
        </Button>
      </div>
    </Card>
  )
}
Voice chat 2
voice-chat-02
Customer Support
Tap to start voice chat
"use client"
import { useCallback, useState } from "react"
import { useConversation } from "@elevenlabs/react"
import { AnimatePresence, motion } from "framer-motion"
import { Loader2Icon, PhoneIcon, PhoneOffIcon } from "lucide-react"
import { cn } from "@/lib/utils"
import { Button } from "@/components/ui/button"
import { Card } from "@/components/ui/card"
import { Orb } from "@/components/ui/orb"
import { ShimmeringText } from "@/components/ui/shimmering-text"
// Demo agent metadata; the agent id comes from a build-time env var.
// NOTE(review): the non-null assertion means a missing
// NEXT_PUBLIC_ELEVENLABS_AGENT_ID only fails at runtime — confirm it is set.
const DEFAULT_AGENT = {
agentId: process.env.NEXT_PUBLIC_ELEVENLABS_AGENT_ID!,
name: "Customer Support",
description: "Tap to start voice chat",
}
// Connection lifecycle used to drive the call button and status indicator.
type AgentState =
| "disconnected"
| "connecting"
| "connected"
| "disconnecting"
| null
// Voice chat demo: one call button that starts/stops an ElevenLabs agent
// session over WebRTC, with an Orb visualizer driven by live input/output
// volume from the conversation SDK.
export default function Page() {
const [agentState, setAgentState] = useState<AgentState>("disconnected")
const [errorMessage, setErrorMessage] = useState<string | null>(null)
const conversation = useConversation({
onConnect: () => console.log("Connected"),
onDisconnect: () => console.log("Disconnected"),
onMessage: (message) => console.log("Message:", message),
onError: (error) => {
console.error("Error:", error)
setAgentState("disconnected")
},
})
// Asks for mic permission, then opens a WebRTC session with the agent.
const startConversation = useCallback(async () => {
try {
setErrorMessage(null)
// NOTE(review): this permission-probe stream is discarded without stopping
// its tracks, so the mic may stay held alongside the SDK's own stream —
// confirm whether the tracks should be stopped here.
await navigator.mediaDevices.getUserMedia({ audio: true })
await conversation.startSession({
agentId: DEFAULT_AGENT.agentId,
connectionType: "webrtc",
onStatusChange: (status) => setAgentState(status.status),
})
} catch (error) {
console.error("Error starting conversation:", error)
setAgentState("disconnected")
if (error instanceof DOMException && error.name === "NotAllowedError") {
setErrorMessage("Please enable microphone permissions in your browser.")
}
}
}, [conversation])
// Toggles the call: idle -> connect, connected -> hang up.
const handleCall = useCallback(() => {
if (agentState === "disconnected" || agentState === null) {
setAgentState("connecting")
startConversation()
} else if (agentState === "connected") {
conversation.endSession()
setAgentState("disconnected")
}
}, [agentState, conversation, startConversation])
const isCallActive = agentState === "connected"
const isTransitioning =
agentState === "connecting" || agentState === "disconnecting"
// Perceptual volume curve: sqrt compresses the dynamic range and the 2.5x
// gain makes quiet input visible, clamped to the Orb's [0, 1] domain.
const getInputVolume = useCallback(() => {
const rawValue = conversation.getInputVolume?.() ?? 0
return Math.min(1.0, Math.pow(rawValue, 0.5) * 2.5)
}, [conversation])
const getOutputVolume = useCallback(() => {
const rawValue = conversation.getOutputVolume?.() ?? 0
return Math.min(1.0, Math.pow(rawValue, 0.5) * 2.5)
}, [conversation])
return (
<Card className="flex h-[400px] w-full flex-col items-center justify-center overflow-hidden p-6">
<div className="flex flex-col items-center gap-6">
<div className="relative size-32">
<div className="bg-muted relative h-full w-full rounded-full p-1 shadow-[inset_0_2px_8px_rgba(0,0,0,0.1)] dark:shadow-[inset_0_2px_8px_rgba(0,0,0,0.5)]">
<div className="bg-background h-full w-full overflow-hidden rounded-full shadow-[inset_0_0_12px_rgba(0,0,0,0.05)] dark:shadow-[inset_0_0_12px_rgba(0,0,0,0.3)]">
<Orb
className="h-full w-full"
volumeMode="manual"
getInputVolume={getInputVolume}
getOutputVolume={getOutputVolume}
/>
</div>
</div>
</div>
<div className="flex flex-col items-center gap-2">
<h2 className="text-xl font-semibold">{DEFAULT_AGENT.name}</h2>
<AnimatePresence mode="wait">
{errorMessage ? (
<motion.p
key="error"
initial={{ opacity: 0, y: -10 }}
animate={{ opacity: 1, y: 0 }}
exit={{ opacity: 0, y: 10 }}
className="text-destructive text-center text-sm"
>
{errorMessage}
</motion.p>
) : agentState === "disconnected" || agentState === null ? (
<motion.p
key="disconnected"
initial={{ opacity: 0, y: -10 }}
animate={{ opacity: 1, y: 0 }}
exit={{ opacity: 0, y: 10 }}
className="text-muted-foreground text-sm"
>
{DEFAULT_AGENT.description}
</motion.p>
) : (
<motion.div
key="status"
initial={{ opacity: 0, y: -10 }}
animate={{ opacity: 1, y: 0 }}
exit={{ opacity: 0, y: 10 }}
className="flex items-center gap-2"
>
<div
className={cn(
"h-2 w-2 rounded-full transition-all duration-300",
agentState === "connected" && "bg-green-500",
isTransitioning && "bg-primary/60 animate-pulse"
)}
/>
<span className="text-sm capitalize">
{isTransitioning ? (
<ShimmeringText text={agentState} />
) : (
<span className="text-green-600">Connected</span>
)}
</span>
</motion.div>
)}
</AnimatePresence>
</div>
<Button
onClick={handleCall}
disabled={isTransitioning}
size="icon"
variant={isCallActive ? "secondary" : "default"}
className="h-12 w-12 rounded-full"
>
<AnimatePresence mode="wait">
{isTransitioning ? (
<motion.div
key="loading"
initial={{ opacity: 0, rotate: 0 }}
animate={{ opacity: 1, rotate: 360 }}
exit={{ opacity: 0 }}
transition={{
rotate: { duration: 1, repeat: Infinity, ease: "linear" },
}}
>
<Loader2Icon className="h-5 w-5" />
</motion.div>
) : isCallActive ? (
<motion.div
key="end"
initial={{ opacity: 0, scale: 0.5 }}
animate={{ opacity: 1, scale: 1 }}
exit={{ opacity: 0, scale: 0.5 }}
>
<PhoneOffIcon className="h-5 w-5" />
</motion.div>
) : (
<motion.div
key="start"
initial={{ opacity: 0, scale: 0.5 }}
animate={{ opacity: 1, scale: 1 }}
exit={{ opacity: 0, scale: 0.5 }}
>
<PhoneIcon className="h-5 w-5" />
</motion.div>
)}
</AnimatePresence>
</Button>
</div>
</Card>
)
}
Files
"use client"
import { useState } from "react"
import { CheckIcon, CopyIcon } from "lucide-react"
import { cn } from "@/lib/utils"
import { Button } from "@/components/ui/button"
import { Card, CardContent } from "@/components/ui/card"
import {
Conversation,
ConversationContent,
ConversationEmptyState,
ConversationScrollButton,
} from "@/components/ui/conversation"
import { ConversationBar } from "@/components/ui/conversation-bar"
import { Message, MessageContent } from "@/components/ui/message"
import { Orb } from "@/components/ui/orb"
import { Response } from "@/components/ui/response"
import {
Tooltip,
TooltipContent,
TooltipProvider,
TooltipTrigger,
} from "@/components/ui/tooltip"
// Agent id injected at build time; the non-null assertion assumes
// NEXT_PUBLIC_ELEVENLABS_AGENT_ID is set in the environment — TODO confirm in deploy config.
const DEFAULT_AGENT_ID = process.env.NEXT_PUBLIC_ELEVENLABS_AGENT_ID!
/** One transcript entry rendered in the conversation list. */
interface ChatMessage {
/** Who produced the message: the human user or the AI assistant. */
role: "user" | "assistant"
/** Raw message text (rendered by the <Response> component). */
content: string
}
/**
 * Voice chat demo: a live transcript rendered above a ConversationBar
 * that drives an ElevenLabs voice/text session.
 *
 * Transcript sources:
 *  - onSendMessage: text typed into the bar (always role "user")
 *  - onMessage:     live session events (user transcription or agent reply)
 * The transcript is reset whenever a session connects or disconnects.
 */
export default function Page() {
  const [messages, setMessages] = useState<ChatMessage[]>([])
  // Index of the message most recently copied; drives the transient
  // check-mark / "Copied!" feedback on that message's copy button.
  const [copiedIndex, setCopiedIndex] = useState<number | null>(null)

  return (
    <div className="relative mx-auto h-[600px] w-full">
      <Card className="flex h-full w-full flex-col gap-0 overflow-hidden">
        <CardContent className="relative flex-1 overflow-hidden p-0">
          {/* Bottom padding keeps the last message clear of the absolutely
              positioned ConversationBar below. */}
          <Conversation className="absolute inset-0 pb-[88px]">
            <ConversationContent className="flex min-w-0 flex-col gap-2 p-6 pb-6">
              {messages.length === 0 ? (
                <ConversationEmptyState
                  icon={<Orb className="size-12" />}
                  title="Start a conversation"
                  description="Tap the phone button or type a message"
                />
              ) : (
                // Index keys are acceptable here: the list is append-only
                // and only ever reset wholesale on connect/disconnect.
                messages.map((message, index) => (
                  <div key={index} className="flex w-full flex-col gap-1">
                    <Message from={message.role}>
                      <MessageContent className="max-w-full min-w-0">
                        <Response className="w-auto [overflow-wrap:anywhere] whitespace-pre-wrap">
                          {message.content}
                        </Response>
                      </MessageContent>
                      {message.role === "assistant" && (
                        <div className="ring-border size-6 flex-shrink-0 self-end overflow-hidden rounded-full ring-1">
                          <Orb className="h-full w-full" />
                        </div>
                      )}
                    </Message>
                    {message.role === "assistant" && (
                      <div className="flex items-center gap-1">
                        <TooltipProvider>
                          <Tooltip>
                            <TooltipTrigger asChild>
                              <Button
                                className={cn(
                                  "text-muted-foreground hover:text-foreground relative size-9 p-1.5"
                                )}
                                size="sm"
                                type="button"
                                variant="ghost"
                                onClick={() => {
                                  // Clipboard writes return a promise and can
                                  // reject (permissions, insecure context);
                                  // this copy is best-effort, so swallow it.
                                  void navigator.clipboard
                                    .writeText(message.content)
                                    .catch(() => {})
                                  setCopiedIndex(index)
                                  // Functional update so a stale timer from an
                                  // earlier copy cannot clear the indicator of
                                  // a newer copy on another message.
                                  setTimeout(() => {
                                    setCopiedIndex((current) =>
                                      current === index ? null : current
                                    )
                                  }, 2000)
                                }}
                              >
                                {copiedIndex === index ? (
                                  <CheckIcon className="size-4" />
                                ) : (
                                  <CopyIcon className="size-4" />
                                )}
                                <span className="sr-only">
                                  {copiedIndex === index ? "Copied!" : "Copy"}
                                </span>
                              </Button>
                            </TooltipTrigger>
                            <TooltipContent>
                              <p>
                                {copiedIndex === index ? "Copied!" : "Copy"}
                              </p>
                            </TooltipContent>
                          </Tooltip>
                        </TooltipProvider>
                      </div>
                    )}
                  </div>
                ))
              )}
            </ConversationContent>
            <ConversationScrollButton className="bottom-[100px]" />
          </Conversation>
          <div className="absolute right-0 bottom-0 left-0 flex justify-center">
            <ConversationBar
              className="w-full max-w-2xl"
              agentId={DEFAULT_AGENT_ID}
              onConnect={() => setMessages([])}
              onDisconnect={() => setMessages([])}
              onSendMessage={(message) => {
                // Text typed into the bar is always a user message.
                const userMessage: ChatMessage = {
                  role: "user",
                  content: message,
                }
                setMessages((prev) => [...prev, userMessage])
              }}
              onMessage={(message) => {
                // Live session events carry a source discriminator.
                const newMessage: ChatMessage = {
                  role: message.source === "user" ? "user" : "assistant",
                  content: message.message,
                }
                setMessages((prev) => [...prev, newMessage])
              }}
              onError={(error) => console.error("Conversation error:", error)}
            />
          </div>
        </CardContent>
      </Card>
    </div>
  )
}
Voice chat 3
voice-chat-03
Start a conversation
Tap the phone button or type a message
Customer Support
"use client"
import { useState } from "react"
import { CheckIcon, CopyIcon } from "lucide-react"
import { cn } from "@/lib/utils"
import { Button } from "@/components/ui/button"
import { Card, CardContent } from "@/components/ui/card"
import {
Conversation,
ConversationContent,
ConversationEmptyState,
ConversationScrollButton,
} from "@/components/ui/conversation"
import { ConversationBar } from "@/components/ui/conversation-bar"
import { Message, MessageContent } from "@/components/ui/message"
import { Orb } from "@/components/ui/orb"
import { Response } from "@/components/ui/response"
import {
Tooltip,
TooltipContent,
TooltipProvider,
TooltipTrigger,
} from "@/components/ui/tooltip"
// Agent id injected at build time; the non-null assertion assumes
// NEXT_PUBLIC_ELEVENLABS_AGENT_ID is set in the environment — TODO confirm in deploy config.
const DEFAULT_AGENT_ID = process.env.NEXT_PUBLIC_ELEVENLABS_AGENT_ID!
/** One transcript entry rendered in the conversation list. */
interface ChatMessage {
/** Who produced the message: the human user or the AI assistant. */
role: "user" | "assistant"
/** Raw message text (rendered by the <Response> component). */
content: string
}
/**
 * Voice chat demo: a live transcript rendered above a ConversationBar
 * that drives an ElevenLabs voice/text session.
 *
 * Transcript sources:
 *  - onSendMessage: text typed into the bar (always role "user")
 *  - onMessage:     live session events (user transcription or agent reply)
 * The transcript is reset whenever a session connects or disconnects.
 */
export default function Page() {
  const [messages, setMessages] = useState<ChatMessage[]>([])
  // Index of the message most recently copied; drives the transient
  // check-mark / "Copied!" feedback on that message's copy button.
  const [copiedIndex, setCopiedIndex] = useState<number | null>(null)

  return (
    <div className="relative mx-auto h-[600px] w-full">
      <Card className="flex h-full w-full flex-col gap-0 overflow-hidden">
        <CardContent className="relative flex-1 overflow-hidden p-0">
          {/* Bottom padding keeps the last message clear of the absolutely
              positioned ConversationBar below. */}
          <Conversation className="absolute inset-0 pb-[88px]">
            <ConversationContent className="flex min-w-0 flex-col gap-2 p-6 pb-6">
              {messages.length === 0 ? (
                <ConversationEmptyState
                  icon={<Orb className="size-12" />}
                  title="Start a conversation"
                  description="Tap the phone button or type a message"
                />
              ) : (
                // Index keys are acceptable here: the list is append-only
                // and only ever reset wholesale on connect/disconnect.
                messages.map((message, index) => (
                  <div key={index} className="flex w-full flex-col gap-1">
                    <Message from={message.role}>
                      <MessageContent className="max-w-full min-w-0">
                        <Response className="w-auto [overflow-wrap:anywhere] whitespace-pre-wrap">
                          {message.content}
                        </Response>
                      </MessageContent>
                      {message.role === "assistant" && (
                        <div className="ring-border size-6 flex-shrink-0 self-end overflow-hidden rounded-full ring-1">
                          <Orb className="h-full w-full" />
                        </div>
                      )}
                    </Message>
                    {message.role === "assistant" && (
                      <div className="flex items-center gap-1">
                        <TooltipProvider>
                          <Tooltip>
                            <TooltipTrigger asChild>
                              <Button
                                className={cn(
                                  "text-muted-foreground hover:text-foreground relative size-9 p-1.5"
                                )}
                                size="sm"
                                type="button"
                                variant="ghost"
                                onClick={() => {
                                  // Clipboard writes return a promise and can
                                  // reject (permissions, insecure context);
                                  // this copy is best-effort, so swallow it.
                                  void navigator.clipboard
                                    .writeText(message.content)
                                    .catch(() => {})
                                  setCopiedIndex(index)
                                  // Functional update so a stale timer from an
                                  // earlier copy cannot clear the indicator of
                                  // a newer copy on another message.
                                  setTimeout(() => {
                                    setCopiedIndex((current) =>
                                      current === index ? null : current
                                    )
                                  }, 2000)
                                }}
                              >
                                {copiedIndex === index ? (
                                  <CheckIcon className="size-4" />
                                ) : (
                                  <CopyIcon className="size-4" />
                                )}
                                <span className="sr-only">
                                  {copiedIndex === index ? "Copied!" : "Copy"}
                                </span>
                              </Button>
                            </TooltipTrigger>
                            <TooltipContent>
                              <p>
                                {copiedIndex === index ? "Copied!" : "Copy"}
                              </p>
                            </TooltipContent>
                          </Tooltip>
                        </TooltipProvider>
                      </div>
                    )}
                  </div>
                ))
              )}
            </ConversationContent>
            <ConversationScrollButton className="bottom-[100px]" />
          </Conversation>
          <div className="absolute right-0 bottom-0 left-0 flex justify-center">
            <ConversationBar
              className="w-full max-w-2xl"
              agentId={DEFAULT_AGENT_ID}
              onConnect={() => setMessages([])}
              onDisconnect={() => setMessages([])}
              onSendMessage={(message) => {
                // Text typed into the bar is always a user message.
                const userMessage: ChatMessage = {
                  role: "user",
                  content: message,
                }
                setMessages((prev) => [...prev, userMessage])
              }}
              onMessage={(message) => {
                // Live session events carry a source discriminator.
                const newMessage: ChatMessage = {
                  role: message.source === "user" ? "user" : "assistant",
                  content: message.message,
                }
                setMessages((prev) => [...prev, newMessage])
              }}
              onError={(error) => console.error("Conversation error:", error)}
            />
          </div>
        </CardContent>
      </Card>
    </div>
  )
}