Examples
A collection of building blocks for agents and audio that you can customize and extend.
Files
"use client"
import React, { useCallback, useEffect, useMemo, useRef, useState } from "react"
import Link from "next/link"
import { useScribe } from "@elevenlabs/react"
import { AnimatePresence, motion } from "framer-motion"
import { Copy } from "lucide-react"
import { cn } from "@/lib/utils"
import { useDebounce } from "@/hooks/use-debounce"
import { usePrevious } from "@/hooks/use-previous"
import { Badge } from "@/components/ui/badge"
import { Button } from "@/components/ui/button"
import { ShimmeringText } from "@/components/ui/shimmering-text"
import { getScribeToken } from "./actions/get-scribe-token"
import { LanguageSelector } from "./components/language-selector"
// Per-session recording metrics and error state.
interface RecordingState {
  // Human-readable error message; empty string when no error is shown.
  error: string
  // Rolling window of measured transcript latencies in ms; update sites cap
  // it to the most recent 30 entries via slice(-29) + append.
  latenciesMs: number[]
}
// UI connection lifecycle. NOTE(review): "disconnecting" is declared but the
// component transitions straight to "idle" on stop — confirm it is reserved
// for future use.
type ConnectionState = "idle" | "connecting" | "connected" | "disconnecting"
// A single transcript character that fades in from a blur. Memoized so
// characters that are already on screen are not re-animated when new text
// is appended after them.
const TranscriptCharacter = React.memo(
  ({ char, delay }: { char: string; delay: number }) => (
    <motion.span
      initial={{ opacity: 0, filter: "blur(3.5px)" }}
      animate={{ opacity: 1, filter: "none" }}
      transition={{ delay, duration: 0.5 }}
      // Hint the compositor only for characters that actually animate in.
      style={{ willChange: delay > 0 ? "filter, opacity" : "auto" }}
    >
      {char}
    </motion.span>
  )
)
TranscriptCharacter.displayName = "TranscriptCharacter"
// Memoize background effects to prevent re-renders
const BackgroundAura = React.memo(
({ status, isConnected }: { status: string; isConnected: boolean }) => {
const isActive = status === "connecting" || isConnected
return (
<div
className={cn(
"pointer-events-none fixed inset-0 transition-opacity duration-300 ease-out",
isActive ? "opacity-100" : "opacity-0"
)}
>
{/* Center bottom pool - main glow */}
<div
className="absolute bottom-0 left-1/2 -translate-x-1/2"
style={{
width: "130%",
height: "20vh",
background:
"radial-gradient(ellipse 100% 100% at 50% 100%, rgba(34, 211, 238, 0.5) 0%, rgba(168, 85, 247, 0.4) 35%, rgba(251, 146, 60, 0.5) 70%, transparent 100%)",
filter: "blur(80px)",
}}
/>
{/* Pulsing layer */}
<div
className={cn(
"absolute bottom-0 left-1/2 -translate-x-1/2 animate-pulse",
isConnected ? "opacity-100" : "opacity-80"
)}
style={{
width: "100%",
height: "18vh",
background:
"radial-gradient(ellipse 100% 100% at 50% 100%, rgba(134, 239, 172, 0.5) 0%, rgba(192, 132, 252, 0.4) 50%, transparent 100%)",
filter: "blur(60px)",
animationDuration: "4s",
}}
/>
{/* Left corner bloom */}
<div
className="absolute bottom-0 left-0"
style={{
width: "25vw",
height: "30vh",
background:
"radial-gradient(circle at 0% 100%, rgba(34, 211, 238, 0.5) 0%, rgba(134, 239, 172, 0.3) 30%, transparent 60%)",
filter: "blur(70px)",
}}
/>
{/* Left rising glow - organic curve */}
<div
className="absolute bottom-0 -left-8"
style={{
width: "20vw",
height: "45vh",
background:
"radial-gradient(ellipse 50% 100% at 10% 100%, rgba(34, 211, 238, 0.4) 0%, rgba(134, 239, 172, 0.25) 25%, transparent 60%)",
filter: "blur(60px)",
animation: "pulseGlow 5s ease-in-out infinite alternate",
}}
/>
{/* Right corner bloom */}
<div
className="absolute right-0 bottom-0"
style={{
width: "25vw",
height: "30vh",
background:
"radial-gradient(circle at 100% 100%, rgba(251, 146, 60, 0.5) 0%, rgba(251, 146, 60, 0.3) 30%, transparent 60%)",
filter: "blur(70px)",
}}
/>
{/* Right rising glow - organic curve */}
<div
className="absolute -right-8 bottom-0"
style={{
width: "20vw",
height: "45vh",
background:
"radial-gradient(ellipse 50% 100% at 90% 100%, rgba(251, 146, 60, 0.4) 0%, rgba(192, 132, 252, 0.25) 25%, transparent 60%)",
filter: "blur(60px)",
animation: "pulseGlow 5s ease-in-out infinite alternate-reverse",
}}
/>
{/* Shimmer overlay */}
<div
className="absolute bottom-0 left-1/2 -translate-x-1/2"
style={{
width: "100%",
height: "15vh",
background:
"linear-gradient(90deg, rgba(34, 211, 238, 0.3) 0%, rgba(168, 85, 247, 0.3) 30%, rgba(251, 146, 60, 0.3) 60%, rgba(134, 239, 172, 0.3) 100%)",
filter: "blur(30px)",
animation: "shimmer 8s linear infinite",
}}
/>
</div>
)
}
)
BackgroundAura.displayName = "BackgroundAura"
// Memoize bottom controls with comparison function
const BottomControls = React.memo(
({
isConnected,
hasError,
isMac,
onStop,
}: {
isConnected: boolean
hasError: boolean
isMac: boolean
onStop: () => void
}) => {
return (
<AnimatePresence mode="popLayout">
{isConnected && !hasError && (
<motion.div
key="bottom-controls"
initial={{ opacity: 0, y: 10 }}
animate={{
opacity: 1,
y: 0,
transition: { duration: 0.1 },
}}
exit={{
opacity: 0,
y: 10,
transition: { duration: 0.1 },
}}
className="fixed bottom-8 left-1/2 z-50 flex -translate-x-1/2 items-center gap-2"
>
<button
onClick={onStop}
className="bg-foreground text-background border-foreground/10 inline-flex items-center gap-2 rounded-lg border px-3 py-2 text-sm font-medium shadow-lg transition-opacity hover:opacity-90"
>
Stop
<kbd className="border-background/20 bg-background/10 inline-flex h-5 items-center rounded border px-1.5 font-mono text-xs">
{isMac ? "⌘K" : "Ctrl+K"}
</kbd>
</button>
</motion.div>
)}
</AnimatePresence>
)
},
(prev, next) => {
if (prev.isConnected !== next.isConnected) return false
if (prev.hasError !== next.hasError) return false
if (prev.isMac !== next.isMac) return false
return true
}
)
BottomControls.displayName = "BottomControls"
/**
 * Realtime speech-to-text demo powered by the ElevenLabs Scribe realtime API.
 *
 * Connection state is tracked twice by design: in React state
 * (`connectionState`) so the UI re-renders, and mirrored into
 * `connectionStateRef` so async code (token fetch, Scribe callbacks,
 * debounced error handling) can read the *current* value without
 * re-subscribing or capturing stale closures.
 */
export default function RealtimeTranscriber01() {
  const [recording, setRecording] = useState<RecordingState>({
    error: "",
    latenciesMs: [],
  })
  const [selectedLanguage, setSelectedLanguage] = useState<string | null>(null)
  const [connectionState, setConnectionStateState] =
    useState<ConnectionState>("idle")
  const [localTranscript, setLocalTranscript] = useState("")
  // Default true so the ⌘K hint renders on the server; corrected on mount.
  const [isMac, setIsMac] = useState(true)
  useEffect(() => {
    setIsMac(/(Mac|iPhone|iPod|iPad)/i.test(navigator.userAgent))
  }, [])
  // performance.now() of the speech segment awaiting its first transcript.
  const segmentStartMsRef = useRef<number | null>(null)
  // Last partial transcript text, used to drop duplicate partial events.
  const lastTranscriptRef = useRef<string>("")
  // Accumulated finalized transcript segments for the current session.
  const finalTranscriptsRef = useRef<string[]>([])
  const startSoundRef = useRef<HTMLAudioElement | null>(null)
  const endSoundRef = useRef<HTMLAudioElement | null>(null)
  const errorSoundRef = useRef<HTMLAudioElement | null>(null)
  // Debounce handle for surfacing errors (transient errors are dropped).
  const errorTimeoutRef = useRef<NodeJS.Timeout | null>(null)
  // Timestamp of the last connect/disconnect, for rapid-click debouncing.
  const lastOperationTimeRef = useRef(0)
  const timerIntervalRef = useRef<NodeJS.Timeout | null>(null)
  // Mirror of connectionState readable from async callbacks.
  const connectionStateRef = useRef<ConnectionState>("idle")
  // Keep the ref and the state in lockstep.
  const updateConnectionState = useCallback(
    (next: ConnectionState) => {
      connectionStateRef.current = next
      setConnectionStateState(next)
    },
    [setConnectionStateState]
  )
  // Reset all per-session timers and transcript bookkeeping.
  const clearSessionRefs = useCallback(() => {
    if (timerIntervalRef.current) {
      clearInterval(timerIntervalRef.current)
      timerIntervalRef.current = null
    }
    if (errorTimeoutRef.current) {
      clearTimeout(errorTimeoutRef.current)
      errorTimeoutRef.current = null
    }
    segmentStartMsRef.current = null
    lastTranscriptRef.current = ""
    finalTranscriptsRef.current = []
  }, [])
  // === Callbacks for Scribe ===
  // Partial (in-progress) transcript: shown appended after finalized text.
  const onPartialTranscript = useCallback((data: { text?: string }) => {
    // Only process if we're connected
    if (connectionStateRef.current !== "connected") return
    const currentText = data.text || ""
    if (currentText === lastTranscriptRef.current) return
    lastTranscriptRef.current = currentText
    // Update local transcript with partial
    const fullText = finalTranscriptsRef.current.join(" ")
    const combined = fullText ? `${fullText} ${currentText}` : currentText
    setLocalTranscript(combined)
    // Record time-to-first-text for the current segment (window of 30).
    if (currentText.length > 0 && segmentStartMsRef.current != null) {
      const latency = performance.now() - segmentStartMsRef.current
      setRecording((prev) => ({
        ...prev,
        latenciesMs: [...prev.latenciesMs.slice(-29), latency],
      }))
      segmentStartMsRef.current = null
    }
  }, [])
  // Final transcript for a segment: appended to the finalized list.
  const onFinalTranscript = useCallback((data: { text?: string }) => {
    // Only process if we're connected
    if (connectionStateRef.current !== "connected") return
    lastTranscriptRef.current = ""
    if (data.text && data.text.length > 0) {
      // Add to final transcripts
      finalTranscriptsRef.current = [...finalTranscriptsRef.current, data.text]
      // Update local transcript
      setLocalTranscript(finalTranscriptsRef.current.join(" "))
      if (segmentStartMsRef.current != null) {
        const latency = performance.now() - segmentStartMsRef.current
        setRecording((prev) => ({
          ...prev,
          latenciesMs: [...prev.latenciesMs.slice(-29), latency],
        }))
      }
    }
    segmentStartMsRef.current = null
  }, [])
  // Errors are debounced 500ms so transient glitches around disconnects
  // never reach the UI.
  const onError = useCallback((error: Error | Event) => {
    console.error("[Scribe] Error:", error)
    // Ignore errors if we're not supposed to be connected
    if (connectionStateRef.current !== "connected") {
      console.log("[Scribe] Ignoring error - not connected")
      return
    }
    const errorMessage =
      error instanceof Error ? error.message : "Transcription error"
    if (errorTimeoutRef.current) {
      clearTimeout(errorTimeoutRef.current)
    }
    errorTimeoutRef.current = setTimeout(() => {
      // Re-check: the user may have stopped during the debounce window.
      if (connectionStateRef.current !== "connected") return
      setRecording((prev) => ({
        ...prev,
        error: errorMessage,
      }))
      errorSoundRef.current?.play().catch(() => {})
    }, 500)
  }, [])
  // Stable config object so useScribe does not resubscribe every render.
  const scribeConfig = useMemo(
    () => ({
      modelId: "scribe_realtime_v2" as const,
      onPartialTranscript,
      onFinalTranscript,
      onError,
    }),
    [onPartialTranscript, onFinalTranscript, onError]
  )
  const scribe = useScribe(scribeConfig)
  // Clear transcript when not connected
  useEffect(() => {
    if (connectionState !== "connected") {
      setLocalTranscript("")
    }
  }, [connectionState])
  // Simulate audio chunk timing for latency measurement: while connected,
  // restamp the segment start whenever the previous segment has completed.
  useEffect(() => {
    // Clear any existing interval
    if (timerIntervalRef.current) {
      clearInterval(timerIntervalRef.current)
      timerIntervalRef.current = null
    }
    if (connectionState !== "connected") return
    timerIntervalRef.current = setInterval(() => {
      if (segmentStartMsRef.current === null) {
        segmentStartMsRef.current = performance.now()
      }
    }, 100)
    return () => {
      if (timerIntervalRef.current) {
        clearInterval(timerIntervalRef.current)
        timerIntervalRef.current = null
      }
    }
  }, [connectionState])
  /**
   * Toggles the session: disconnects immediately when connected/connecting,
   * otherwise fetches a token and connects. Cancellation mid-connect is
   * detected by re-reading connectionStateRef after each await.
   */
  const handleToggleRecording = useCallback(async () => {
    const now = Date.now()
    const timeSinceLastOp = now - lastOperationTimeRef.current
    // DISCONNECT
    if (connectionState === "connected" || connectionState === "connecting") {
      console.log("[Scribe] Disconnecting...")
      // 1. Update UI state immediately
      updateConnectionState("idle")
      setLocalTranscript("")
      setRecording({ error: "", latenciesMs: [] })
      clearSessionRefs()
      // 2. Disconnect (async, don't wait)
      try {
        scribe.disconnect()
        scribe.clearTranscripts()
      } catch {
        // Ignore errors
      }
      // 3. Play sound
      if (endSoundRef.current) {
        endSoundRef.current.currentTime = 0
        endSoundRef.current.play().catch(() => {})
      }
      lastOperationTimeRef.current = now
      return
    }
    // Debounce rapid clicks for CONNECT
    if (timeSinceLastOp < 200) {
      console.log("[Scribe] Ignoring rapid click")
      return
    }
    lastOperationTimeRef.current = now
    // CONNECT
    if (connectionState !== "idle") {
      console.log("[Scribe] Not in idle state, ignoring")
      return
    }
    console.log("[Scribe] Connecting...")
    updateConnectionState("connecting")
    setLocalTranscript("")
    setRecording({ error: "", latenciesMs: [] })
    clearSessionRefs()
    try {
      const result = await getScribeToken()
      // Check if user cancelled using ref (gets current value)
      if (connectionStateRef.current === "idle") {
        console.log("[Scribe] Cancelled during token fetch")
        return
      }
      if (result.error || !result.token) {
        throw new Error(result.error || "Failed to get token")
      }
      await scribe.connect({
        token: result.token,
        languageCode: selectedLanguage || undefined,
        microphone: {
          echoCancellation: false,
          noiseSuppression: false,
          autoGainControl: true,
        },
      })
      // Check again after connect completes
      if (connectionStateRef.current !== "connecting") {
        console.log("[Scribe] Cancelled after connection")
        try {
          scribe.disconnect()
        } catch {
          // Ignore
        }
        return
      }
      console.log("[Scribe] Connected")
      updateConnectionState("connected")
      // Play start sound
      if (startSoundRef.current) {
        startSoundRef.current.currentTime = 0
        startSoundRef.current.play().catch(() => {})
      }
    } catch (error) {
      console.error("[Scribe] Connection error:", error)
      updateConnectionState("idle")
      setRecording((prev) => ({
        ...prev,
        error: error instanceof Error ? error.message : "Connection failed",
      }))
    }
  }, [
    clearSessionRefs,
    connectionState,
    scribe,
    selectedLanguage,
    updateConnectionState,
  ])
  // Cmd+K / Ctrl+K shortcut
  useEffect(() => {
    const handleKeyDown = (e: KeyboardEvent) => {
      // Compare case-insensitively so Shift/Caps Lock "K" still matches.
      if (!(e.metaKey || e.ctrlKey) || e.key.toLowerCase() !== "k") return
      // Don't hijack the shortcut while the user is typing in a field.
      // (Previously the shortcut only fired when the target was an
      // HTMLElement, which silently disabled it for e.g. SVG targets.)
      if (
        e.target instanceof HTMLElement &&
        (["INPUT", "TEXTAREA"].includes(e.target.tagName) ||
          e.target.isContentEditable)
      ) {
        return
      }
      e.preventDefault()
      // Fire-and-forget: errors are handled inside the toggle itself.
      void handleToggleRecording()
    }
    window.addEventListener("keydown", handleKeyDown)
    return () => {
      window.removeEventListener("keydown", handleKeyDown)
    }
  }, [handleToggleRecording])
  // Note: No unmount cleanup - React Strict Mode causes issues
  // The browser will handle websocket cleanup on page unload
  // Preload audio files on mount (no auto-play)
  useEffect(() => {
    const sounds = [
      {
        ref: startSoundRef,
        url: "https://ui.elevenlabs.io/sounds/transcriber-start.mp3",
      },
      {
        ref: endSoundRef,
        url: "https://ui.elevenlabs.io/sounds/transcriber-end.mp3",
      },
      {
        ref: errorSoundRef,
        url: "https://ui.elevenlabs.io/sounds/transcriber-error.mp3",
      },
    ]
    sounds.forEach(({ ref, url }) => {
      const audio = new Audio(url)
      audio.volume = 0.6
      audio.preload = "auto"
      audio.load()
      ref.current = audio
    })
  }, [])
  // Display text: prefer error, then local transcript
  const displayText = recording.error || localTranscript
  const hasContent = Boolean(displayText) && connectionState === "connected"
  // Determine if current transcript is partial (for styling).
  // NOTE(review): reading a ref during render does not trigger re-renders;
  // this relies on localTranscript updates re-rendering at the same time.
  const isPartial = Boolean(lastTranscriptRef.current)
  return (
    <div className="relative mx-auto flex h-full w-full max-w-4xl flex-col items-center justify-center">
      <BackgroundAura
        status={connectionState === "connecting" ? "connecting" : scribe.status}
        isConnected={connectionState === "connected"}
      />
      <style jsx>{`
        @keyframes shimmer {
          0% {
            transform: translateX(-20%) scale(1);
          }
          50% {
            transform: translateX(20%) scale(1.1);
          }
          100% {
            transform: translateX(-20%) scale(1);
          }
        }
        @keyframes drift {
          0% {
            transform: translateX(-10%) scale(1);
          }
          100% {
            transform: translateX(10%) scale(1.05);
          }
        }
        @keyframes pulseGlow {
          0% {
            opacity: 0.5;
            transform: translateY(0) scale(1);
          }
          100% {
            opacity: 0.8;
            transform: translateY(-5%) scale(1.02);
          }
        }
      `}</style>
      <div className="relative flex h-full w-full flex-col items-center justify-center gap-8 overflow-hidden px-8 py-12">
        {/* Main transcript area */}
        <div className="relative flex min-h-[350px] w-full flex-1 items-center justify-center overflow-hidden">
          {/* Transcript - shown when there's content */}
          <div
            className={cn(
              "absolute inset-0 transition-opacity duration-250",
              hasContent ? "opacity-100" : "pointer-events-none opacity-0"
            )}
          >
            {hasContent && (
              <TranscriberTranscript
                transcript={displayText}
                error={recording.error}
                isPartial={isPartial}
                isConnected={connectionState === "connected"}
              />
            )}
          </div>
          {/* Status text - shown when no content */}
          <div
            className={cn(
              "absolute inset-0 flex items-center justify-center transition-opacity duration-250",
              !hasContent ? "opacity-100" : "pointer-events-none opacity-0"
            )}
          >
            <div
              className={cn(
                "absolute transition-opacity duration-250",
                connectionState === "connecting"
                  ? "opacity-100"
                  : "pointer-events-none opacity-0"
              )}
            >
              <ShimmeringText
                text="Connecting..."
                className="text-2xl font-light tracking-wide whitespace-nowrap"
              />
            </div>
            <div
              className={cn(
                "absolute transition-opacity duration-250",
                connectionState === "connected" && !hasContent
                  ? "opacity-100"
                  : "pointer-events-none opacity-0"
              )}
            >
              <ShimmeringText
                text="Say something aloud..."
                className="text-3xl font-light tracking-wide whitespace-nowrap"
              />
            </div>
          </div>
          {/* Language selector and button - only shown when not connected */}
          <div
            className={cn(
              "absolute inset-0 flex items-center justify-center transition-opacity duration-250",
              connectionState === "idle"
                ? "opacity-100"
                : "pointer-events-none opacity-0"
            )}
          >
            <div className="flex w-full max-w-sm flex-col gap-4 px-8">
              <div className="flex flex-col items-center gap-6">
                <div className="flex flex-col items-center gap-2 text-center">
                  <h1 className="text-2xl font-semibold tracking-tight">
                    Realtime Speech to Text
                  </h1>
                  <p className="text-muted-foreground text-sm">
                    Transcribe your voice in real-time with high accuracy
                  </p>
                </div>
                <div className="w-full space-y-2">
                  <label className="text-foreground/70 text-sm font-medium">
                    Language
                  </label>
                  <LanguageSelector
                    value={selectedLanguage}
                    onValueChange={setSelectedLanguage}
                    disabled={connectionState !== "idle"}
                  />
                </div>
                <Button
                  onClick={handleToggleRecording}
                  size="lg"
                  className="bg-foreground/95 hover:bg-foreground/90 w-full justify-center gap-3"
                >
                  <span>Start Transcribing</span>
                  <kbd className="border-background/20 bg-background/10 hidden h-5 items-center gap-1 rounded border px-1.5 font-mono text-xs sm:inline-flex">
                    {isMac ? "⌘K" : "Ctrl+K"}
                  </kbd>
                </Button>
                <Badge variant="outline" asChild>
                  <Link
                    href="https://elevenlabs.io/speech-to-text"
                    target="_blank"
                    rel="noopener noreferrer"
                    className="text-foreground/60 hover:text-foreground/80 transition-colors"
                  >
                    Powered by ElevenLabs Speech to Text
                  </Link>
                </Badge>
              </div>
            </div>
          </div>
        </div>
        <BottomControls
          isConnected={connectionState === "connected"}
          hasError={Boolean(recording.error)}
          isMac={isMac}
          onStop={handleToggleRecording}
        />
      </div>
    </div>
  )
}
// Renders the transcript as per-character animated spans with auto-scroll
// and a copy button. Memoized so it only re-renders when the transcript or
// status flags change.
const TranscriberTranscript = React.memo(
  ({
    transcript,
    error,
    isPartial,
    isConnected,
  }: {
    transcript: string
    error: string
    isPartial?: boolean
    isConnected: boolean
  }) => {
    const characters = useMemo(() => transcript.split(""), [transcript])
    // Debounced count of previously shown characters: only characters past
    // this index receive an entrance animation, so existing text stays put.
    const previousNumChars = useDebounce(
      usePrevious(characters.length) || 0,
      100
    )
    const scrollRef = useRef<HTMLDivElement>(null)
    const scrollTimeoutRef = useRef<NodeJS.Timeout | null>(null)
    // Auto-scroll to bottom when connected and text is updating
    useEffect(() => {
      if (isConnected && scrollRef.current) {
        if (scrollTimeoutRef.current) {
          clearTimeout(scrollTimeoutRef.current)
        }
        // Small delay lets the new characters lay out before scrolling.
        scrollTimeoutRef.current = setTimeout(() => {
          if (scrollRef.current) {
            scrollRef.current.scrollTop = scrollRef.current.scrollHeight
          }
        }, 50)
      }
      return () => {
        if (scrollTimeoutRef.current) {
          clearTimeout(scrollTimeoutRef.current)
        }
      }
    }, [transcript, isConnected])
    return (
      <div className="absolute inset-0 flex flex-col">
        <div ref={scrollRef} className="flex-1 overflow-auto">
          <div
            className={cn(
              "min-h-[50%] w-full px-12 py-8",
              isConnected && "absolute bottom-16"
            )}
          >
            <div
              className={cn(
                "text-foreground/90 w-full text-xl leading-relaxed font-light",
                error && "text-red-500",
                isPartial && !error && "text-foreground/60"
              )}
            >
              {characters.map((char, index) => {
                // Stagger only the newly appended characters.
                const delay =
                  index >= previousNumChars
                    ? (index - previousNumChars + 1) * 0.012
                    : 0
                return (
                  // Index keys are intentional: they keep existing character
                  // spans mounted so only the appended tail animates.
                  <TranscriptCharacter key={index} char={char} delay={delay} />
                )
              })}
            </div>
          </div>
        </div>
        {transcript && !error && !isPartial && (
          <Button
            variant="ghost"
            size="icon"
            className="absolute top-4 right-4 h-8 w-8 opacity-0 transition-opacity hover:opacity-60"
            onClick={() => {
              // writeText returns a promise that can reject (e.g. when the
              // document is not focused); swallow the rejection instead of
              // surfacing an unhandled promise error.
              navigator.clipboard.writeText(transcript).catch(() => {})
            }}
            aria-label="Copy transcript"
          >
            <Copy className="h-4 w-4" />
          </Button>
        )}
      </div>
    )
  }
)
TranscriberTranscript.displayName = "TranscriberTranscript"
Scribe V2 Realtime Transcriber
realtime-transcriber-01
Connecting...
Say something aloud...
Realtime Speech to Text
Transcribe your voice in real-time with high accuracy
"use client"
import React, { useCallback, useEffect, useMemo, useRef, useState } from "react"
import Link from "next/link"
import { useScribe } from "@elevenlabs/react"
import { AnimatePresence, motion } from "framer-motion"
import { Copy } from "lucide-react"
import { cn } from "@/lib/utils"
import { useDebounce } from "@/hooks/use-debounce"
import { usePrevious } from "@/hooks/use-previous"
import { Badge } from "@/components/ui/badge"
import { Button } from "@/components/ui/button"
import { ShimmeringText } from "@/components/ui/shimmering-text"
import { getScribeToken } from "./actions/get-scribe-token"
import { LanguageSelector } from "./components/language-selector"
// Per-session recording metrics and error state.
interface RecordingState {
  // Human-readable error message; empty string when no error is shown.
  error: string
  // Rolling window of measured transcript latencies in ms; update sites cap
  // it to the most recent 30 entries via slice(-29) + append.
  latenciesMs: number[]
}
// UI connection lifecycle. NOTE(review): "disconnecting" is declared but the
// component transitions straight to "idle" on stop — confirm it is reserved
// for future use.
type ConnectionState = "idle" | "connecting" | "connected" | "disconnecting"
// A single transcript character that fades in from a blur. Memoized so
// characters that are already on screen are not re-animated when new text
// is appended after them.
const TranscriptCharacter = React.memo(
  ({ char, delay }: { char: string; delay: number }) => (
    <motion.span
      initial={{ opacity: 0, filter: "blur(3.5px)" }}
      animate={{ opacity: 1, filter: "none" }}
      transition={{ delay, duration: 0.5 }}
      // Hint the compositor only for characters that actually animate in.
      style={{ willChange: delay > 0 ? "filter, opacity" : "auto" }}
    >
      {char}
    </motion.span>
  )
)
TranscriptCharacter.displayName = "TranscriptCharacter"
// Memoize background effects to prevent re-renders
const BackgroundAura = React.memo(
({ status, isConnected }: { status: string; isConnected: boolean }) => {
const isActive = status === "connecting" || isConnected
return (
<div
className={cn(
"pointer-events-none fixed inset-0 transition-opacity duration-300 ease-out",
isActive ? "opacity-100" : "opacity-0"
)}
>
{/* Center bottom pool - main glow */}
<div
className="absolute bottom-0 left-1/2 -translate-x-1/2"
style={{
width: "130%",
height: "20vh",
background:
"radial-gradient(ellipse 100% 100% at 50% 100%, rgba(34, 211, 238, 0.5) 0%, rgba(168, 85, 247, 0.4) 35%, rgba(251, 146, 60, 0.5) 70%, transparent 100%)",
filter: "blur(80px)",
}}
/>
{/* Pulsing layer */}
<div
className={cn(
"absolute bottom-0 left-1/2 -translate-x-1/2 animate-pulse",
isConnected ? "opacity-100" : "opacity-80"
)}
style={{
width: "100%",
height: "18vh",
background:
"radial-gradient(ellipse 100% 100% at 50% 100%, rgba(134, 239, 172, 0.5) 0%, rgba(192, 132, 252, 0.4) 50%, transparent 100%)",
filter: "blur(60px)",
animationDuration: "4s",
}}
/>
{/* Left corner bloom */}
<div
className="absolute bottom-0 left-0"
style={{
width: "25vw",
height: "30vh",
background:
"radial-gradient(circle at 0% 100%, rgba(34, 211, 238, 0.5) 0%, rgba(134, 239, 172, 0.3) 30%, transparent 60%)",
filter: "blur(70px)",
}}
/>
{/* Left rising glow - organic curve */}
<div
className="absolute bottom-0 -left-8"
style={{
width: "20vw",
height: "45vh",
background:
"radial-gradient(ellipse 50% 100% at 10% 100%, rgba(34, 211, 238, 0.4) 0%, rgba(134, 239, 172, 0.25) 25%, transparent 60%)",
filter: "blur(60px)",
animation: "pulseGlow 5s ease-in-out infinite alternate",
}}
/>
{/* Right corner bloom */}
<div
className="absolute right-0 bottom-0"
style={{
width: "25vw",
height: "30vh",
background:
"radial-gradient(circle at 100% 100%, rgba(251, 146, 60, 0.5) 0%, rgba(251, 146, 60, 0.3) 30%, transparent 60%)",
filter: "blur(70px)",
}}
/>
{/* Right rising glow - organic curve */}
<div
className="absolute -right-8 bottom-0"
style={{
width: "20vw",
height: "45vh",
background:
"radial-gradient(ellipse 50% 100% at 90% 100%, rgba(251, 146, 60, 0.4) 0%, rgba(192, 132, 252, 0.25) 25%, transparent 60%)",
filter: "blur(60px)",
animation: "pulseGlow 5s ease-in-out infinite alternate-reverse",
}}
/>
{/* Shimmer overlay */}
<div
className="absolute bottom-0 left-1/2 -translate-x-1/2"
style={{
width: "100%",
height: "15vh",
background:
"linear-gradient(90deg, rgba(34, 211, 238, 0.3) 0%, rgba(168, 85, 247, 0.3) 30%, rgba(251, 146, 60, 0.3) 60%, rgba(134, 239, 172, 0.3) 100%)",
filter: "blur(30px)",
animation: "shimmer 8s linear infinite",
}}
/>
</div>
)
}
)
BackgroundAura.displayName = "BackgroundAura"
// Memoize bottom controls with comparison function
const BottomControls = React.memo(
({
isConnected,
hasError,
isMac,
onStop,
}: {
isConnected: boolean
hasError: boolean
isMac: boolean
onStop: () => void
}) => {
return (
<AnimatePresence mode="popLayout">
{isConnected && !hasError && (
<motion.div
key="bottom-controls"
initial={{ opacity: 0, y: 10 }}
animate={{
opacity: 1,
y: 0,
transition: { duration: 0.1 },
}}
exit={{
opacity: 0,
y: 10,
transition: { duration: 0.1 },
}}
className="fixed bottom-8 left-1/2 z-50 flex -translate-x-1/2 items-center gap-2"
>
<button
onClick={onStop}
className="bg-foreground text-background border-foreground/10 inline-flex items-center gap-2 rounded-lg border px-3 py-2 text-sm font-medium shadow-lg transition-opacity hover:opacity-90"
>
Stop
<kbd className="border-background/20 bg-background/10 inline-flex h-5 items-center rounded border px-1.5 font-mono text-xs">
{isMac ? "⌘K" : "Ctrl+K"}
</kbd>
</button>
</motion.div>
)}
</AnimatePresence>
)
},
(prev, next) => {
if (prev.isConnected !== next.isConnected) return false
if (prev.hasError !== next.hasError) return false
if (prev.isMac !== next.isMac) return false
return true
}
)
BottomControls.displayName = "BottomControls"
export default function RealtimeTranscriber01() {
const [recording, setRecording] = useState<RecordingState>({
error: "",
latenciesMs: [],
})
const [selectedLanguage, setSelectedLanguage] = useState<string | null>(null)
const [connectionState, setConnectionStateState] =
useState<ConnectionState>("idle")
const [localTranscript, setLocalTranscript] = useState("")
const [isMac, setIsMac] = useState(true)
useEffect(() => {
setIsMac(/(Mac|iPhone|iPod|iPad)/i.test(navigator.userAgent))
}, [])
const segmentStartMsRef = useRef<number | null>(null)
const lastTranscriptRef = useRef<string>("")
const finalTranscriptsRef = useRef<string[]>([])
const startSoundRef = useRef<HTMLAudioElement | null>(null)
const endSoundRef = useRef<HTMLAudioElement | null>(null)
const errorSoundRef = useRef<HTMLAudioElement | null>(null)
const errorTimeoutRef = useRef<NodeJS.Timeout | null>(null)
const lastOperationTimeRef = useRef(0)
const timerIntervalRef = useRef<NodeJS.Timeout | null>(null)
const connectionStateRef = useRef<ConnectionState>("idle")
const updateConnectionState = useCallback(
(next: ConnectionState) => {
connectionStateRef.current = next
setConnectionStateState(next)
},
[setConnectionStateState]
)
const clearSessionRefs = useCallback(() => {
if (timerIntervalRef.current) {
clearInterval(timerIntervalRef.current)
timerIntervalRef.current = null
}
if (errorTimeoutRef.current) {
clearTimeout(errorTimeoutRef.current)
errorTimeoutRef.current = null
}
segmentStartMsRef.current = null
lastTranscriptRef.current = ""
finalTranscriptsRef.current = []
}, [])
// === Callbacks for Scribe ===
const onPartialTranscript = useCallback((data: { text?: string }) => {
// Only process if we're connected
if (connectionStateRef.current !== "connected") return
const currentText = data.text || ""
if (currentText === lastTranscriptRef.current) return
lastTranscriptRef.current = currentText
// Update local transcript with partial
const fullText = finalTranscriptsRef.current.join(" ")
const combined = fullText ? `${fullText} ${currentText}` : currentText
setLocalTranscript(combined)
if (currentText.length > 0 && segmentStartMsRef.current != null) {
const latency = performance.now() - segmentStartMsRef.current
setRecording((prev) => ({
...prev,
latenciesMs: [...prev.latenciesMs.slice(-29), latency],
}))
segmentStartMsRef.current = null
}
}, [])
const onFinalTranscript = useCallback((data: { text?: string }) => {
// Only process if we're connected
if (connectionStateRef.current !== "connected") return
lastTranscriptRef.current = ""
if (data.text && data.text.length > 0) {
// Add to final transcripts
finalTranscriptsRef.current = [...finalTranscriptsRef.current, data.text]
// Update local transcript
setLocalTranscript(finalTranscriptsRef.current.join(" "))
if (segmentStartMsRef.current != null) {
const latency = performance.now() - segmentStartMsRef.current
setRecording((prev) => ({
...prev,
latenciesMs: [...prev.latenciesMs.slice(-29), latency],
}))
}
}
segmentStartMsRef.current = null
}, [])
const onError = useCallback((error: Error | Event) => {
console.error("[Scribe] Error:", error)
// Ignore errors if we're not supposed to be connected
if (connectionStateRef.current !== "connected") {
console.log("[Scribe] Ignoring error - not connected")
return
}
const errorMessage =
error instanceof Error ? error.message : "Transcription error"
if (errorTimeoutRef.current) {
clearTimeout(errorTimeoutRef.current)
}
errorTimeoutRef.current = setTimeout(() => {
if (connectionStateRef.current !== "connected") return
setRecording((prev) => ({
...prev,
error: errorMessage,
}))
errorSoundRef.current?.play().catch(() => {})
}, 500)
}, [])
const scribeConfig = useMemo(
() => ({
modelId: "scribe_realtime_v2" as const,
onPartialTranscript,
onFinalTranscript,
onError,
}),
[onPartialTranscript, onFinalTranscript, onError]
)
const scribe = useScribe(scribeConfig)
// Clear transcript when not connected
useEffect(() => {
if (connectionState !== "connected") {
setLocalTranscript("")
}
}, [connectionState])
// Simulate audio chunk timing for latency measurement
useEffect(() => {
// Clear any existing interval
if (timerIntervalRef.current) {
clearInterval(timerIntervalRef.current)
timerIntervalRef.current = null
}
if (connectionState !== "connected") return
timerIntervalRef.current = setInterval(() => {
if (segmentStartMsRef.current === null) {
segmentStartMsRef.current = performance.now()
}
}, 100)
return () => {
if (timerIntervalRef.current) {
clearInterval(timerIntervalRef.current)
timerIntervalRef.current = null
}
}
}, [connectionState])
const handleToggleRecording = useCallback(async () => {
const now = Date.now()
const timeSinceLastOp = now - lastOperationTimeRef.current
// DISCONNECT
if (connectionState === "connected" || connectionState === "connecting") {
console.log("[Scribe] Disconnecting...")
// 1. Update UI state immediately
updateConnectionState("idle")
setLocalTranscript("")
setRecording({ error: "", latenciesMs: [] })
clearSessionRefs()
// 2. Disconnect (async, don't wait)
try {
scribe.disconnect()
scribe.clearTranscripts()
} catch {
// Ignore errors
}
// 3. Play sound
if (endSoundRef.current) {
endSoundRef.current.currentTime = 0
endSoundRef.current.play().catch(() => {})
}
lastOperationTimeRef.current = now
return
}
// Debounce rapid clicks for CONNECT
if (timeSinceLastOp < 200) {
console.log("[Scribe] Ignoring rapid click")
return
}
lastOperationTimeRef.current = now
// CONNECT
if (connectionState !== "idle") {
console.log("[Scribe] Not in idle state, ignoring")
return
}
console.log("[Scribe] Connecting...")
updateConnectionState("connecting")
setLocalTranscript("")
setRecording({ error: "", latenciesMs: [] })
clearSessionRefs()
try {
const result = await getScribeToken()
// Check if user cancelled using ref (gets current value)
if (connectionStateRef.current === "idle") {
console.log("[Scribe] Cancelled during token fetch")
return
}
if (result.error || !result.token) {
throw new Error(result.error || "Failed to get token")
}
await scribe.connect({
token: result.token,
languageCode: selectedLanguage || undefined,
microphone: {
echoCancellation: false,
noiseSuppression: false,
autoGainControl: true,
},
})
// Check again after connect completes
if (connectionStateRef.current !== "connecting") {
console.log("[Scribe] Cancelled after connection")
try {
scribe.disconnect()
} catch {
// Ignore
}
return
}
console.log("[Scribe] Connected")
updateConnectionState("connected")
// Play start sound
if (startSoundRef.current) {
startSoundRef.current.currentTime = 0
startSoundRef.current.play().catch(() => {})
}
} catch (error) {
console.error("[Scribe] Connection error:", error)
updateConnectionState("idle")
setRecording((prev) => ({
...prev,
error: error instanceof Error ? error.message : "Connection failed",
}))
}
}, [
clearSessionRefs,
connectionState,
scribe,
selectedLanguage,
updateConnectionState,
])
// Cmd+K / Ctrl+K shortcut
useEffect(() => {
const handleKeyDown = (e: KeyboardEvent) => {
if (
e.key === "k" &&
(e.metaKey || e.ctrlKey) &&
e.target instanceof HTMLElement &&
!["INPUT", "TEXTAREA"].includes(e.target.tagName)
) {
e.preventDefault()
handleToggleRecording()
}
}
window.addEventListener("keydown", handleKeyDown)
return () => {
window.removeEventListener("keydown", handleKeyDown)
}
}, [handleToggleRecording])
// Note: No unmount cleanup - React Strict Mode causes issues
// The browser will handle websocket cleanup on page unload
// Preload audio files on mount (no auto-play)
useEffect(() => {
const sounds = [
{
ref: startSoundRef,
url: "https://ui.elevenlabs.io/sounds/transcriber-start.mp3",
},
{
ref: endSoundRef,
url: "https://ui.elevenlabs.io/sounds/transcriber-end.mp3",
},
{
ref: errorSoundRef,
url: "https://ui.elevenlabs.io/sounds/transcriber-error.mp3",
},
]
sounds.forEach(({ ref, url }) => {
const audio = new Audio(url)
audio.volume = 0.6
audio.preload = "auto"
audio.load()
ref.current = audio
})
}, [])
// Display text: prefer error, then local transcript
const displayText = recording.error || localTranscript
const hasContent = Boolean(displayText) && connectionState === "connected"
// Determine if current transcript is partial (for styling)
const isPartial = Boolean(lastTranscriptRef.current)
return (
<div className="relative mx-auto flex h-full w-full max-w-4xl flex-col items-center justify-center">
<BackgroundAura
status={connectionState === "connecting" ? "connecting" : scribe.status}
isConnected={connectionState === "connected"}
/>
<style jsx>{`
@keyframes shimmer {
0% {
transform: translateX(-20%) scale(1);
}
50% {
transform: translateX(20%) scale(1.1);
}
100% {
transform: translateX(-20%) scale(1);
}
}
@keyframes drift {
0% {
transform: translateX(-10%) scale(1);
}
100% {
transform: translateX(10%) scale(1.05);
}
}
@keyframes pulseGlow {
0% {
opacity: 0.5;
transform: translateY(0) scale(1);
}
100% {
opacity: 0.8;
transform: translateY(-5%) scale(1.02);
}
}
`}</style>
<div className="relative flex h-full w-full flex-col items-center justify-center gap-8 overflow-hidden px-8 py-12">
{/* Main transcript area */}
<div className="relative flex min-h-[350px] w-full flex-1 items-center justify-center overflow-hidden">
{/* Transcript - shown when there's content */}
<div
className={cn(
"absolute inset-0 transition-opacity duration-250",
hasContent ? "opacity-100" : "pointer-events-none opacity-0"
)}
>
{hasContent && (
<TranscriberTranscript
transcript={displayText}
error={recording.error}
isPartial={isPartial}
isConnected={connectionState === "connected"}
/>
)}
</div>
{/* Status text - shown when no content */}
<div
className={cn(
"absolute inset-0 flex items-center justify-center transition-opacity duration-250",
!hasContent ? "opacity-100" : "pointer-events-none opacity-0"
)}
>
<div
className={cn(
"absolute transition-opacity duration-250",
connectionState === "connecting"
? "opacity-100"
: "pointer-events-none opacity-0"
)}
>
<ShimmeringText
text="Connecting..."
className="text-2xl font-light tracking-wide whitespace-nowrap"
/>
</div>
<div
className={cn(
"absolute transition-opacity duration-250",
connectionState === "connected" && !hasContent
? "opacity-100"
: "pointer-events-none opacity-0"
)}
>
<ShimmeringText
text="Say something aloud..."
className="text-3xl font-light tracking-wide whitespace-nowrap"
/>
</div>
</div>
{/* Language selector and button - only shown when not connected */}
<div
className={cn(
"absolute inset-0 flex items-center justify-center transition-opacity duration-250",
connectionState === "idle"
? "opacity-100"
: "pointer-events-none opacity-0"
)}
>
<div className="flex w-full max-w-sm flex-col gap-4 px-8">
<div className="flex flex-col items-center gap-6">
<div className="flex flex-col items-center gap-2 text-center">
<h1 className="text-2xl font-semibold tracking-tight">
Realtime Speech to Text
</h1>
<p className="text-muted-foreground text-sm">
Transcribe your voice in real-time with high accuracy
</p>
</div>
<div className="w-full space-y-2">
<label className="text-foreground/70 text-sm font-medium">
Language
</label>
<LanguageSelector
value={selectedLanguage}
onValueChange={setSelectedLanguage}
disabled={connectionState !== "idle"}
/>
</div>
<Button
onClick={handleToggleRecording}
disabled={false}
size="lg"
className="bg-foreground/95 hover:bg-foreground/90 w-full justify-center gap-3"
>
<span>Start Transcribing</span>
<kbd className="border-background/20 bg-background/10 hidden h-5 items-center gap-1 rounded border px-1.5 font-mono text-xs sm:inline-flex">
{isMac ? "⌘K" : "Ctrl+K"}
</kbd>
</Button>
<Badge variant="outline" asChild>
<Link
href="https://elevenlabs.io/speech-to-text"
target="_blank"
rel="noopener noreferrer"
className="text-foreground/60 hover:text-foreground/80 transition-colors"
>
Powered by ElevenLabs Speech to Text
</Link>
</Badge>
</div>
</div>
</div>
</div>
<BottomControls
isConnected={connectionState === "connected"}
hasError={Boolean(recording.error)}
isMac={isMac}
onStop={handleToggleRecording}
/>
</div>
</div>
)
}
/**
 * Animated transcript view. Renders the transcript one character at a time so
 * newly appended text fades/blurs in (via TranscriptCharacter), auto-scrolls
 * while connected, and exposes a copy action once the transcript is final.
 *
 * Props:
 * - transcript: full text to display (may be an error message from caller).
 * - error: non-empty string switches the text to the error color.
 * - isPartial: dims the text while the last segment is still provisional.
 * - isConnected: enables auto-scroll and bottom-anchored layout.
 */
const TranscriberTranscript = React.memo(
  ({
    transcript,
    error,
    isPartial,
    isConnected,
  }: {
    transcript: string
    error: string
    isPartial?: boolean
    isConnected: boolean
  }) => {
    // Per-character array so each character can be animated independently.
    const characters = useMemo(() => transcript.split(""), [transcript])
    // Debounced snapshot of the previous character count: only characters at
    // or beyond this index are "new" and receive a staggered entrance delay.
    const previousNumChars = useDebounce(
      usePrevious(characters.length) || 0,
      100
    )
    const scrollRef = useRef<HTMLDivElement>(null)
    const scrollTimeoutRef = useRef<NodeJS.Timeout | null>(null)
    // Auto-scroll to bottom when connected and text is updating
    useEffect(() => {
      if (isConnected && scrollRef.current) {
        // Coalesce rapid transcript updates into one scroll per 50ms window.
        if (scrollTimeoutRef.current) {
          clearTimeout(scrollTimeoutRef.current)
        }
        scrollTimeoutRef.current = setTimeout(() => {
          if (scrollRef.current) {
            scrollRef.current.scrollTop = scrollRef.current.scrollHeight
          }
        }, 50)
      }
      return () => {
        if (scrollTimeoutRef.current) {
          clearTimeout(scrollTimeoutRef.current)
        }
      }
    }, [transcript, isConnected])
    return (
      <div className="absolute inset-0 flex flex-col">
        <div ref={scrollRef} className="flex-1 overflow-auto">
          <div
            className={cn(
              "min-h-[50%] w-full px-12 py-8",
              isConnected && "absolute bottom-16"
            )}
          >
            <div
              className={cn(
                "text-foreground/90 w-full text-xl leading-relaxed font-light",
                error && "text-red-500",
                isPartial && !error && "text-foreground/60"
              )}
            >
              {characters.map((char, index) => {
                // Stagger only characters appended since the last update;
                // already-seen characters render with zero delay.
                const delay =
                  index >= previousNumChars
                    ? (index - previousNumChars + 1) * 0.012
                    : 0
                return (
                  <TranscriptCharacter key={index} char={char} delay={delay} />
                )
              })}
            </div>
          </div>
        </div>
        {/* Copy action: only shown for finalized, error-free transcripts. */}
        {transcript && !error && !isPartial && (
          <Button
            variant="ghost"
            size="icon"
            className="absolute top-4 right-4 h-8 w-8 opacity-0 transition-opacity hover:opacity-60"
            onClick={() => {
              // Clipboard writes can be rejected (permission denied or
              // insecure context); swallow the failure instead of producing
              // an unhandled promise rejection.
              navigator.clipboard.writeText(transcript).catch(() => {})
            }}
            aria-label="Copy transcript"
          >
            <Copy className="h-4 w-4" />
          </Button>
        )}
      </div>
    )
  }
)
TranscriberTranscript.displayName = "TranscriberTranscript"
Files
"use client"
import { useCallback, useEffect, useRef, useState } from "react"
import type { ComponentProps } from "react"
import { useConversation } from "@elevenlabs/react"
import {
AudioLinesIcon,
CheckIcon,
CopyIcon,
PhoneOffIcon,
SendIcon,
} from "lucide-react"
import { cn } from "@/lib/utils"
import { Button } from "@/components/ui/button"
import {
Card,
CardContent,
CardFooter,
CardHeader,
} from "@/components/ui/card"
import {
Conversation,
ConversationContent,
ConversationEmptyState,
ConversationScrollButton,
} from "@/components/ui/conversation"
import { Input } from "@/components/ui/input"
import { Message, MessageContent } from "@/components/ui/message"
import { Orb } from "@/components/ui/orb"
import { Response } from "@/components/ui/response"
import { ShimmeringText } from "@/components/ui/shimmering-text"
import {
Tooltip,
TooltipContent,
TooltipProvider,
TooltipTrigger,
} from "@/components/ui/tooltip"
// Lifecycle tags for system-generated messages; carried on ChatMessage.type
// (no visible reader of `type` in this file — presumably used by consumers).
type SystemMessageType = "initial" | "connecting" | "connected" | "error"
// One entry in the rendered chat transcript.
interface ChatMessage {
  role: "user" | "assistant"
  content: string
  timestamp?: Date
  type?: SystemMessageType
}
// Agent this page connects to. The non-null assertion assumes
// NEXT_PUBLIC_ELEVENLABS_AGENT_ID is defined at build time — TODO confirm.
const DEFAULT_AGENT = {
  agentId: process.env.NEXT_PUBLIC_ELEVENLABS_AGENT_ID!,
  name: "Customer Support",
  description: "AI Voice Assistant",
}
type ChatActionsProps = ComponentProps<"div">
const ChatActions = ({ className, children, ...props }: ChatActionsProps) => (
<div className={cn("flex items-center gap-1", className)} {...props}>
{children}
</div>
)
type ChatActionProps = ComponentProps<typeof Button> & {
  tooltip?: string
  label?: string
}
// Small icon button for per-message actions. When `tooltip` is provided the
// button is wrapped in a tooltip; otherwise it renders bare. `label` (falling
// back to `tooltip`) supplies the screen-reader text.
const ChatAction = ({
  tooltip,
  children,
  label,
  className,
  variant = "ghost",
  size = "sm",
  ...props
}: ChatActionProps) => {
  const actionButton = (
    <Button
      className={cn(
        "text-muted-foreground hover:text-foreground relative size-9 p-1.5",
        className
      )}
      size={size}
      type="button"
      variant={variant}
      {...props}
    >
      {children}
      <span className="sr-only">{label || tooltip}</span>
    </Button>
  )
  // Guard clause: no tooltip requested — return the plain button.
  if (!tooltip) {
    return actionButton
  }
  return (
    <TooltipProvider>
      <Tooltip>
        <TooltipTrigger asChild>{actionButton}</TooltipTrigger>
        <TooltipContent>
          <p>{tooltip}</p>
        </TooltipContent>
      </Tooltip>
    </TooltipProvider>
  )
}
/**
 * Voice/text chat page backed by the ElevenLabs conversational agent SDK.
 * Supports a text-only (websocket) session started implicitly on first send,
 * and an explicit voice (webrtc) session toggled by the call button.
 */
export default function Page() {
  // Rendered conversation transcript.
  const [messages, setMessages] = useState<ChatMessage[]>([])
  // Session lifecycle; set optimistically here and kept in sync via the
  // SDK's onStatusChange callback passed to startSession.
  const [agentState, setAgentState] = useState<
    "disconnected" | "connecting" | "connected" | "disconnecting" | null
  >("disconnected")
  const [textInput, setTextInput] = useState("")
  // Index of the message whose Copy action was last clicked (drives the
  // 2-second check-mark feedback).
  const [copiedIndex, setCopiedIndex] = useState<number | null>(null)
  const [errorMessage, setErrorMessage] = useState<string | null>(null)
  // Mic stream cached across renders so tracks can be stopped on hang-up
  // and on unmount.
  const mediaStreamRef = useRef<MediaStream | null>(null)
  // True while the current/most recent session was started text-only.
  const isTextOnlyModeRef = useRef<boolean>(true)
  const conversation = useConversation({
    onConnect: () => {
      // Only clear messages for voice mode
      if (!isTextOnlyModeRef.current) {
        setMessages([])
      }
    },
    onDisconnect: () => {
      // Only clear messages for voice mode
      if (!isTextOnlyModeRef.current) {
        setMessages([])
      }
    },
    onMessage: (message) => {
      if (message.message) {
        const newMessage: ChatMessage = {
          role: message.source === "user" ? "user" : "assistant",
          content: message.message,
        }
        setMessages((prev) => [...prev, newMessage])
      }
    },
    onError: (error) => {
      // NOTE(review): SDK errors are only logged and reset the state;
      // errorMessage is not set here, so they are not surfaced in the header.
      console.error("Error:", error)
      setAgentState("disconnected")
    },
    onDebug: (debug) => {
      console.log("Debug:", debug)
    },
  })
  // Lazily requests (and caches) the microphone stream. Surfaces a
  // permission-denied message for NotAllowedError and rethrows all failures.
  const getMicStream = useCallback(async () => {
    if (mediaStreamRef.current) return mediaStreamRef.current
    try {
      const stream = await navigator.mediaDevices.getUserMedia({ audio: true })
      mediaStreamRef.current = stream
      setErrorMessage(null)
      return stream
    } catch (error) {
      if (error instanceof DOMException && error.name === "NotAllowedError") {
        setErrorMessage("Please enable microphone permissions in your browser.")
      }
      throw error
    }
  }, [])
  // Starts a session. textOnly selects websocket (typed chat) vs webrtc
  // (voice, acquires the mic first); skipConnectingMessage preserves the
  // optimistic user message when a text send triggered the connect.
  // Errors are handled here (state reset), not rethrown.
  const startConversation = useCallback(
    async (
      textOnly: boolean = true,
      skipConnectingMessage: boolean = false
    ) => {
      try {
        isTextOnlyModeRef.current = textOnly
        if (!skipConnectingMessage) {
          setMessages([])
        }
        if (!textOnly) {
          await getMicStream()
        }
        await conversation.startSession({
          agentId: DEFAULT_AGENT.agentId,
          connectionType: textOnly ? "websocket" : "webrtc",
          overrides: {
            conversation: {
              textOnly: textOnly,
            },
            agent: {
              // Suppress the agent's spoken greeting in text-only mode.
              firstMessage: textOnly ? "" : undefined,
            },
          },
          onStatusChange: (status) => setAgentState(status.status),
        })
      } catch (error) {
        console.error(error)
        setAgentState("disconnected")
        setMessages([])
      }
    },
    [conversation, getMicStream]
  )
  // Toggles the voice call: starts a webrtc session when idle, otherwise
  // ends the session and releases the microphone tracks.
  const handleCall = useCallback(async () => {
    if (agentState === "disconnected" || agentState === null) {
      setAgentState("connecting")
      try {
        await startConversation(false)
      } catch {
        // NOTE(review): startConversation catches its own errors, so this
        // branch looks unreachable — confirm before relying on it.
        setAgentState("disconnected")
      }
    } else if (agentState === "connected") {
      // NOTE(review): endSession is fire-and-forget here (not awaited).
      conversation.endSession()
      setAgentState("disconnected")
      if (mediaStreamRef.current) {
        mediaStreamRef.current.getTracks().forEach((t) => t.stop())
        mediaStreamRef.current = null
      }
    }
  }, [agentState, conversation, startConversation])
  const handleTextInputChange = useCallback(
    (e: React.ChangeEvent<HTMLInputElement>) => {
      setTextInput(e.target.value)
    },
    []
  )
  // Sends the typed message. When disconnected it first starts a text-only
  // session, then shows the message and delivers it to the agent.
  const handleSendText = useCallback(async () => {
    if (!textInput.trim()) return
    const messageToSend = textInput
    if (agentState === "disconnected" || agentState === null) {
      const userMessage: ChatMessage = {
        role: "user",
        content: messageToSend,
      }
      setTextInput("")
      setAgentState("connecting")
      try {
        await startConversation(true, true)
        // Add message once conversation started
        setMessages([userMessage])
        // Send message after connection is established
        conversation.sendUserMessage(messageToSend)
      } catch (error) {
        console.error("Failed to start conversation:", error)
      }
    } else if (agentState === "connected") {
      const newMessage: ChatMessage = {
        role: "user",
        content: messageToSend,
      }
      setMessages((prev) => [...prev, newMessage])
      setTextInput("")
      conversation.sendUserMessage(messageToSend)
    }
  }, [textInput, agentState, conversation, startConversation])
  // Enter submits the input (Shift+Enter is excluded).
  const handleKeyDown = useCallback(
    (e: React.KeyboardEvent<HTMLInputElement>) => {
      if (e.key === "Enter" && !e.shiftKey) {
        e.preventDefault()
        handleSendText()
      }
    },
    [handleSendText]
  )
  // Release microphone tracks on unmount.
  useEffect(() => {
    return () => {
      if (mediaStreamRef.current) {
        mediaStreamRef.current.getTracks().forEach((t) => t.stop())
      }
    }
  }, [])
  const isCallActive = agentState === "connected"
  const isTransitioning =
    agentState === "connecting" || agentState === "disconnecting"
  // Perceptual volume curve for the Orb: square root compresses the range,
  // the 2.5 gain lifts quiet input, clamped to [0, 1].
  const getInputVolume = useCallback(() => {
    const rawValue = conversation.getInputVolume?.() ?? 0
    return Math.min(1.0, Math.pow(rawValue, 0.5) * 2.5)
  }, [conversation])
  const getOutputVolume = useCallback(() => {
    const rawValue = conversation.getOutputVolume?.() ?? 0
    return Math.min(1.0, Math.pow(rawValue, 0.5) * 2.5)
  }, [conversation])
  return (
    <Card
      className={cn(
        "mx-auto flex h-[380px] w-full flex-col gap-0 overflow-hidden"
      )}
    >
      <CardHeader className="flex shrink-0 flex-row items-center justify-between pb-4">
        <div className="flex items-center gap-4">
          <div className="ring-border relative size-10 overflow-hidden rounded-full ring-1">
            <Orb
              className="h-full w-full"
              volumeMode="manual"
              getInputVolume={getInputVolume}
              getOutputVolume={getOutputVolume}
            />
          </div>
          <div className="flex flex-col gap-0.5">
            <p className="text-sm leading-none font-medium">
              {DEFAULT_AGENT.name}
            </p>
            <div className="flex items-center gap-2">
              {errorMessage ? (
                <p className="text-destructive text-xs">{errorMessage}</p>
              ) : agentState === "disconnected" || agentState === null ? (
                <p className="text-muted-foreground text-xs">
                  Tap to start voice chat
                </p>
              ) : agentState === "connected" ? (
                <p className="text-xs text-green-600">Connected</p>
              ) : isTransitioning ? (
                <ShimmeringText
                  text={agentState}
                  className="text-xs capitalize"
                />
              ) : null}
            </div>
          </div>
        </div>
        {/* Status dot: solid green when connected, pulsing while transitioning. */}
        <div
          className={cn(
            "flex h-2 w-2 rounded-full transition-all duration-300",
            agentState === "connected" &&
              "bg-green-500 shadow-[0_0_8px_rgba(34,197,94,0.5)]",
            isTransitioning && "animate-pulse bg-white/40"
          )}
        />
      </CardHeader>
      <CardContent className="flex-1 overflow-hidden p-0">
        <Conversation className="h-full">
          <ConversationContent className="flex min-w-0 flex-col gap-2 p-6 pb-2">
            {messages.length === 0 ? (
              <ConversationEmptyState
                icon={<Orb className="size-12" />}
                title={
                  agentState === "connecting" ? (
                    <ShimmeringText text="Starting conversation" />
                  ) : agentState === "connected" ? (
                    <ShimmeringText text="Start talking or type" />
                  ) : (
                    "Start a conversation"
                  )
                }
                description={
                  agentState === "connecting"
                    ? "Connecting..."
                    : agentState === "connected"
                      ? "Ready to chat"
                      : "Type a message or tap the voice button"
                }
              />
            ) : (
              messages.map((message, index) => {
                return (
                  <div key={index} className="flex w-full flex-col gap-1">
                    <Message from={message.role}>
                      <MessageContent className="max-w-full min-w-0">
                        <Response className="w-auto [overflow-wrap:anywhere] whitespace-pre-wrap">
                          {message.content}
                        </Response>
                      </MessageContent>
                      {message.role === "assistant" && (
                        <div className="ring-border size-6 flex-shrink-0 self-end overflow-hidden rounded-full ring-1">
                          <Orb
                            className="h-full w-full"
                            agentState={
                              isCallActive && index === messages.length - 1
                                ? "talking"
                                : null
                            }
                          />
                        </div>
                      )}
                    </Message>
                    {message.role === "assistant" && (
                      <ChatActions>
                        <ChatAction
                          size="sm"
                          tooltip={copiedIndex === index ? "Copied!" : "Copy"}
                          onClick={() => {
                            navigator.clipboard.writeText(message.content)
                            setCopiedIndex(index)
                            setTimeout(() => setCopiedIndex(null), 2000)
                          }}
                        >
                          {copiedIndex === index ? (
                            <CheckIcon className="size-4" />
                          ) : (
                            <CopyIcon className="size-4" />
                          )}
                        </ChatAction>
                      </ChatActions>
                    )}
                  </div>
                )
              })
            )}
          </ConversationContent>
          <ConversationScrollButton />
        </Conversation>
      </CardContent>
      <CardFooter className="shrink-0 border-t">
        <div className="flex w-full items-center gap-2">
          <div className="flex flex-1 items-center gap-2">
            <Input
              value={textInput}
              onChange={handleTextInputChange}
              onKeyDown={handleKeyDown}
              placeholder="Type a message..."
              className="h-9 focus-visible:ring-0 focus-visible:ring-offset-0"
              disabled={isTransitioning}
            />
            <Button
              onClick={handleSendText}
              size="icon"
              variant="ghost"
              className="rounded-full"
              disabled={!textInput.trim() || isTransitioning}
            >
              <SendIcon className="size-4" />
              <span className="sr-only">Send message</span>
            </Button>
            {!isCallActive && (
              <Button
                onClick={handleCall}
                size="icon"
                variant="ghost"
                className={cn("relative shrink-0 rounded-full transition-all")}
                disabled={isTransitioning}
              >
                <AudioLinesIcon className="size-4" />
                <span className="sr-only">Start voice call</span>
              </Button>
            )}
            {isCallActive && (
              <Button
                onClick={handleCall}
                size="icon"
                variant="secondary"
                className={cn("relative shrink-0 rounded-full transition-all")}
                disabled={isTransitioning}
              >
                <PhoneOffIcon className="size-4" />
                <span className="sr-only">End call</span>
              </Button>
            )}
          </div>
        </div>
      </CardFooter>
    </Card>
  )
}
Voice chat 1
voice-chat-01
Customer Support
Tap to start voice chat
Start a conversation
Type a message or tap the voice button
"use client"
import { useCallback, useEffect, useRef, useState } from "react"
import type { ComponentProps } from "react"
import { useConversation } from "@elevenlabs/react"
import {
AudioLinesIcon,
CheckIcon,
CopyIcon,
PhoneOffIcon,
SendIcon,
} from "lucide-react"
import { cn } from "@/lib/utils"
import { Button } from "@/components/ui/button"
import {
Card,
CardContent,
CardFooter,
CardHeader,
} from "@/components/ui/card"
import {
Conversation,
ConversationContent,
ConversationEmptyState,
ConversationScrollButton,
} from "@/components/ui/conversation"
import { Input } from "@/components/ui/input"
import { Message, MessageContent } from "@/components/ui/message"
import { Orb } from "@/components/ui/orb"
import { Response } from "@/components/ui/response"
import { ShimmeringText } from "@/components/ui/shimmering-text"
import {
Tooltip,
TooltipContent,
TooltipProvider,
TooltipTrigger,
} from "@/components/ui/tooltip"
// Lifecycle tags for system-generated messages; carried on ChatMessage.type
// (no visible reader of `type` in this file — presumably used by consumers).
type SystemMessageType = "initial" | "connecting" | "connected" | "error"
// One entry in the rendered chat transcript.
interface ChatMessage {
  role: "user" | "assistant"
  content: string
  timestamp?: Date
  type?: SystemMessageType
}
// Agent this page connects to. The non-null assertion assumes
// NEXT_PUBLIC_ELEVENLABS_AGENT_ID is defined at build time — TODO confirm.
const DEFAULT_AGENT = {
  agentId: process.env.NEXT_PUBLIC_ELEVENLABS_AGENT_ID!,
  name: "Customer Support",
  description: "AI Voice Assistant",
}
type ChatActionsProps = ComponentProps<"div">
const ChatActions = ({ className, children, ...props }: ChatActionsProps) => (
<div className={cn("flex items-center gap-1", className)} {...props}>
{children}
</div>
)
type ChatActionProps = ComponentProps<typeof Button> & {
  tooltip?: string
  label?: string
}
// Small icon button for per-message actions. When `tooltip` is provided the
// button is wrapped in a tooltip; otherwise it renders bare. `label` (falling
// back to `tooltip`) supplies the screen-reader text.
const ChatAction = ({
  tooltip,
  children,
  label,
  className,
  variant = "ghost",
  size = "sm",
  ...props
}: ChatActionProps) => {
  const actionButton = (
    <Button
      className={cn(
        "text-muted-foreground hover:text-foreground relative size-9 p-1.5",
        className
      )}
      size={size}
      type="button"
      variant={variant}
      {...props}
    >
      {children}
      <span className="sr-only">{label || tooltip}</span>
    </Button>
  )
  // Guard clause: no tooltip requested — return the plain button.
  if (!tooltip) {
    return actionButton
  }
  return (
    <TooltipProvider>
      <Tooltip>
        <TooltipTrigger asChild>{actionButton}</TooltipTrigger>
        <TooltipContent>
          <p>{tooltip}</p>
        </TooltipContent>
      </Tooltip>
    </TooltipProvider>
  )
}
/**
 * Voice/text chat page backed by the ElevenLabs conversational agent SDK.
 * Supports a text-only (websocket) session started implicitly on first send,
 * and an explicit voice (webrtc) session toggled by the call button.
 */
export default function Page() {
  // Rendered conversation transcript.
  const [messages, setMessages] = useState<ChatMessage[]>([])
  // Session lifecycle; set optimistically here and kept in sync via the
  // SDK's onStatusChange callback passed to startSession.
  const [agentState, setAgentState] = useState<
    "disconnected" | "connecting" | "connected" | "disconnecting" | null
  >("disconnected")
  const [textInput, setTextInput] = useState("")
  // Index of the message whose Copy action was last clicked (drives the
  // 2-second check-mark feedback).
  const [copiedIndex, setCopiedIndex] = useState<number | null>(null)
  const [errorMessage, setErrorMessage] = useState<string | null>(null)
  // Mic stream cached across renders so tracks can be stopped on hang-up
  // and on unmount.
  const mediaStreamRef = useRef<MediaStream | null>(null)
  // True while the current/most recent session was started text-only.
  const isTextOnlyModeRef = useRef<boolean>(true)
  const conversation = useConversation({
    onConnect: () => {
      // Only clear messages for voice mode
      if (!isTextOnlyModeRef.current) {
        setMessages([])
      }
    },
    onDisconnect: () => {
      // Only clear messages for voice mode
      if (!isTextOnlyModeRef.current) {
        setMessages([])
      }
    },
    onMessage: (message) => {
      if (message.message) {
        const newMessage: ChatMessage = {
          role: message.source === "user" ? "user" : "assistant",
          content: message.message,
        }
        setMessages((prev) => [...prev, newMessage])
      }
    },
    onError: (error) => {
      // NOTE(review): SDK errors are only logged and reset the state;
      // errorMessage is not set here, so they are not surfaced in the header.
      console.error("Error:", error)
      setAgentState("disconnected")
    },
    onDebug: (debug) => {
      console.log("Debug:", debug)
    },
  })
  // Lazily requests (and caches) the microphone stream. Surfaces a
  // permission-denied message for NotAllowedError and rethrows all failures.
  const getMicStream = useCallback(async () => {
    if (mediaStreamRef.current) return mediaStreamRef.current
    try {
      const stream = await navigator.mediaDevices.getUserMedia({ audio: true })
      mediaStreamRef.current = stream
      setErrorMessage(null)
      return stream
    } catch (error) {
      if (error instanceof DOMException && error.name === "NotAllowedError") {
        setErrorMessage("Please enable microphone permissions in your browser.")
      }
      throw error
    }
  }, [])
  // Starts a session. textOnly selects websocket (typed chat) vs webrtc
  // (voice, acquires the mic first); skipConnectingMessage preserves the
  // optimistic user message when a text send triggered the connect.
  // Errors are handled here (state reset), not rethrown.
  const startConversation = useCallback(
    async (
      textOnly: boolean = true,
      skipConnectingMessage: boolean = false
    ) => {
      try {
        isTextOnlyModeRef.current = textOnly
        if (!skipConnectingMessage) {
          setMessages([])
        }
        if (!textOnly) {
          await getMicStream()
        }
        await conversation.startSession({
          agentId: DEFAULT_AGENT.agentId,
          connectionType: textOnly ? "websocket" : "webrtc",
          overrides: {
            conversation: {
              textOnly: textOnly,
            },
            agent: {
              // Suppress the agent's spoken greeting in text-only mode.
              firstMessage: textOnly ? "" : undefined,
            },
          },
          onStatusChange: (status) => setAgentState(status.status),
        })
      } catch (error) {
        console.error(error)
        setAgentState("disconnected")
        setMessages([])
      }
    },
    [conversation, getMicStream]
  )
  // Toggles the voice call: starts a webrtc session when idle, otherwise
  // ends the session and releases the microphone tracks.
  const handleCall = useCallback(async () => {
    if (agentState === "disconnected" || agentState === null) {
      setAgentState("connecting")
      try {
        await startConversation(false)
      } catch {
        // NOTE(review): startConversation catches its own errors, so this
        // branch looks unreachable — confirm before relying on it.
        setAgentState("disconnected")
      }
    } else if (agentState === "connected") {
      // NOTE(review): endSession is fire-and-forget here (not awaited).
      conversation.endSession()
      setAgentState("disconnected")
      if (mediaStreamRef.current) {
        mediaStreamRef.current.getTracks().forEach((t) => t.stop())
        mediaStreamRef.current = null
      }
    }
  }, [agentState, conversation, startConversation])
  const handleTextInputChange = useCallback(
    (e: React.ChangeEvent<HTMLInputElement>) => {
      setTextInput(e.target.value)
    },
    []
  )
  // Sends the typed message. When disconnected it first starts a text-only
  // session, then shows the message and delivers it to the agent.
  const handleSendText = useCallback(async () => {
    if (!textInput.trim()) return
    const messageToSend = textInput
    if (agentState === "disconnected" || agentState === null) {
      const userMessage: ChatMessage = {
        role: "user",
        content: messageToSend,
      }
      setTextInput("")
      setAgentState("connecting")
      try {
        await startConversation(true, true)
        // Add message once conversation started
        setMessages([userMessage])
        // Send message after connection is established
        conversation.sendUserMessage(messageToSend)
      } catch (error) {
        console.error("Failed to start conversation:", error)
      }
    } else if (agentState === "connected") {
      const newMessage: ChatMessage = {
        role: "user",
        content: messageToSend,
      }
      setMessages((prev) => [...prev, newMessage])
      setTextInput("")
      conversation.sendUserMessage(messageToSend)
    }
  }, [textInput, agentState, conversation, startConversation])
  // Enter submits the input (Shift+Enter is excluded).
  const handleKeyDown = useCallback(
    (e: React.KeyboardEvent<HTMLInputElement>) => {
      if (e.key === "Enter" && !e.shiftKey) {
        e.preventDefault()
        handleSendText()
      }
    },
    [handleSendText]
  )
  // Release microphone tracks on unmount.
  useEffect(() => {
    return () => {
      if (mediaStreamRef.current) {
        mediaStreamRef.current.getTracks().forEach((t) => t.stop())
      }
    }
  }, [])
  const isCallActive = agentState === "connected"
  const isTransitioning =
    agentState === "connecting" || agentState === "disconnecting"
  // Perceptual volume curve for the Orb: square root compresses the range,
  // the 2.5 gain lifts quiet input, clamped to [0, 1].
  const getInputVolume = useCallback(() => {
    const rawValue = conversation.getInputVolume?.() ?? 0
    return Math.min(1.0, Math.pow(rawValue, 0.5) * 2.5)
  }, [conversation])
  const getOutputVolume = useCallback(() => {
    const rawValue = conversation.getOutputVolume?.() ?? 0
    return Math.min(1.0, Math.pow(rawValue, 0.5) * 2.5)
  }, [conversation])
  return (
    <Card
      className={cn(
        "mx-auto flex h-[380px] w-full flex-col gap-0 overflow-hidden"
      )}
    >
      <CardHeader className="flex shrink-0 flex-row items-center justify-between pb-4">
        <div className="flex items-center gap-4">
          <div className="ring-border relative size-10 overflow-hidden rounded-full ring-1">
            <Orb
              className="h-full w-full"
              volumeMode="manual"
              getInputVolume={getInputVolume}
              getOutputVolume={getOutputVolume}
            />
          </div>
          <div className="flex flex-col gap-0.5">
            <p className="text-sm leading-none font-medium">
              {DEFAULT_AGENT.name}
            </p>
            <div className="flex items-center gap-2">
              {errorMessage ? (
                <p className="text-destructive text-xs">{errorMessage}</p>
              ) : agentState === "disconnected" || agentState === null ? (
                <p className="text-muted-foreground text-xs">
                  Tap to start voice chat
                </p>
              ) : agentState === "connected" ? (
                <p className="text-xs text-green-600">Connected</p>
              ) : isTransitioning ? (
                <ShimmeringText
                  text={agentState}
                  className="text-xs capitalize"
                />
              ) : null}
            </div>
          </div>
        </div>
        {/* Status dot: solid green when connected, pulsing while transitioning. */}
        <div
          className={cn(
            "flex h-2 w-2 rounded-full transition-all duration-300",
            agentState === "connected" &&
              "bg-green-500 shadow-[0_0_8px_rgba(34,197,94,0.5)]",
            isTransitioning && "animate-pulse bg-white/40"
          )}
        />
      </CardHeader>
      <CardContent className="flex-1 overflow-hidden p-0">
        <Conversation className="h-full">
          <ConversationContent className="flex min-w-0 flex-col gap-2 p-6 pb-2">
            {messages.length === 0 ? (
              <ConversationEmptyState
                icon={<Orb className="size-12" />}
                title={
                  agentState === "connecting" ? (
                    <ShimmeringText text="Starting conversation" />
                  ) : agentState === "connected" ? (
                    <ShimmeringText text="Start talking or type" />
                  ) : (
                    "Start a conversation"
                  )
                }
                description={
                  agentState === "connecting"
                    ? "Connecting..."
                    : agentState === "connected"
                      ? "Ready to chat"
                      : "Type a message or tap the voice button"
                }
              />
            ) : (
              messages.map((message, index) => {
                return (
                  <div key={index} className="flex w-full flex-col gap-1">
                    <Message from={message.role}>
                      <MessageContent className="max-w-full min-w-0">
                        <Response className="w-auto [overflow-wrap:anywhere] whitespace-pre-wrap">
                          {message.content}
                        </Response>
                      </MessageContent>
                      {message.role === "assistant" && (
                        <div className="ring-border size-6 flex-shrink-0 self-end overflow-hidden rounded-full ring-1">
                          <Orb
                            className="h-full w-full"
                            agentState={
                              isCallActive && index === messages.length - 1
                                ? "talking"
                                : null
                            }
                          />
                        </div>
                      )}
                    </Message>
                    {message.role === "assistant" && (
                      <ChatActions>
                        <ChatAction
                          size="sm"
                          tooltip={copiedIndex === index ? "Copied!" : "Copy"}
                          onClick={() => {
                            navigator.clipboard.writeText(message.content)
                            setCopiedIndex(index)
                            setTimeout(() => setCopiedIndex(null), 2000)
                          }}
                        >
                          {copiedIndex === index ? (
                            <CheckIcon className="size-4" />
                          ) : (
                            <CopyIcon className="size-4" />
                          )}
                        </ChatAction>
                      </ChatActions>
                    )}
                  </div>
                )
              })
            )}
          </ConversationContent>
          <ConversationScrollButton />
        </Conversation>
      </CardContent>
      <CardFooter className="shrink-0 border-t">
        <div className="flex w-full items-center gap-2">
          <div className="flex flex-1 items-center gap-2">
            <Input
              value={textInput}
              onChange={handleTextInputChange}
              onKeyDown={handleKeyDown}
              placeholder="Type a message..."
              className="h-9 focus-visible:ring-0 focus-visible:ring-offset-0"
              disabled={isTransitioning}
            />
            <Button
              onClick={handleSendText}
              size="icon"
              variant="ghost"
              className="rounded-full"
              disabled={!textInput.trim() || isTransitioning}
            >
              <SendIcon className="size-4" />
              <span className="sr-only">Send message</span>
            </Button>
            {!isCallActive && (
              <Button
                onClick={handleCall}
                size="icon"
                variant="ghost"
                className={cn("relative shrink-0 rounded-full transition-all")}
                disabled={isTransitioning}
              >
                <AudioLinesIcon className="size-4" />
                <span className="sr-only">Start voice call</span>
              </Button>
            )}
            {isCallActive && (
              <Button
                onClick={handleCall}
                size="icon"
                variant="secondary"
                className={cn("relative shrink-0 rounded-full transition-all")}
                disabled={isTransitioning}
              >
                <PhoneOffIcon className="size-4" />
                <span className="sr-only">End call</span>
              </Button>
            )}
          </div>
        </div>
      </CardFooter>
    </Card>
  )
}
Files
import { PongGame } from "@/components/pong-game"
// Route entry point: renders the self-contained Pong game component.
export default function Page() {
  return <PongGame />
}
Retro Pong game with Matrix display
pong-01
playing
Player
ELEVENLABS
Press Space to start
import { PongGame } from "@/components/pong-game"
// Route entry point: renders the self-contained Pong game component.
export default function Page() {
  return <PongGame />
}
Files
"use client"
import { Fragment, useCallback, useEffect, useRef, useState } from "react"
import { Copy } from "lucide-react"
import { Streamdown } from "streamdown"
import { cn } from "@/lib/utils"
import {
transcribeAudio,
type TranscriptionResult,
} from "@/app/transcriber-01/actions/transcribe"
import { Button } from "@/components/ui/button"
import { Card } from "@/components/ui/card"
import { LiveWaveform } from "@/components/ui/live-waveform"
import { ScrollArea } from "@/components/ui/scroll-area"
import { Separator } from "@/components/ui/separator"
// State machine for the record → transcribe → display flow.
interface RecordingState {
  // True while the MediaRecorder is capturing audio.
  isRecording: boolean
  // True while the recorded blob is being transcribed server-side.
  isProcessing: boolean
  transcript: string
  error: string
  // Transcription duration in milliseconds (displayed as seconds in the UI).
  transcriptionTime?: number
}
export default function Transcriber01() {
const [recording, setRecording] = useState<RecordingState>({
isRecording: false,
isProcessing: false,
transcript: "",
error: "",
})
const mediaRecorderRef = useRef<MediaRecorder | null>(null)
const audioChunksRef = useRef<Blob[]>([])
const streamRef = useRef<MediaStream | null>(null)
const updateRecording = useCallback((updates: Partial<RecordingState>) => {
setRecording((prev) => ({ ...prev, ...updates }))
}, [])
const cleanupStream = useCallback(() => {
if (streamRef.current) {
streamRef.current.getTracks().forEach((track) => track.stop())
streamRef.current = null
}
}, [])
const stopRecording = useCallback(() => {
if (mediaRecorderRef.current?.state !== "inactive") {
mediaRecorderRef.current?.stop()
}
cleanupStream()
updateRecording({ isRecording: false })
}, [cleanupStream, updateRecording])
const processAudio = useCallback(
async (audioBlob: Blob) => {
updateRecording({ isProcessing: true, error: "" })
try {
const result: TranscriptionResult = await transcribeAudio({
audio: new File([audioBlob], "recording.webm", {
type: "audio/webm",
}),
})
if (result.error) {
throw new Error(result.error)
}
updateRecording({
transcript: result.text || "",
transcriptionTime: result.transcriptionTime,
isProcessing: false,
})
} catch (err) {
console.error("Transcription error:", err)
updateRecording({
error:
err instanceof Error ? err.message : "Failed to transcribe audio",
isProcessing: false,
})
}
},
[updateRecording]
)
const startRecording = useCallback(async () => {
try {
updateRecording({
transcript: "",
error: "",
transcriptionTime: undefined,
})
audioChunksRef.current = []
const stream =
await navigator.mediaDevices.getUserMedia(AUDIO_CONSTRAINTS)
streamRef.current = stream
const mimeType = getMimeType()
const mediaRecorder = new MediaRecorder(stream, { mimeType })
mediaRecorderRef.current = mediaRecorder
mediaRecorder.ondataavailable = (event: BlobEvent) => {
if (event.data.size > 0) {
audioChunksRef.current.push(event.data)
}
}
mediaRecorder.onstop = () => {
const audioBlob = new Blob(audioChunksRef.current, { type: mimeType })
processAudio(audioBlob)
}
mediaRecorder.start()
updateRecording({ isRecording: true })
} catch (err) {
updateRecording({
error: "Microphone permission denied",
isRecording: false,
})
console.error("Microphone error:", err)
}
}, [processAudio, updateRecording])
const handleRecordToggle = useCallback(() => {
if (recording.isRecording) {
stopRecording()
} else {
startRecording()
}
}, [recording.isRecording, startRecording, stopRecording])
useEffect(() => {
const handleKeyDown = (e: KeyboardEvent) => {
if (e.altKey && e.code === "Space") {
e.preventDefault()
handleRecordToggle()
}
}
window.addEventListener("keydown", handleKeyDown)
return () => window.removeEventListener("keydown", handleKeyDown)
}, [handleRecordToggle])
useEffect(() => {
return cleanupStream
}, [cleanupStream])
return (
<div className="mx-auto w-full">
<Card className="border-border relative m-0 gap-0 overflow-hidden p-0 shadow-2xl">
<div className="relative py-6">
<div className="flex h-32 items-center justify-center">
{recording.isProcessing && <TranscriberProcessing />}
{(Boolean(recording.transcript) || Boolean(recording.error)) && (
<TranscriberTranscript
transcript={recording.transcript}
error={recording.error}
/>
)}
{!recording.isProcessing &&
!Boolean(recording.transcript) &&
!Boolean(recording.error) && (
<LiveWaveform
active={recording.isRecording}
barWidth={5}
barGap={2}
barRadius={8}
barColor="#71717a"
fadeEdges
fadeWidth={48}
sensitivity={0.8}
smoothingTimeConstant={0.85}
className="w-full"
/>
)}
</div>
</div>
<Separator />
<div className="bg-card px-4 py-2">
<div className="flex items-center justify-between">
<div className="flex items-center gap-3">
<span
className={cn(
"text-muted-foreground/60 font-mono text-[10px] tracking-widest uppercase",
(recording.transcriptionTime &&
Boolean(recording.transcript)) ||
Boolean(recording.error)
? "animate-in fade-in duration-500"
: "opacity-0"
)}
>
{recording.error
? "Error"
: recording.transcriptionTime
? `${(recording.transcriptionTime / 1000).toFixed(2)}s`
: "0.00s"}
</span>
</div>
<div className="flex items-center gap-3">
<Button
variant="outline"
size="sm"
className="gap-2"
onClick={handleRecordToggle}
disabled={recording.isProcessing}
aria-label={
recording.isRecording ? "Stop recording" : "Start recording"
}
>
{recording.isRecording || recording.isProcessing
? "Stop"
: "Record"}
<kbd className="bg-muted text-muted-foreground pointer-events-none inline-flex h-5 items-center gap-1 rounded border px-1.5 font-mono text-[10px] font-medium select-none">
<span className="text-xs">⌥</span>Space
</kbd>
</Button>
</div>
</div>
</div>
</Card>
</div>
)
}
// Dimmed, static waveform shown while the transcription request is in flight.
function TranscriberProcessing() {
  return (
    <LiveWaveform
      active={false}
      processing
      barWidth={4}
      barGap={1}
      barRadius={8}
      barColor="#71717a"
      fadeEdges
      fadeWidth={48}
      className="w-full opacity-60"
    />
  )
}
// Scrollable transcript / error panel with a copy-to-clipboard affordance.
// The error, when present, takes precedence over the transcript and is
// rendered in red; the copy button only appears for a successful transcript.
const TranscriberTranscript = ({
  transcript,
  error,
}: {
  transcript: string
  error: string
}) => {
  const displayText = error || transcript
  return (
    <div className="relative w-full max-w-2xl px-6">
      <ScrollArea className="h-32 w-full">
        <div
          className={cn(
            "text-foreground py-1 pr-8 text-left text-sm leading-relaxed",
            error && "text-red-500"
          )}
        >
          <Streamdown>{displayText}</Streamdown>
        </div>
      </ScrollArea>
      {transcript && !error && (
        <Button
          variant="ghost"
          size="icon"
          className="absolute top-1 right-2 h-6 w-6 opacity-50 transition-opacity hover:opacity-100"
          onClick={() => {
            // Clipboard access can reject (permissions / insecure context);
            // log instead of leaving an unhandled promise rejection.
            navigator.clipboard.writeText(transcript).catch(console.error)
          }}
          aria-label="Copy transcript"
        >
          <Copy className="h-3.5 w-3.5" />
        </Button>
      )}
    </div>
  )
}
// getUserMedia constraints: request a microphone stream with the browser's
// echo cancellation, noise suppression, and automatic gain control enabled.
const AUDIO_CONSTRAINTS: MediaStreamConstraints = {
  audio: {
    echoCancellation: true,
    noiseSuppression: true,
    autoGainControl: true,
  },
}
// Preferred recorder formats, best first: Opus-in-WebM, then plain WebM.
const SUPPORTED_MIME_TYPES = ["audio/webm;codecs=opus", "audio/webm"] as const
// Return the first MIME type this browser's MediaRecorder supports,
// falling back to plain "audio/webm" when none of the preferred types match.
function getMimeType(): string {
  const supported = SUPPORTED_MIME_TYPES.find((type) =>
    MediaRecorder.isTypeSupported(type)
  )
  return supported ?? "audio/webm"
}
Transcriber
transcriber-01
0.00s
"use client"
import { Fragment, useCallback, useEffect, useRef, useState } from "react"
import { Copy } from "lucide-react"
import { Streamdown } from "streamdown"
import { cn } from "@/lib/utils"
import {
transcribeAudio,
type TranscriptionResult,
} from "@/app/transcriber-01/actions/transcribe"
import { Button } from "@/components/ui/button"
import { Card } from "@/components/ui/card"
import { LiveWaveform } from "@/components/ui/live-waveform"
import { ScrollArea } from "@/components/ui/scroll-area"
import { Separator } from "@/components/ui/separator"
// UI state for one record → transcribe cycle.
interface RecordingState {
  isRecording: boolean // mic is currently capturing
  isProcessing: boolean // transcription request is in flight
  transcript: string // last successful transcription ("" when none)
  error: string // last error message ("" when none)
  transcriptionTime?: number // server-reported duration in milliseconds
}
// Push-to-talk transcriber: captures microphone audio with MediaRecorder,
// sends the finished recording to the `transcribeAudio` server action, and
// renders the transcript, an error, or a live waveform depending on state.
// Recording is toggled by the button or the Option/Alt+Space shortcut.
export default function Transcriber01() {
  const [recording, setRecording] = useState<RecordingState>({
    isRecording: false,
    isProcessing: false,
    transcript: "",
    error: "",
  })
  // Mutable handles the callbacks need without triggering re-renders.
  const mediaRecorderRef = useRef<MediaRecorder | null>(null)
  const audioChunksRef = useRef<Blob[]>([])
  const streamRef = useRef<MediaStream | null>(null)
  // Shallow-merge a partial update into the recording state.
  const updateRecording = useCallback((updates: Partial<RecordingState>) => {
    setRecording((prev) => ({ ...prev, ...updates }))
  }, [])
  // Stop every mic track and drop the stream (releases the OS mic indicator).
  const cleanupStream = useCallback(() => {
    if (streamRef.current) {
      streamRef.current.getTracks().forEach((track) => track.stop())
      streamRef.current = null
    }
  }, [])
  // Stop the recorder (its `onstop` handler then kicks off transcription)
  // and release the microphone.
  const stopRecording = useCallback(() => {
    if (mediaRecorderRef.current?.state !== "inactive") {
      mediaRecorderRef.current?.stop()
    }
    cleanupStream()
    updateRecording({ isRecording: false })
  }, [cleanupStream, updateRecording])
  // Upload the assembled recording and store the transcript or the error.
  const processAudio = useCallback(
    async (audioBlob: Blob) => {
      updateRecording({ isProcessing: true, error: "" })
      try {
        // NOTE(review): the filename/type are hard-coded to webm even though
        // the recorder mimeType may carry a codec suffix — both supported
        // types are WebM containers, so this appears intentional; confirm.
        const result: TranscriptionResult = await transcribeAudio({
          audio: new File([audioBlob], "recording.webm", {
            type: "audio/webm",
          }),
        })
        if (result.error) {
          throw new Error(result.error)
        }
        updateRecording({
          transcript: result.text || "",
          transcriptionTime: result.transcriptionTime,
          isProcessing: false,
        })
      } catch (err) {
        console.error("Transcription error:", err)
        updateRecording({
          error:
            err instanceof Error ? err.message : "Failed to transcribe audio",
          isProcessing: false,
        })
      }
    },
    [updateRecording]
  )
  // Ask for mic access, start a MediaRecorder, and buffer chunks until stop.
  const startRecording = useCallback(async () => {
    try {
      // Reset the previous run's output before starting a new one.
      updateRecording({
        transcript: "",
        error: "",
        transcriptionTime: undefined,
      })
      audioChunksRef.current = []
      const stream =
        await navigator.mediaDevices.getUserMedia(AUDIO_CONSTRAINTS)
      streamRef.current = stream
      const mimeType = getMimeType()
      const mediaRecorder = new MediaRecorder(stream, { mimeType })
      mediaRecorderRef.current = mediaRecorder
      mediaRecorder.ondataavailable = (event: BlobEvent) => {
        if (event.data.size > 0) {
          audioChunksRef.current.push(event.data)
        }
      }
      // Fires after stop(): assemble the buffered chunks and transcribe.
      mediaRecorder.onstop = () => {
        const audioBlob = new Blob(audioChunksRef.current, { type: mimeType })
        processAudio(audioBlob)
      }
      mediaRecorder.start()
      updateRecording({ isRecording: true })
    } catch (err) {
      // getUserMedia rejection is most commonly a permission denial; other
      // failures (no device, insecure context) surface the same message.
      updateRecording({
        error: "Microphone permission denied",
        isRecording: false,
      })
      console.error("Microphone error:", err)
    }
  }, [processAudio, updateRecording])
  const handleRecordToggle = useCallback(() => {
    if (recording.isRecording) {
      stopRecording()
    } else {
      startRecording()
    }
  }, [recording.isRecording, startRecording, stopRecording])
  // Global Option/Alt+Space shortcut mirrors the Record/Stop button.
  useEffect(() => {
    const handleKeyDown = (e: KeyboardEvent) => {
      if (e.altKey && e.code === "Space") {
        e.preventDefault()
        handleRecordToggle()
      }
    }
    window.addEventListener("keydown", handleKeyDown)
    return () => window.removeEventListener("keydown", handleKeyDown)
  }, [handleRecordToggle])
  // Release the mic if the component unmounts mid-recording.
  useEffect(() => {
    return cleanupStream
  }, [cleanupStream])
  return (
    <div className="mx-auto w-full">
      <Card className="border-border relative m-0 gap-0 overflow-hidden p-0 shadow-2xl">
        <div className="relative py-6">
          {/* Main display: processing spinner, transcript/error, or idle waveform. */}
          <div className="flex h-32 items-center justify-center">
            {recording.isProcessing && <TranscriberProcessing />}
            {(Boolean(recording.transcript) || Boolean(recording.error)) && (
              <TranscriberTranscript
                transcript={recording.transcript}
                error={recording.error}
              />
            )}
            {!recording.isProcessing &&
              !Boolean(recording.transcript) &&
              !Boolean(recording.error) && (
                <LiveWaveform
                  active={recording.isRecording}
                  barWidth={5}
                  barGap={2}
                  barRadius={8}
                  barColor="#71717a"
                  fadeEdges
                  fadeWidth={48}
                  sensitivity={0.8}
                  smoothingTimeConstant={0.85}
                  className="w-full"
                />
              )}
          </div>
        </div>
        <Separator />
        <div className="bg-card px-4 py-2">
          <div className="flex items-center justify-between">
            <div className="flex items-center gap-3">
              {/* Elapsed transcription time (or "Error"); fades in when known. */}
              <span
                className={cn(
                  "text-muted-foreground/60 font-mono text-[10px] tracking-widest uppercase",
                  (recording.transcriptionTime &&
                    Boolean(recording.transcript)) ||
                    Boolean(recording.error)
                    ? "animate-in fade-in duration-500"
                    : "opacity-0"
                )}
              >
                {recording.error
                  ? "Error"
                  : recording.transcriptionTime
                    ? `${(recording.transcriptionTime / 1000).toFixed(2)}s`
                    : "0.00s"}
              </span>
            </div>
            <div className="flex items-center gap-3">
              <Button
                variant="outline"
                size="sm"
                className="gap-2"
                onClick={handleRecordToggle}
                disabled={recording.isProcessing}
                aria-label={
                  recording.isRecording ? "Stop recording" : "Start recording"
                }
              >
                {recording.isRecording || recording.isProcessing
                  ? "Stop"
                  : "Record"}
                <kbd className="bg-muted text-muted-foreground pointer-events-none inline-flex h-5 items-center gap-1 rounded border px-1.5 font-mono text-[10px] font-medium select-none">
                  <span className="text-xs">⌥</span>Space
                </kbd>
              </Button>
            </div>
          </div>
        </div>
      </Card>
    </div>
  )
}
// Dimmed, static waveform shown while the transcription request is in flight.
function TranscriberProcessing() {
  return (
    <LiveWaveform
      active={false}
      processing
      barWidth={4}
      barGap={1}
      barRadius={8}
      barColor="#71717a"
      fadeEdges
      fadeWidth={48}
      className="w-full opacity-60"
    />
  )
}
// Scrollable transcript / error panel with a copy-to-clipboard affordance.
// The error, when present, takes precedence over the transcript and is
// rendered in red; the copy button only appears for a successful transcript.
const TranscriberTranscript = ({
  transcript,
  error,
}: {
  transcript: string
  error: string
}) => {
  const displayText = error || transcript
  return (
    <div className="relative w-full max-w-2xl px-6">
      <ScrollArea className="h-32 w-full">
        <div
          className={cn(
            "text-foreground py-1 pr-8 text-left text-sm leading-relaxed",
            error && "text-red-500"
          )}
        >
          <Streamdown>{displayText}</Streamdown>
        </div>
      </ScrollArea>
      {transcript && !error && (
        <Button
          variant="ghost"
          size="icon"
          className="absolute top-1 right-2 h-6 w-6 opacity-50 transition-opacity hover:opacity-100"
          onClick={() => {
            // Clipboard access can reject (permissions / insecure context);
            // log instead of leaving an unhandled promise rejection.
            navigator.clipboard.writeText(transcript).catch(console.error)
          }}
          aria-label="Copy transcript"
        >
          <Copy className="h-3.5 w-3.5" />
        </Button>
      )}
    </div>
  )
}
// getUserMedia constraints: request a microphone stream with the browser's
// echo cancellation, noise suppression, and automatic gain control enabled.
const AUDIO_CONSTRAINTS: MediaStreamConstraints = {
  audio: {
    echoCancellation: true,
    noiseSuppression: true,
    autoGainControl: true,
  },
}
// Preferred recorder formats, best first: Opus-in-WebM, then plain WebM.
const SUPPORTED_MIME_TYPES = ["audio/webm;codecs=opus", "audio/webm"] as const
// Return the first MIME type this browser's MediaRecorder supports,
// falling back to plain "audio/webm" when none of the preferred types match.
function getMimeType(): string {
  const supported = SUPPORTED_MIME_TYPES.find((type) =>
    MediaRecorder.isTypeSupported(type)
  )
  return supported ?? "audio/webm"
}
Files
import { Speaker } from "@/components/speaker"
// Route entry point: renders the Speaker demo component.
export default function Page() {
  return <Speaker />
}
EL-01 Speaker
speaker-01
import { Speaker } from "@/components/speaker"
// Route entry point: renders the Speaker demo component.
export default function Page() {
  return <Speaker />
}
Files
"use client"
import { useCallback, useEffect, useRef, useState } from "react"
import { zodResolver } from "@hookform/resolvers/zod"
import { useForm } from "react-hook-form"
import { cn } from "@/lib/utils"
import { voiceToFormAction } from "@/app/voice-form/actions/voice-to-form"
import {
exampleFormSchema,
ExampleFormValues,
} from "@/app/voice-form/schema"
import { Button } from "@/components/ui/button"
import {
Card,
CardContent,
CardDescription,
CardHeader,
CardTitle,
} from "@/components/ui/card"
import {
Form,
FormControl,
FormField,
FormItem,
FormLabel,
FormMessage,
} from "@/components/ui/form"
import { Input } from "@/components/ui/input"
import { VoiceButton } from "@/components/ui/voice-button"
// getUserMedia constraints: request a microphone stream with the browser's
// echo cancellation, noise suppression, and automatic gain control enabled.
const AUDIO_CONSTRAINTS: MediaStreamConstraints = {
  audio: {
    echoCancellation: true,
    noiseSuppression: true,
    autoGainControl: true,
  },
}
// Preferred recorder formats, best first: Opus-in-WebM, then plain WebM.
const SUPPORTED_MIME_TYPES = ["audio/webm;codecs=opus", "audio/webm"] as const
// Return the first MIME type this browser's MediaRecorder supports,
// falling back to plain "audio/webm" when none of the preferred types match.
function getMimeType(): string {
  const supported = SUPPORTED_MIME_TYPES.find((type) =>
    MediaRecorder.isTypeSupported(type)
  )
  return supported ?? "audio/webm"
}
// Voice-to-form demo: records a short microphone clip, sends it to the
// `voiceToFormAction` server action, and fills the react-hook-form fields
// from the structured data it returns.
export default function Page() {
  const [isRecording, setIsRecording] = useState(false)
  const [isProcessing, setIsProcessing] = useState(false)
  const [error, setError] = useState("")
  const [success, setSuccess] = useState(false)
  // Mutable handles used by callbacks without causing re-renders.
  const mediaRecorderRef = useRef<MediaRecorder | null>(null)
  const audioChunksRef = useRef<Blob[]>([])
  const streamRef = useRef<MediaStream | null>(null)
  const form = useForm<ExampleFormValues>({
    resolver: zodResolver(exampleFormSchema),
    defaultValues: {
      firstName: "",
      lastName: "",
    },
    mode: "onChange",
  })
  // Stop every mic track and drop the stream (releases the mic indicator).
  const cleanupStream = useCallback(() => {
    if (streamRef.current) {
      streamRef.current.getTracks().forEach((track) => track.stop())
      streamRef.current = null
    }
  }, [])
  // Upload the recording and copy any extracted values into the form.
  const processAudio = useCallback(
    async (audioBlob: Blob) => {
      setIsProcessing(true)
      setError("")
      setSuccess(false)
      try {
        const audioFile = new File([audioBlob], "audio.webm", {
          type: audioBlob.type,
        })
        const result = await voiceToFormAction(audioFile)
        if (result.data && Object.keys(result.data).length > 0) {
          // Only overwrite fields the action returned truthy values for.
          Object.entries(result.data).forEach(([key, value]) => {
            if (value) {
              form.setValue(key as keyof ExampleFormValues, value as string, {
                shouldValidate: true,
              })
            }
          })
          setSuccess(true)
          // NOTE(review): this 2s timeout isn't cleared on unmount, so
          // setSuccess may fire after unmount — confirm this is acceptable.
          setTimeout(() => setSuccess(false), 2000)
        }
      } catch (err) {
        console.error("Voice input error:", err)
        setError(err instanceof Error ? err.message : "Failed to process audio")
      } finally {
        setIsProcessing(false)
      }
    },
    [form]
  )
  // Stop the recorder (its onstop then triggers processAudio) and free the mic.
  const stopRecording = useCallback(() => {
    if (mediaRecorderRef.current?.state !== "inactive") {
      mediaRecorderRef.current?.stop()
    }
    cleanupStream()
    setIsRecording(false)
  }, [cleanupStream])
  // Ask for mic access, start a MediaRecorder, and buffer chunks until stop.
  const startRecording = useCallback(async () => {
    try {
      setError("")
      audioChunksRef.current = []
      const stream =
        await navigator.mediaDevices.getUserMedia(AUDIO_CONSTRAINTS)
      streamRef.current = stream
      const mimeType = getMimeType()
      const mediaRecorder = new MediaRecorder(stream, { mimeType })
      mediaRecorderRef.current = mediaRecorder
      mediaRecorder.ondataavailable = (event: BlobEvent) => {
        if (event.data.size > 0) {
          audioChunksRef.current.push(event.data)
        }
      }
      // Fires after stop(): assemble the buffered chunks and process them.
      mediaRecorder.onstop = () => {
        const audioBlob = new Blob(audioChunksRef.current, { type: mimeType })
        processAudio(audioBlob)
      }
      mediaRecorder.start()
      setIsRecording(true)
    } catch (err) {
      // getUserMedia rejection is most commonly a permission denial.
      setError("Microphone permission denied")
      console.error("Microphone error:", err)
    }
  }, [processAudio])
  const handleVoiceToggle = useCallback(() => {
    if (isRecording) {
      stopRecording()
    } else {
      startRecording()
    }
  }, [isRecording, startRecording, stopRecording])
  // Release the mic if the component unmounts mid-recording.
  useEffect(() => {
    return cleanupStream
  }, [cleanupStream])
  const onSubmit = (data: ExampleFormValues) => {
    console.log("Form submitted:", data)
  }
  // Collapse the four flags into the VoiceButton's single display state;
  // precedence: processing > recording > success > error > idle.
  const voiceState = isProcessing
    ? "processing"
    : isRecording
      ? "recording"
      : success
        ? "success"
        : error
          ? "error"
          : "idle"
  return (
    <div className="mx-auto w-full">
      <Card className="relative overflow-hidden">
        <div className={cn("flex flex-col gap-2")}>
          <CardHeader>
            <div className="flex items-start justify-between">
              <div className="space-y-1">
                <CardTitle>Voice Fill</CardTitle>
                <CardDescription>Powered by ElevenLabs Scribe</CardDescription>
              </div>
              <VoiceButton
                state={voiceState}
                onPress={handleVoiceToggle}
                disabled={isProcessing}
                trailing="Voice Fill"
              />
            </div>
          </CardHeader>
          <CardContent>
            <Form {...form}>
              <form
                onSubmit={form.handleSubmit(onSubmit)}
                className="space-y-6"
              >
                <div className="grid grid-cols-1 gap-4 sm:grid-cols-2">
                  <FormField
                    control={form.control}
                    name="firstName"
                    render={({ field }) => (
                      <FormItem>
                        <FormLabel>First Name *</FormLabel>
                        <FormControl>
                          <Input placeholder="John" {...field} />
                        </FormControl>
                        <FormMessage />
                      </FormItem>
                    )}
                  />
                  <FormField
                    control={form.control}
                    name="lastName"
                    render={({ field }) => (
                      <FormItem>
                        <FormLabel>Last Name *</FormLabel>
                        <FormControl>
                          <Input placeholder="Doe" {...field} />
                        </FormControl>
                        <FormMessage />
                      </FormItem>
                    )}
                  />
                </div>
              </form>
            </Form>
          </CardContent>
        </div>
      </Card>
    </div>
  )
}
Voice-fill form
voice-form-01
Voice Fill
Powered by ElevenLabs Scribe
"use client"
import { useCallback, useEffect, useRef, useState } from "react"
import { zodResolver } from "@hookform/resolvers/zod"
import { useForm } from "react-hook-form"
import { cn } from "@/lib/utils"
import { voiceToFormAction } from "@/app/voice-form/actions/voice-to-form"
import {
exampleFormSchema,
ExampleFormValues,
} from "@/app/voice-form/schema"
import { Button } from "@/components/ui/button"
import {
Card,
CardContent,
CardDescription,
CardHeader,
CardTitle,
} from "@/components/ui/card"
import {
Form,
FormControl,
FormField,
FormItem,
FormLabel,
FormMessage,
} from "@/components/ui/form"
import { Input } from "@/components/ui/input"
import { VoiceButton } from "@/components/ui/voice-button"
// getUserMedia constraints: request a microphone stream with the browser's
// echo cancellation, noise suppression, and automatic gain control enabled.
const AUDIO_CONSTRAINTS: MediaStreamConstraints = {
  audio: {
    echoCancellation: true,
    noiseSuppression: true,
    autoGainControl: true,
  },
}
// Preferred recorder formats, best first: Opus-in-WebM, then plain WebM.
const SUPPORTED_MIME_TYPES = ["audio/webm;codecs=opus", "audio/webm"] as const
// Return the first MIME type this browser's MediaRecorder supports,
// falling back to plain "audio/webm" when none of the preferred types match.
function getMimeType(): string {
  const supported = SUPPORTED_MIME_TYPES.find((type) =>
    MediaRecorder.isTypeSupported(type)
  )
  return supported ?? "audio/webm"
}
// Voice-to-form demo: records a short microphone clip, sends it to the
// `voiceToFormAction` server action, and fills the react-hook-form fields
// from the structured data it returns.
export default function Page() {
  const [isRecording, setIsRecording] = useState(false)
  const [isProcessing, setIsProcessing] = useState(false)
  const [error, setError] = useState("")
  const [success, setSuccess] = useState(false)
  // Mutable handles used by callbacks without causing re-renders.
  const mediaRecorderRef = useRef<MediaRecorder | null>(null)
  const audioChunksRef = useRef<Blob[]>([])
  const streamRef = useRef<MediaStream | null>(null)
  const form = useForm<ExampleFormValues>({
    resolver: zodResolver(exampleFormSchema),
    defaultValues: {
      firstName: "",
      lastName: "",
    },
    mode: "onChange",
  })
  // Stop every mic track and drop the stream (releases the mic indicator).
  const cleanupStream = useCallback(() => {
    if (streamRef.current) {
      streamRef.current.getTracks().forEach((track) => track.stop())
      streamRef.current = null
    }
  }, [])
  // Upload the recording and copy any extracted values into the form.
  const processAudio = useCallback(
    async (audioBlob: Blob) => {
      setIsProcessing(true)
      setError("")
      setSuccess(false)
      try {
        const audioFile = new File([audioBlob], "audio.webm", {
          type: audioBlob.type,
        })
        const result = await voiceToFormAction(audioFile)
        if (result.data && Object.keys(result.data).length > 0) {
          // Only overwrite fields the action returned truthy values for.
          Object.entries(result.data).forEach(([key, value]) => {
            if (value) {
              form.setValue(key as keyof ExampleFormValues, value as string, {
                shouldValidate: true,
              })
            }
          })
          setSuccess(true)
          // NOTE(review): this 2s timeout isn't cleared on unmount, so
          // setSuccess may fire after unmount — confirm this is acceptable.
          setTimeout(() => setSuccess(false), 2000)
        }
      } catch (err) {
        console.error("Voice input error:", err)
        setError(err instanceof Error ? err.message : "Failed to process audio")
      } finally {
        setIsProcessing(false)
      }
    },
    [form]
  )
  // Stop the recorder (its onstop then triggers processAudio) and free the mic.
  const stopRecording = useCallback(() => {
    if (mediaRecorderRef.current?.state !== "inactive") {
      mediaRecorderRef.current?.stop()
    }
    cleanupStream()
    setIsRecording(false)
  }, [cleanupStream])
  // Ask for mic access, start a MediaRecorder, and buffer chunks until stop.
  const startRecording = useCallback(async () => {
    try {
      setError("")
      audioChunksRef.current = []
      const stream =
        await navigator.mediaDevices.getUserMedia(AUDIO_CONSTRAINTS)
      streamRef.current = stream
      const mimeType = getMimeType()
      const mediaRecorder = new MediaRecorder(stream, { mimeType })
      mediaRecorderRef.current = mediaRecorder
      mediaRecorder.ondataavailable = (event: BlobEvent) => {
        if (event.data.size > 0) {
          audioChunksRef.current.push(event.data)
        }
      }
      // Fires after stop(): assemble the buffered chunks and process them.
      mediaRecorder.onstop = () => {
        const audioBlob = new Blob(audioChunksRef.current, { type: mimeType })
        processAudio(audioBlob)
      }
      mediaRecorder.start()
      setIsRecording(true)
    } catch (err) {
      // getUserMedia rejection is most commonly a permission denial.
      setError("Microphone permission denied")
      console.error("Microphone error:", err)
    }
  }, [processAudio])
  const handleVoiceToggle = useCallback(() => {
    if (isRecording) {
      stopRecording()
    } else {
      startRecording()
    }
  }, [isRecording, startRecording, stopRecording])
  // Release the mic if the component unmounts mid-recording.
  useEffect(() => {
    return cleanupStream
  }, [cleanupStream])
  const onSubmit = (data: ExampleFormValues) => {
    console.log("Form submitted:", data)
  }
  // Collapse the four flags into the VoiceButton's single display state;
  // precedence: processing > recording > success > error > idle.
  const voiceState = isProcessing
    ? "processing"
    : isRecording
      ? "recording"
      : success
        ? "success"
        : error
          ? "error"
          : "idle"
  return (
    <div className="mx-auto w-full">
      <Card className="relative overflow-hidden">
        <div className={cn("flex flex-col gap-2")}>
          <CardHeader>
            <div className="flex items-start justify-between">
              <div className="space-y-1">
                <CardTitle>Voice Fill</CardTitle>
                <CardDescription>Powered by ElevenLabs Scribe</CardDescription>
              </div>
              <VoiceButton
                state={voiceState}
                onPress={handleVoiceToggle}
                disabled={isProcessing}
                trailing="Voice Fill"
              />
            </div>
          </CardHeader>
          <CardContent>
            <Form {...form}>
              <form
                onSubmit={form.handleSubmit(onSubmit)}
                className="space-y-6"
              >
                <div className="grid grid-cols-1 gap-4 sm:grid-cols-2">
                  <FormField
                    control={form.control}
                    name="firstName"
                    render={({ field }) => (
                      <FormItem>
                        <FormLabel>First Name *</FormLabel>
                        <FormControl>
                          <Input placeholder="John" {...field} />
                        </FormControl>
                        <FormMessage />
                      </FormItem>
                    )}
                  />
                  <FormField
                    control={form.control}
                    name="lastName"
                    render={({ field }) => (
                      <FormItem>
                        <FormLabel>Last Name *</FormLabel>
                        <FormControl>
                          <Input placeholder="Doe" {...field} />
                        </FormControl>
                        <FormMessage />
                      </FormItem>
                    )}
                  />
                </div>
              </form>
            </Form>
          </CardContent>
        </div>
      </Card>
    </div>
  )
}
Files
"use client"
import { PauseIcon, PlayIcon } from "lucide-react"
import { cn } from "@/lib/utils"
import {
AudioPlayerButton,
AudioPlayerDuration,
AudioPlayerProgress,
AudioPlayerProvider,
AudioPlayerTime,
exampleTracks,
useAudioPlayer,
} from "@/components/ui/audio-player"
import { Button } from "@/components/ui/button"
import { Card } from "@/components/ui/card"
import { ScrollArea } from "@/components/ui/scroll-area"
// Minimal track descriptor used by the playlist and player.
interface Track {
  id: string // stable list key and active-item identity
  name: string // display title
  url: string // audio source URL
}
// Route entry point: wraps the player UI in the shared audio-player context.
export default function Page() {
  return (
    <AudioPlayerProvider<Track>>
      <MusicPlayer />
    </AudioPlayerProvider>
  )
}
// Two-pane layout: scrollable playlist on the left (stacked on small
// screens), transport controls on the right.
const MusicPlayer = () => {
  return (
    <Card className="mx-auto w-full overflow-hidden p-0">
      <div className="flex flex-col lg:h-[180px] lg:flex-row">
        <div className="bg-muted/50 flex flex-col overflow-hidden lg:h-full lg:w-64">
          <ScrollArea className="h-48 w-full lg:h-full">
            <div className="space-y-1 p-3">
              {/* 1-based track numbers for display. */}
              {exampleTracks.map((song, index) => (
                <SongListItem
                  key={song.id}
                  song={song}
                  trackNumber={index + 1}
                />
              ))}
            </div>
          </ScrollArea>
        </div>
        <Player />
      </div>
    </Card>
  )
}
// Transport pane: active track title, play/pause button, and a scrub bar
// with elapsed/total time, all driven by the shared audio-player context.
const Player = () => {
  const player = useAudioPlayer<Track>()
  return (
    <div className="flex flex-1 items-center p-4 sm:p-6">
      <div className="mx-auto w-full max-w-2xl">
        <div className="mb-4">
          <h3 className="text-base font-semibold sm:text-lg">
            {player.activeItem?.data?.name ?? "No track selected"}
          </h3>
        </div>
        <div className="flex items-center gap-3 sm:gap-4">
          {/* Disabled until a playlist item has been activated. */}
          <AudioPlayerButton
            variant="outline"
            size="default"
            className="h-12 w-12 shrink-0 sm:h-10 sm:w-10"
            disabled={!player.activeItem}
          />
          <div className="flex flex-1 items-center gap-2 sm:gap-3">
            <AudioPlayerTime className="text-xs tabular-nums" />
            <AudioPlayerProgress className="flex-1" />
            <AudioPlayerDuration className="text-xs tabular-nums" />
          </div>
        </div>
      </div>
    </div>
  )
}
// One playlist row. Clicking pauses when this row is already playing,
// otherwise (re)starts playback of this row's track.
const SongListItem = ({
  song,
  trackNumber,
}: {
  song: Track
  trackNumber: number
}) => {
  const player = useAudioPlayer<Track>()
  const isActive = player.isItemActive(song.id)
  const isCurrentlyPlaying = isActive && player.isPlaying
  return (
    <div className="group/song relative">
      <Button
        variant={isActive ? "secondary" : "ghost"}
        size="sm"
        className={cn(
          "h-10 w-full justify-start px-3 font-normal sm:h-9 sm:px-2",
          isActive && "bg-secondary"
        )}
        onClick={() => {
          if (isCurrentlyPlaying) {
            player.pause()
          } else {
            player.play({
              id: song.id,
              src: song.url,
              data: song,
            })
          }
        }}
      >
        <div className="flex w-full items-center gap-3">
          {/* Shows the track number; swaps to a play icon on row hover,
              or a pause icon while this row is playing. */}
          <div className="flex w-5 shrink-0 items-center justify-center">
            {isCurrentlyPlaying ? (
              <PauseIcon className="h-4 w-4 sm:h-3.5 sm:w-3.5" />
            ) : (
              <>
                <span className="text-muted-foreground/60 text-sm tabular-nums group-hover/song:invisible">
                  {trackNumber}
                </span>
                <PlayIcon className="invisible absolute h-4 w-4 group-hover/song:visible sm:h-3.5 sm:w-3.5" />
              </>
            )}
          </div>
          <span className="truncate text-left text-sm">{song.name}</span>
        </div>
      </Button>
    </div>
  )
}
Music player with playlist
music-player-01
No track selected
0:00--:--
"use client"
import { PauseIcon, PlayIcon } from "lucide-react"
import { cn } from "@/lib/utils"
import {
AudioPlayerButton,
AudioPlayerDuration,
AudioPlayerProgress,
AudioPlayerProvider,
AudioPlayerTime,
exampleTracks,
useAudioPlayer,
} from "@/components/ui/audio-player"
import { Button } from "@/components/ui/button"
import { Card } from "@/components/ui/card"
import { ScrollArea } from "@/components/ui/scroll-area"
// Minimal track descriptor used by the playlist and player.
interface Track {
  id: string // stable list key and active-item identity
  name: string // display title
  url: string // audio source URL
}
// Route entry point: wraps the player UI in the shared audio-player context.
export default function Page() {
  return (
    <AudioPlayerProvider<Track>>
      <MusicPlayer />
    </AudioPlayerProvider>
  )
}
// Two-pane layout: scrollable playlist on the left (stacked on small
// screens), transport controls on the right.
const MusicPlayer = () => {
  return (
    <Card className="mx-auto w-full overflow-hidden p-0">
      <div className="flex flex-col lg:h-[180px] lg:flex-row">
        <div className="bg-muted/50 flex flex-col overflow-hidden lg:h-full lg:w-64">
          <ScrollArea className="h-48 w-full lg:h-full">
            <div className="space-y-1 p-3">
              {/* 1-based track numbers for display. */}
              {exampleTracks.map((song, index) => (
                <SongListItem
                  key={song.id}
                  song={song}
                  trackNumber={index + 1}
                />
              ))}
            </div>
          </ScrollArea>
        </div>
        <Player />
      </div>
    </Card>
  )
}
// Transport pane: active track title, play/pause button, and a scrub bar
// with elapsed/total time, all driven by the shared audio-player context.
const Player = () => {
  const player = useAudioPlayer<Track>()
  return (
    <div className="flex flex-1 items-center p-4 sm:p-6">
      <div className="mx-auto w-full max-w-2xl">
        <div className="mb-4">
          <h3 className="text-base font-semibold sm:text-lg">
            {player.activeItem?.data?.name ?? "No track selected"}
          </h3>
        </div>
        <div className="flex items-center gap-3 sm:gap-4">
          {/* Disabled until a playlist item has been activated. */}
          <AudioPlayerButton
            variant="outline"
            size="default"
            className="h-12 w-12 shrink-0 sm:h-10 sm:w-10"
            disabled={!player.activeItem}
          />
          <div className="flex flex-1 items-center gap-2 sm:gap-3">
            <AudioPlayerTime className="text-xs tabular-nums" />
            <AudioPlayerProgress className="flex-1" />
            <AudioPlayerDuration className="text-xs tabular-nums" />
          </div>
        </div>
      </div>
    </div>
  )
}
// One playlist row. Clicking pauses when this row is already playing,
// otherwise (re)starts playback of this row's track.
const SongListItem = ({
  song,
  trackNumber,
}: {
  song: Track
  trackNumber: number
}) => {
  const player = useAudioPlayer<Track>()
  const isActive = player.isItemActive(song.id)
  const isCurrentlyPlaying = isActive && player.isPlaying
  return (
    <div className="group/song relative">
      <Button
        variant={isActive ? "secondary" : "ghost"}
        size="sm"
        className={cn(
          "h-10 w-full justify-start px-3 font-normal sm:h-9 sm:px-2",
          isActive && "bg-secondary"
        )}
        onClick={() => {
          if (isCurrentlyPlaying) {
            player.pause()
          } else {
            player.play({
              id: song.id,
              src: song.url,
              data: song,
            })
          }
        }}
      >
        <div className="flex w-full items-center gap-3">
          {/* Shows the track number; swaps to a play icon on row hover,
              or a pause icon while this row is playing. */}
          <div className="flex w-5 shrink-0 items-center justify-center">
            {isCurrentlyPlaying ? (
              <PauseIcon className="h-4 w-4 sm:h-3.5 sm:w-3.5" />
            ) : (
              <>
                <span className="text-muted-foreground/60 text-sm tabular-nums group-hover/song:invisible">
                  {trackNumber}
                </span>
                <PlayIcon className="invisible absolute h-4 w-4 group-hover/song:visible sm:h-3.5 sm:w-3.5" />
              </>
            )}
          </div>
          <span className="truncate text-left text-sm">{song.name}</span>
        </div>
      </Button>
    </div>
  )
}
Files
"use client"
import {
AudioPlayerButton,
AudioPlayerDuration,
AudioPlayerProgress,
AudioPlayerProvider,
AudioPlayerTime,
exampleTracks,
useAudioPlayer,
} from "@/components/ui/audio-player"
import { Card } from "@/components/ui/card"
// Route entry point: wraps the single-track demo in the audio-player context.
export default function Page() {
  return (
    <AudioPlayerProvider>
      <MusicPlayerDemo />
    </AudioPlayerProvider>
  )
}
// Single-track player card: title, play/pause button bound to one fixed
// track, and a scrub bar.
const MusicPlayerDemo = () => {
  const player = useAudioPlayer<{ name: string }>()
  // NOTE(review): index 9 is hard-coded and assumes exampleTracks has at
  // least 10 entries — confirm against the exampleTracks fixture.
  const track = exampleTracks[9]
  return (
    <Card className="w-full overflow-hidden p-4">
      <div className="space-y-4">
        <div>
          {/* Falls back to the fixed track's name before playback starts. */}
          <h3 className="text-base font-semibold">
            {player.activeItem?.data?.name || track.name}
          </h3>
        </div>
        <div className="flex items-center gap-3">
          <AudioPlayerButton
            variant="outline"
            size="default"
            className="h-10 w-10 shrink-0"
            item={{
              id: track.id,
              src: track.url,
              data: track,
            }}
          />
          <div className="flex flex-1 items-center gap-2">
            <AudioPlayerTime className="text-xs tabular-nums" />
            <AudioPlayerProgress className="flex-1" />
            <AudioPlayerDuration className="text-xs tabular-nums" />
          </div>
        </div>
      </div>
    </Card>
  )
}
Simple music player
music-player-02
II - 09
0:00--:--
"use client"
import {
AudioPlayerButton,
AudioPlayerDuration,
AudioPlayerProgress,
AudioPlayerProvider,
AudioPlayerTime,
exampleTracks,
useAudioPlayer,
} from "@/components/ui/audio-player"
import { Card } from "@/components/ui/card"
// Route entry point: wraps the single-track demo in the audio-player context.
export default function Page() {
  return (
    <AudioPlayerProvider>
      <MusicPlayerDemo />
    </AudioPlayerProvider>
  )
}
// Single-track player card: title, play/pause button bound to one fixed
// track, and a scrub bar.
const MusicPlayerDemo = () => {
  const player = useAudioPlayer<{ name: string }>()
  // NOTE(review): index 9 is hard-coded and assumes exampleTracks has at
  // least 10 entries — confirm against the exampleTracks fixture.
  const track = exampleTracks[9]
  return (
    <Card className="w-full overflow-hidden p-4">
      <div className="space-y-4">
        <div>
          {/* Falls back to the fixed track's name before playback starts. */}
          <h3 className="text-base font-semibold">
            {player.activeItem?.data?.name || track.name}
          </h3>
        </div>
        <div className="flex items-center gap-3">
          <AudioPlayerButton
            variant="outline"
            size="default"
            className="h-10 w-10 shrink-0"
            item={{
              id: track.id,
              src: track.url,
              data: track,
            }}
          />
          <div className="flex flex-1 items-center gap-2">
            <AudioPlayerTime className="text-xs tabular-nums" />
            <AudioPlayerProgress className="flex-1" />
            <AudioPlayerDuration className="text-xs tabular-nums" />
          </div>
        </div>
      </div>
    </Card>
  )
}
Files
"use client"
import { useCallback, useState } from "react"
import { useConversation } from "@elevenlabs/react"
import { AnimatePresence, motion } from "framer-motion"
import { Loader2Icon, PhoneIcon, PhoneOffIcon } from "lucide-react"
import { cn } from "@/lib/utils"
import { Button } from "@/components/ui/button"
import { Card } from "@/components/ui/card"
import { Orb } from "@/components/ui/orb"
import { ShimmeringText } from "@/components/ui/shimmering-text"
// Agent shown by the demo. The non-null assertion assumes
// NEXT_PUBLIC_ELEVENLABS_AGENT_ID is defined at build time — TODO confirm
// the env var is always set, otherwise agentId is silently undefined.
const DEFAULT_AGENT = {
  agentId: process.env.NEXT_PUBLIC_ELEVENLABS_AGENT_ID!,
  name: "Customer Support",
  description: "Tap to start voice chat",
}
// Voice-session lifecycle; the UI treats `null` the same as "disconnected".
type AgentState =
  | "disconnected"
  | "connecting"
  | "connected"
  | "disconnecting"
  | null
export default function Page() {
const [agentState, setAgentState] = useState<AgentState>("disconnected")
const [errorMessage, setErrorMessage] = useState<string | null>(null)
const conversation = useConversation({
onConnect: () => console.log("Connected"),
onDisconnect: () => console.log("Disconnected"),
onMessage: (message) => console.log("Message:", message),
onError: (error) => {
console.error("Error:", error)
setAgentState("disconnected")
},
})
const startConversation = useCallback(async () => {
try {
setErrorMessage(null)
await navigator.mediaDevices.getUserMedia({ audio: true })
await conversation.startSession({
agentId: DEFAULT_AGENT.agentId,
connectionType: "webrtc",
onStatusChange: (status) => setAgentState(status.status),
})
} catch (error) {
console.error("Error starting conversation:", error)
setAgentState("disconnected")
if (error instanceof DOMException && error.name === "NotAllowedError") {
setErrorMessage("Please enable microphone permissions in your browser.")
}
}
}, [conversation])
const handleCall = useCallback(() => {
if (agentState === "disconnected" || agentState === null) {
setAgentState("connecting")
startConversation()
} else if (agentState === "connected") {
conversation.endSession()
setAgentState("disconnected")
}
}, [agentState, conversation, startConversation])
const isCallActive = agentState === "connected"
const isTransitioning =
agentState === "connecting" || agentState === "disconnecting"
const getInputVolume = useCallback(() => {
const rawValue = conversation.getInputVolume?.() ?? 0
return Math.min(1.0, Math.pow(rawValue, 0.5) * 2.5)
}, [conversation])
const getOutputVolume = useCallback(() => {
const rawValue = conversation.getOutputVolume?.() ?? 0
return Math.min(1.0, Math.pow(rawValue, 0.5) * 2.5)
}, [conversation])
return (
<Card className="flex h-[400px] w-full flex-col items-center justify-center overflow-hidden p-6">
<div className="flex flex-col items-center gap-6">
<div className="relative size-32">
<div className="bg-muted relative h-full w-full rounded-full p-1 shadow-[inset_0_2px_8px_rgba(0,0,0,0.1)] dark:shadow-[inset_0_2px_8px_rgba(0,0,0,0.5)]">
<div className="bg-background h-full w-full overflow-hidden rounded-full shadow-[inset_0_0_12px_rgba(0,0,0,0.05)] dark:shadow-[inset_0_0_12px_rgba(0,0,0,0.3)]">
<Orb
className="h-full w-full"
volumeMode="manual"
getInputVolume={getInputVolume}
getOutputVolume={getOutputVolume}
/>
</div>
</div>
</div>
<div className="flex flex-col items-center gap-2">
<h2 className="text-xl font-semibold">{DEFAULT_AGENT.name}</h2>
<AnimatePresence mode="wait">
{errorMessage ? (
<motion.p
key="error"
initial={{ opacity: 0, y: -10 }}
animate={{ opacity: 1, y: 0 }}
exit={{ opacity: 0, y: 10 }}
className="text-destructive text-center text-sm"
>
{errorMessage}
</motion.p>
) : agentState === "disconnected" || agentState === null ? (
<motion.p
key="disconnected"
initial={{ opacity: 0, y: -10 }}
animate={{ opacity: 1, y: 0 }}
exit={{ opacity: 0, y: 10 }}
className="text-muted-foreground text-sm"
>
{DEFAULT_AGENT.description}
</motion.p>
) : (
<motion.div
key="status"
initial={{ opacity: 0, y: -10 }}
animate={{ opacity: 1, y: 0 }}
exit={{ opacity: 0, y: 10 }}
className="flex items-center gap-2"
>
<div
className={cn(
"h-2 w-2 rounded-full transition-all duration-300",
agentState === "connected" && "bg-green-500",
isTransitioning && "bg-primary/60 animate-pulse"
)}
/>
<span className="text-sm capitalize">
{isTransitioning ? (
<ShimmeringText text={agentState} />
) : (
<span className="text-green-600">Connected</span>
)}
</span>
</motion.div>
)}
</AnimatePresence>
</div>
<Button
onClick={handleCall}
disabled={isTransitioning}
size="icon"
variant={isCallActive ? "secondary" : "default"}
className="h-12 w-12 rounded-full"
>
<AnimatePresence mode="wait">
{isTransitioning ? (
<motion.div
key="loading"
initial={{ opacity: 0, rotate: 0 }}
animate={{ opacity: 1, rotate: 360 }}
exit={{ opacity: 0 }}
transition={{
rotate: { duration: 1, repeat: Infinity, ease: "linear" },
}}
>
<Loader2Icon className="h-5 w-5" />
</motion.div>
) : isCallActive ? (
<motion.div
key="end"
initial={{ opacity: 0, scale: 0.5 }}
animate={{ opacity: 1, scale: 1 }}
exit={{ opacity: 0, scale: 0.5 }}
>
<PhoneOffIcon className="h-5 w-5" />
</motion.div>
) : (
<motion.div
key="start"
initial={{ opacity: 0, scale: 0.5 }}
animate={{ opacity: 1, scale: 1 }}
exit={{ opacity: 0, scale: 0.5 }}
>
<PhoneIcon className="h-5 w-5" />
</motion.div>
)}
</AnimatePresence>
</Button>
</div>
</Card>
)
}
Voice chat 2
voice-chat-02
Customer Support
Tap to start voice chat
"use client"
import { useCallback, useState } from "react"
import { useConversation } from "@elevenlabs/react"
import { AnimatePresence, motion } from "framer-motion"
import { Loader2Icon, PhoneIcon, PhoneOffIcon } from "lucide-react"
import { cn } from "@/lib/utils"
import { Button } from "@/components/ui/button"
import { Card } from "@/components/ui/card"
import { Orb } from "@/components/ui/orb"
import { ShimmeringText } from "@/components/ui/shimmering-text"
// Agent presented by the demo. The ID is read from the public environment at
// build time; NOTE(review): the non-null assertion (!) means an unset
// NEXT_PUBLIC_ELEVENLABS_AGENT_ID silently becomes `undefined` at runtime.
const DEFAULT_AGENT = {
agentId: process.env.NEXT_PUBLIC_ELEVENLABS_AGENT_ID!,
name: "Customer Support",
description: "Tap to start voice chat",
}
// Voice-session lifecycle as surfaced to the UI. `null` is treated the same
// as "disconnected" everywhere it is checked below.
type AgentState =
| "disconnected"
| "connecting"
| "connected"
| "disconnecting"
| null
export default function Page() {
const [agentState, setAgentState] = useState<AgentState>("disconnected")
const [errorMessage, setErrorMessage] = useState<string | null>(null)
const conversation = useConversation({
onConnect: () => console.log("Connected"),
onDisconnect: () => console.log("Disconnected"),
onMessage: (message) => console.log("Message:", message),
onError: (error) => {
console.error("Error:", error)
setAgentState("disconnected")
},
})
const startConversation = useCallback(async () => {
try {
setErrorMessage(null)
await navigator.mediaDevices.getUserMedia({ audio: true })
await conversation.startSession({
agentId: DEFAULT_AGENT.agentId,
connectionType: "webrtc",
onStatusChange: (status) => setAgentState(status.status),
})
} catch (error) {
console.error("Error starting conversation:", error)
setAgentState("disconnected")
if (error instanceof DOMException && error.name === "NotAllowedError") {
setErrorMessage("Please enable microphone permissions in your browser.")
}
}
}, [conversation])
const handleCall = useCallback(() => {
if (agentState === "disconnected" || agentState === null) {
setAgentState("connecting")
startConversation()
} else if (agentState === "connected") {
conversation.endSession()
setAgentState("disconnected")
}
}, [agentState, conversation, startConversation])
const isCallActive = agentState === "connected"
const isTransitioning =
agentState === "connecting" || agentState === "disconnecting"
const getInputVolume = useCallback(() => {
const rawValue = conversation.getInputVolume?.() ?? 0
return Math.min(1.0, Math.pow(rawValue, 0.5) * 2.5)
}, [conversation])
const getOutputVolume = useCallback(() => {
const rawValue = conversation.getOutputVolume?.() ?? 0
return Math.min(1.0, Math.pow(rawValue, 0.5) * 2.5)
}, [conversation])
return (
<Card className="flex h-[400px] w-full flex-col items-center justify-center overflow-hidden p-6">
<div className="flex flex-col items-center gap-6">
<div className="relative size-32">
<div className="bg-muted relative h-full w-full rounded-full p-1 shadow-[inset_0_2px_8px_rgba(0,0,0,0.1)] dark:shadow-[inset_0_2px_8px_rgba(0,0,0,0.5)]">
<div className="bg-background h-full w-full overflow-hidden rounded-full shadow-[inset_0_0_12px_rgba(0,0,0,0.05)] dark:shadow-[inset_0_0_12px_rgba(0,0,0,0.3)]">
<Orb
className="h-full w-full"
volumeMode="manual"
getInputVolume={getInputVolume}
getOutputVolume={getOutputVolume}
/>
</div>
</div>
</div>
<div className="flex flex-col items-center gap-2">
<h2 className="text-xl font-semibold">{DEFAULT_AGENT.name}</h2>
<AnimatePresence mode="wait">
{errorMessage ? (
<motion.p
key="error"
initial={{ opacity: 0, y: -10 }}
animate={{ opacity: 1, y: 0 }}
exit={{ opacity: 0, y: 10 }}
className="text-destructive text-center text-sm"
>
{errorMessage}
</motion.p>
) : agentState === "disconnected" || agentState === null ? (
<motion.p
key="disconnected"
initial={{ opacity: 0, y: -10 }}
animate={{ opacity: 1, y: 0 }}
exit={{ opacity: 0, y: 10 }}
className="text-muted-foreground text-sm"
>
{DEFAULT_AGENT.description}
</motion.p>
) : (
<motion.div
key="status"
initial={{ opacity: 0, y: -10 }}
animate={{ opacity: 1, y: 0 }}
exit={{ opacity: 0, y: 10 }}
className="flex items-center gap-2"
>
<div
className={cn(
"h-2 w-2 rounded-full transition-all duration-300",
agentState === "connected" && "bg-green-500",
isTransitioning && "bg-primary/60 animate-pulse"
)}
/>
<span className="text-sm capitalize">
{isTransitioning ? (
<ShimmeringText text={agentState} />
) : (
<span className="text-green-600">Connected</span>
)}
</span>
</motion.div>
)}
</AnimatePresence>
</div>
<Button
onClick={handleCall}
disabled={isTransitioning}
size="icon"
variant={isCallActive ? "secondary" : "default"}
className="h-12 w-12 rounded-full"
>
<AnimatePresence mode="wait">
{isTransitioning ? (
<motion.div
key="loading"
initial={{ opacity: 0, rotate: 0 }}
animate={{ opacity: 1, rotate: 360 }}
exit={{ opacity: 0 }}
transition={{
rotate: { duration: 1, repeat: Infinity, ease: "linear" },
}}
>
<Loader2Icon className="h-5 w-5" />
</motion.div>
) : isCallActive ? (
<motion.div
key="end"
initial={{ opacity: 0, scale: 0.5 }}
animate={{ opacity: 1, scale: 1 }}
exit={{ opacity: 0, scale: 0.5 }}
>
<PhoneOffIcon className="h-5 w-5" />
</motion.div>
) : (
<motion.div
key="start"
initial={{ opacity: 0, scale: 0.5 }}
animate={{ opacity: 1, scale: 1 }}
exit={{ opacity: 0, scale: 0.5 }}
>
<PhoneIcon className="h-5 w-5" />
</motion.div>
)}
</AnimatePresence>
</Button>
</div>
</Card>
)
}
Files
"use client"
import { useState } from "react"
import { CheckIcon, CopyIcon } from "lucide-react"
import { cn } from "@/lib/utils"
import { Button } from "@/components/ui/button"
import { Card, CardContent } from "@/components/ui/card"
import {
Conversation,
ConversationContent,
ConversationEmptyState,
ConversationScrollButton,
} from "@/components/ui/conversation"
import { ConversationBar } from "@/components/ui/conversation-bar"
import { Message, MessageContent } from "@/components/ui/message"
import { Orb } from "@/components/ui/orb"
import { Response } from "@/components/ui/response"
import {
Tooltip,
TooltipContent,
TooltipProvider,
TooltipTrigger,
} from "@/components/ui/tooltip"
// Agent to connect to. NOTE(review): the `!` assertion yields `undefined` at
// runtime if NEXT_PUBLIC_ELEVENLABS_AGENT_ID is unset.
const DEFAULT_AGENT_ID = process.env.NEXT_PUBLIC_ELEVENLABS_AGENT_ID!
// A single transcript entry rendered in the conversation list.
interface ChatMessage {
role: "user" | "assistant"
content: string
}
/**
 * Voice + text chat demo.
 *
 * Shows the running transcript (user and assistant messages) above a
 * ConversationBar that handles both voice sessions and typed input.
 * Assistant messages get a copy-to-clipboard button with a transient
 * "Copied!" indicator.
 */
export default function Page() {
  const [messages, setMessages] = useState<ChatMessage[]>([])
  // Index of the message whose "Copied!" indicator is currently shown.
  const [copiedIndex, setCopiedIndex] = useState<number | null>(null)

  // Copy a message and show the indicator for 2 seconds.
  const handleCopy = (content: string, index: number) => {
    // writeText rejects when the document loses focus or permission is
    // denied; log instead of surfacing an unhandled promise rejection.
    navigator.clipboard.writeText(content).catch((error) => {
      console.error("Failed to copy message:", error)
    })
    setCopiedIndex(index)
    // Functional update so a stale timeout from an earlier copy cannot
    // clear the indicator of a newer copy made within the 2s window.
    setTimeout(
      () => setCopiedIndex((prev) => (prev === index ? null : prev)),
      2000
    )
  }

  return (
    <div className="relative mx-auto h-[600px] w-full">
      <Card className="flex h-full w-full flex-col gap-0 overflow-hidden">
        <CardContent className="relative flex-1 overflow-hidden p-0">
          {/* Bottom padding leaves room for the absolutely-positioned bar */}
          <Conversation className="absolute inset-0 pb-[88px]">
            <ConversationContent className="flex min-w-0 flex-col gap-2 p-6 pb-6">
              {messages.length === 0 ? (
                <ConversationEmptyState
                  icon={<Orb className="size-12" />}
                  title="Start a conversation"
                  description="Tap the phone button or type a message"
                />
              ) : (
                // Index keys are safe here: the list is append-only.
                messages.map((message, index) => {
                  return (
                    <div key={index} className="flex w-full flex-col gap-1">
                      <Message from={message.role}>
                        <MessageContent className="max-w-full min-w-0">
                          <Response className="w-auto [overflow-wrap:anywhere] whitespace-pre-wrap">
                            {message.content}
                          </Response>
                        </MessageContent>
                        {message.role === "assistant" && (
                          <div className="ring-border size-6 flex-shrink-0 self-end overflow-hidden rounded-full ring-1">
                            <Orb className="h-full w-full" />
                          </div>
                        )}
                      </Message>
                      {message.role === "assistant" && (
                        <div className="flex items-center gap-1">
                          <TooltipProvider>
                            <Tooltip>
                              <TooltipTrigger asChild>
                                <Button
                                  className={cn(
                                    "text-muted-foreground hover:text-foreground relative size-9 p-1.5"
                                  )}
                                  size="sm"
                                  type="button"
                                  variant="ghost"
                                  onClick={() =>
                                    handleCopy(message.content, index)
                                  }
                                >
                                  {copiedIndex === index ? (
                                    <CheckIcon className="size-4" />
                                  ) : (
                                    <CopyIcon className="size-4" />
                                  )}
                                  <span className="sr-only">
                                    {copiedIndex === index ? "Copied!" : "Copy"}
                                  </span>
                                </Button>
                              </TooltipTrigger>
                              <TooltipContent>
                                <p>
                                  {copiedIndex === index ? "Copied!" : "Copy"}
                                </p>
                              </TooltipContent>
                            </Tooltip>
                          </TooltipProvider>
                        </div>
                      )}
                    </div>
                  )
                })
              )}
            </ConversationContent>
            <ConversationScrollButton className="bottom-[100px]" />
          </Conversation>
          <div className="absolute right-0 bottom-0 left-0 flex justify-center">
            <ConversationBar
              className="w-full max-w-2xl"
              agentId={DEFAULT_AGENT_ID}
              onConnect={() => setMessages([])}
              onDisconnect={() => setMessages([])}
              onSendMessage={(message) => {
                const userMessage: ChatMessage = {
                  role: "user",
                  content: message,
                }
                setMessages((prev) => [...prev, userMessage])
              }}
              onMessage={(message) => {
                const newMessage: ChatMessage = {
                  role: message.source === "user" ? "user" : "assistant",
                  content: message.message,
                }
                setMessages((prev) => [...prev, newMessage])
              }}
              onError={(error) => console.error("Conversation error:", error)}
            />
          </div>
        </CardContent>
      </Card>
    </div>
  )
}
Voice chat 3
voice-chat-03
Start a conversation
Tap the phone button or type a message
Customer Support
"use client"
import { useState } from "react"
import { CheckIcon, CopyIcon } from "lucide-react"
import { cn } from "@/lib/utils"
import { Button } from "@/components/ui/button"
import { Card, CardContent } from "@/components/ui/card"
import {
Conversation,
ConversationContent,
ConversationEmptyState,
ConversationScrollButton,
} from "@/components/ui/conversation"
import { ConversationBar } from "@/components/ui/conversation-bar"
import { Message, MessageContent } from "@/components/ui/message"
import { Orb } from "@/components/ui/orb"
import { Response } from "@/components/ui/response"
import {
Tooltip,
TooltipContent,
TooltipProvider,
TooltipTrigger,
} from "@/components/ui/tooltip"
// Agent to connect to. NOTE(review): the `!` assertion yields `undefined` at
// runtime if NEXT_PUBLIC_ELEVENLABS_AGENT_ID is unset.
const DEFAULT_AGENT_ID = process.env.NEXT_PUBLIC_ELEVENLABS_AGENT_ID!
// A single transcript entry rendered in the conversation list.
interface ChatMessage {
role: "user" | "assistant"
content: string
}
/**
 * Voice + text chat demo.
 *
 * Shows the running transcript (user and assistant messages) above a
 * ConversationBar that handles both voice sessions and typed input.
 * Assistant messages get a copy-to-clipboard button with a transient
 * "Copied!" indicator.
 */
export default function Page() {
  const [messages, setMessages] = useState<ChatMessage[]>([])
  // Index of the message whose "Copied!" indicator is currently shown.
  const [copiedIndex, setCopiedIndex] = useState<number | null>(null)

  // Copy a message and show the indicator for 2 seconds.
  const handleCopy = (content: string, index: number) => {
    // writeText rejects when the document loses focus or permission is
    // denied; log instead of surfacing an unhandled promise rejection.
    navigator.clipboard.writeText(content).catch((error) => {
      console.error("Failed to copy message:", error)
    })
    setCopiedIndex(index)
    // Functional update so a stale timeout from an earlier copy cannot
    // clear the indicator of a newer copy made within the 2s window.
    setTimeout(
      () => setCopiedIndex((prev) => (prev === index ? null : prev)),
      2000
    )
  }

  return (
    <div className="relative mx-auto h-[600px] w-full">
      <Card className="flex h-full w-full flex-col gap-0 overflow-hidden">
        <CardContent className="relative flex-1 overflow-hidden p-0">
          {/* Bottom padding leaves room for the absolutely-positioned bar */}
          <Conversation className="absolute inset-0 pb-[88px]">
            <ConversationContent className="flex min-w-0 flex-col gap-2 p-6 pb-6">
              {messages.length === 0 ? (
                <ConversationEmptyState
                  icon={<Orb className="size-12" />}
                  title="Start a conversation"
                  description="Tap the phone button or type a message"
                />
              ) : (
                // Index keys are safe here: the list is append-only.
                messages.map((message, index) => {
                  return (
                    <div key={index} className="flex w-full flex-col gap-1">
                      <Message from={message.role}>
                        <MessageContent className="max-w-full min-w-0">
                          <Response className="w-auto [overflow-wrap:anywhere] whitespace-pre-wrap">
                            {message.content}
                          </Response>
                        </MessageContent>
                        {message.role === "assistant" && (
                          <div className="ring-border size-6 flex-shrink-0 self-end overflow-hidden rounded-full ring-1">
                            <Orb className="h-full w-full" />
                          </div>
                        )}
                      </Message>
                      {message.role === "assistant" && (
                        <div className="flex items-center gap-1">
                          <TooltipProvider>
                            <Tooltip>
                              <TooltipTrigger asChild>
                                <Button
                                  className={cn(
                                    "text-muted-foreground hover:text-foreground relative size-9 p-1.5"
                                  )}
                                  size="sm"
                                  type="button"
                                  variant="ghost"
                                  onClick={() =>
                                    handleCopy(message.content, index)
                                  }
                                >
                                  {copiedIndex === index ? (
                                    <CheckIcon className="size-4" />
                                  ) : (
                                    <CopyIcon className="size-4" />
                                  )}
                                  <span className="sr-only">
                                    {copiedIndex === index ? "Copied!" : "Copy"}
                                  </span>
                                </Button>
                              </TooltipTrigger>
                              <TooltipContent>
                                <p>
                                  {copiedIndex === index ? "Copied!" : "Copy"}
                                </p>
                              </TooltipContent>
                            </Tooltip>
                          </TooltipProvider>
                        </div>
                      )}
                    </div>
                  )
                })
              )}
            </ConversationContent>
            <ConversationScrollButton className="bottom-[100px]" />
          </Conversation>
          <div className="absolute right-0 bottom-0 left-0 flex justify-center">
            <ConversationBar
              className="w-full max-w-2xl"
              agentId={DEFAULT_AGENT_ID}
              onConnect={() => setMessages([])}
              onDisconnect={() => setMessages([])}
              onSendMessage={(message) => {
                const userMessage: ChatMessage = {
                  role: "user",
                  content: message,
                }
                setMessages((prev) => [...prev, userMessage])
              }}
              onMessage={(message) => {
                const newMessage: ChatMessage = {
                  role: message.source === "user" ? "user" : "assistant",
                  content: message.message,
                }
                setMessages((prev) => [...prev, newMessage])
              }}
              onError={(error) => console.error("Conversation error:", error)}
            />
          </div>
        </CardContent>
      </Card>
    </div>
  )
}