TypeScript SDK
Complete TypeScript SDK documentation for SpeakEasy.
npm install @arach/speakeasy
import { say, speak, SpeakEasy } from '@arach/speakeasy';
// One-liner with system voice
await say('Hello, world!');
// With specific provider
await say('Hello, world!', 'openai');
// Full configuration
const speaker = new SpeakEasy({
provider: 'openai',
openaiVoice: 'nova',
rate: 180,
cache: { enabled: true }
});
await speaker.speak('Hello, world!');
SpeakEasy is the main class for text-to-speech operations, with full configuration control.
class SpeakEasy {
constructor(config?: SpeakEasyConfig)
// Primary methods
async speak(text: string, options?: SpeakEasyOptions): Promise<void>
async interrupt(): Promise<void>
// Queue management
clearQueue(): void
getQueueLength(): number
// Cache operations
async getCacheStats(): Promise<CacheStats>
}
interface SpeakEasyConfig {
provider?: 'system' | 'openai' | 'elevenlabs' | 'groq';
systemVoice?: string;
openaiVoice?: 'alloy' | 'echo' | 'fable' | 'onyx' | 'nova' | 'shimmer';
elevenlabsVoiceId?: string;
rate?: number;
volume?: number;
apiKeys?: {
openai?: string;
elevenlabs?: string;
groq?: string;
};
tempDir?: string;
debug?: boolean;
cache?: {
enabled?: boolean;
ttl?: string | number; // '7d', '1h', 86400000, etc.
maxSize?: string | number; // '100mb', '1gb', 104857600, etc.
dir?: string;
};
}
interface SpeakEasyOptions {
priority?: 'high' | 'normal' | 'low';
interrupt?: boolean;
cleanup?: boolean;
}
say() is a simple one-liner function for quick text-to-speech.
async function say(
text: string,
provider?: 'system' | 'openai' | 'elevenlabs' | 'groq'
): Promise<void>
speak() is an enhanced function with full options support.
async function speak(
text: string,
options?: SpeakEasyOptions & { provider?: string }
): Promise<void>
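For example, a single speak() call can select a provider and set options in one place (a minimal sketch using only the options documented above):
import { speak } from '@arach/speakeasy';
// Speak with an explicit provider, at high priority, interrupting any current speech
await speak('Deployment finished', {
  provider: 'openai',
  priority: 'high',
  interrupt: true
});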
import { say } from '@arach/speakeasy';
// System voice (macOS built-in)
await say('Hello from system voice');
// Specific providers
await say('Hello from OpenAI', 'openai');
await say('Hello from ElevenLabs', 'elevenlabs');
await say('Hello from Groq', 'groq');
import { SpeakEasy } from '@arach/speakeasy';
const speaker = new SpeakEasy({
provider: 'openai',
openaiVoice: 'nova',
rate: 200,
volume: 0.8,
cache: { enabled: true }
});
await speaker.speak('Hello, world!');
await speaker.speak('High priority message', { priority: 'high' });
const systemSpeaker = new SpeakEasy({
provider: 'system',
systemVoice: 'Samantha', // or 'Alex', 'Victoria', etc.
rate: 180,
volume: 0.7
});
await systemSpeaker.speak('Using macOS built-in voice');
const openaiSpeaker = new SpeakEasy({
provider: 'openai',
openaiVoice: 'nova', // alloy, echo, fable, onyx, nova, shimmer
rate: 200,
apiKeys: {
openai: process.env.OPENAI_API_KEY
}
});
await openaiSpeaker.speak('Using OpenAI TTS-1 model');
const elevenlabsSpeaker = new SpeakEasy({
provider: 'elevenlabs',
elevenlabsVoiceId: 'EXAVITQu4vr4xnSDxMaL',
rate: 180,
apiKeys: {
elevenlabs: process.env.ELEVENLABS_API_KEY
}
});
await elevenlabsSpeaker.speak('Using ElevenLabs voice synthesis');
const groqSpeaker = new SpeakEasy({
provider: 'groq',
rate: 220,
apiKeys: {
groq: process.env.GROQ_API_KEY
}
});
await groqSpeaker.speak('Using Groq fast inference');
SpeakEasy includes a priority-based queue system for managing multiple speech requests.
// High priority - plays immediately, interrupts current speech
await speaker.speak('Urgent alert!', { priority: 'high', interrupt: true });
// Normal priority - queued in order (default)
await speaker.speak('Regular message', { priority: 'normal' });
// Low priority - queued after normal priority items
await speaker.speak('Background info', { priority: 'low' });
// Check queue status
const queueLength = speaker.getQueueLength();
console.log(`${queueLength} items in queue`);
// Clear all queued items
speaker.clearQueue();
// Interrupt current speech and clear queue
await speaker.interrupt();
Caching is automatically enabled for API-based providers when API keys are present:
const speaker = new SpeakEasy({
provider: 'openai',
apiKeys: { openai: process.env.OPENAI_API_KEY }
// cache automatically enabled
});
// First call - generates and caches audio
await speaker.speak('Hello, world!');
// Second call - uses cached audio (much faster)
await speaker.speak('Hello, world!');
const speaker = new SpeakEasy({
provider: 'openai',
cache: {
enabled: true,
ttl: '7d', // 7 days
maxSize: '100mb', // 100 megabytes
dir: '/custom/cache/path'
}
});
// Get cache statistics
const stats = await speaker.getCacheStats();
console.log(`Cache has ${stats.totalEntries} entries`);
console.log(`Total size: ${stats.totalSize} bytes`);
console.log(`Hit rate: ${stats.hitRate * 100}%`);
// Cache is automatically managed - no manual operations needed
import { SpeakEasy } from '@arach/speakeasy';
const speaker = new SpeakEasy({ provider: 'openai' });
try {
await speaker.speak('Hello, world!');
} catch (error) {
console.error('Speech failed:', error.message);
// Fallback to system voice
const fallback = new SpeakEasy({ provider: 'system' });
await fallback.speak('Fallback message');
}
// API key missing
try {
await say('Hello', 'openai');
} catch (error) {
if (error.message.includes('API key')) {
console.error('Set OPENAI_API_KEY environment variable');
}
}
// Rate limit exceeded
try {
await say('Hello', 'elevenlabs');
} catch (error) {
if (error.message.includes('rate limit')) {
console.error('Too many requests, try again later');
}
}
// Network/provider issues
try {
await say('Hello', 'openai');
} catch (error) {
// Fall back to the system voice
await say('Fallback message', 'system');
}
// Different speakers for different purposes
const alertSpeaker = new SpeakEasy({
provider: 'openai',
openaiVoice: 'nova',
volume: 0.9,
rate: 220
});
const infoSpeaker = new SpeakEasy({
provider: 'system',
systemVoice: 'Samantha',
volume: 0.6,
rate: 180
});
// Use appropriate speaker for context
await alertSpeaker.speak('Critical error occurred!', { priority: 'high' });
await infoSpeaker.speak('Process completed successfully');
class NotificationSpeaker {
private speaker: SpeakEasy;
constructor() {
this.speaker = new SpeakEasy({
provider: 'openai',
openaiVoice: 'nova',
cache: { enabled: true }
});
}
async success(message: string) {
await this.speaker.speak(`Success: ${message}`, { priority: 'normal' });
}
async warning(message: string) {
await this.speaker.speak(`Warning: ${message}`, { priority: 'high' });
}
async error(message: string) {
await this.speaker.speak(`Error: ${message}`, {
priority: 'high',
interrupt: true
});
}
}
// Usage
const notifications = new NotificationSpeaker();
await notifications.success('Build completed');
await notifications.error('Build failed');
// Hooked notification handler
import { say } from '@arach/speakeasy';
export async function speakNotification(message: string, project: string) {
// Customize message for speech
const spokenMessage = `In ${project}, ${message}`;
try {
// Try OpenAI first, fallback to system
await say(spokenMessage, 'openai');
} catch (error) {
await say(spokenMessage, 'system');
}
}
// Usage in hooks
await speakNotification('Claude needs your permission', 'SpeakEasy');
await speakNotification('Build completed successfully', 'MyProject');
// Main configuration interface
interface SpeakEasyConfig {
provider?: 'system' | 'openai' | 'elevenlabs' | 'groq';
systemVoice?: string;
openaiVoice?: 'alloy' | 'echo' | 'fable' | 'onyx' | 'nova' | 'shimmer';
elevenlabsVoiceId?: string;
rate?: number;
volume?: number;
apiKeys?: {
openai?: string;
elevenlabs?: string;
groq?: string;
};
tempDir?: string;
debug?: boolean;
cache?: CacheConfig;
}
// Cache configuration
interface CacheConfig {
enabled?: boolean;
ttl?: string | number;
maxSize?: string | number;
dir?: string;
}
// Speech options
interface SpeakEasyOptions {
priority?: 'high' | 'normal' | 'low';
interrupt?: boolean;
cleanup?: boolean;
}
// Cache statistics
interface CacheStats {
totalEntries: number;
totalSize: number;
cacheHits: number;
cacheMisses: number;
hitRate: number;
avgFileSize: number;
dir?: string;
}
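As a quick illustration of the remaining CacheStats fields (hit/miss counters and average file size), assuming speaker is any instance with caching enabled:
const stats = await speaker.getCacheStats();
console.log(`Hits: ${stats.cacheHits}, misses: ${stats.cacheMisses}`);
console.log(`Average file size: ${stats.avgFileSize} bytes`);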
// Use environment variables for API keys
const speaker = new SpeakEasy({
provider: 'openai',
apiKeys: {
openai: process.env.OPENAI_API_KEY,
elevenlabs: process.env.ELEVENLABS_API_KEY
}
});
// Use global config file for defaults
// ~/.config/speakeasy/settings.json handles fallbacks
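Since the constructor argument is optional, an instance created without any config should pick up those defaults:
// Falls back to ~/.config/speakeasy/settings.json and built-in defaults
const defaultSpeaker = new SpeakEasy();
await defaultSpeaker.speak('Uses global configuration defaults');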
// Enable caching for repeated text
const speaker = new SpeakEasy({
provider: 'openai',
cache: { enabled: true, ttl: '1d' }
});
// Reuse speaker instances
class AppNotifications {
private static speaker = new SpeakEasy({
provider: 'openai',
cache: { enabled: true }
});
static async notify(message: string) {
await this.speaker.speak(message);
}
}
// Graceful fallback pattern
async function reliableSpeech(text: string) {
const providers = ['openai', 'elevenlabs', 'system'] as const;
for (const provider of providers) {
try {
await say(text, provider);
return; // Success
} catch (error) {
console.warn(`${provider} failed:`, error.message);
continue; // Try next provider
}
}
throw new Error('All providers failed');
}
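Usage is then a single call that walks the provider list in order:
await reliableSpeech('Deployment complete');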
const speaker = new SpeakEasy({
provider: 'openai',
debug: true // Enables detailed logging
});
await speaker.speak('Debug this message');
With debug enabled, SpeakEasy logs details like the following:
🔍 SpeakEasy Debug: Using provider 'openai'
🔍 SpeakEasy Debug: Voice 'nova', Rate 180 WPM, Volume 70%
🔍 SpeakEasy Debug: Cache key: abc123-def456
📦 SpeakEasy Debug: Cache hit - using cached audio
🔊 SpeakEasy Debug: Playing audio from cache
For more advanced configuration options, see the Configuration Guide.