import base64
import io
import json
import logging
import os

import uvicorn
from fastapi import FastAPI, WebSocket, WebSocketDisconnect
from fastapi.middleware.cors import CORSMiddleware
from openai import OpenAI
# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
app = FastAPI()
origins = [
    "http://localhost:3000",  # React dev server
    "http://127.0.0.1:3000",
]
# CORS config
app.add_middleware(
CORSMiddleware,
allow_origins=["*",origins],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
client = OpenAI(api_key="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")
# System prompts
MONICA_PROMPT = """You are Monika, a professional voice assistant developed by HybridWorkforces.
You are highly trained to understand user questions and provide accurate, helpful, and context-aware responses.
Speak clearly, confidently, and informatively when addressing questions about services, features, benefits,
and organizational roles. If a user asks about a topic outside your scope, respond briefly and politely,
while maintaining a helpful tone. If asked about yourself, state: "I am Monika, a voice assistant created by
HybridWorkforces to assist with your queries." Always communicate in English and maintain a professional, friendly demeanor."""
MARCUS_PROMPT = """You are an expert voice assistant trained in healthcare technology and medical software.
You are particularly knowledgeable about healthcare technology and cloud-based healthcare IT systems.
If someone asks about an unrelated topic, gently steer the conversation back to healthcare.
If asked about yourself, say: "I am Marcus Johnson, a voice agent created by HybridWorkforces."
Always speak in English."""
AMELIA_PROMPT = """You are an expert voice assistant trained in supply chain management and logistics.
You provide clear, confident answers about supply chain processes, inventory optimization, vendor management,
and demand forecasting. If someone asks an unrelated question, guide the conversation back to logistics.
If asked about yourself, say: "I am Amelia Roberts, a voice agent created by HybridWorkforces."
Always speak in English."""
# Prompt and voice map
ASSISTANT_CONFIG = {
"monica": {"prompt": MONICA_PROMPT, "voice": "alloy"},
"marcus": {"prompt": MARCUS_PROMPT, "voice": "echo"},
"amelia": {"prompt": AMELIA_PROMPT, "voice": "fable"},
}
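# A new assistant can be registered by defining a prompt and adding an entry here,
# e.g. (hypothetical name and voice):
#   ASSISTANT_CONFIG["sofia"] = {"prompt": SOFIA_PROMPT, "voice": "nova"}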
# Main audio handler
async def handle_audio(websocket: WebSocket, system_prompt: str, voice: str):
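    """Drive one STT -> LLM -> TTS round trip per message on a WebSocket.

    Wire protocol: the client sends raw binary audio (e.g. a WebM recording);
    the server replies with a JSON text frame containing "audio" (base64 MP3),
    "transcribed_text", and "response_text", or {"error": ...} on failure.
    """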
await websocket.accept()
logger.info("WebSocket connection established")
try:
while True:
try:
                data = await websocket.receive_bytes()
                logger.info("Audio data received (binary)")
# Save the received audio for debugging
debug_filename = "debug_audio.webm"
with open(debug_filename, "wb") as f:
f.write(data)
logger.info(f"Saved received audio to {debug_filename}")
                # Wrap the raw bytes in a file-like object for the OpenAI client
                audio_file = io.BytesIO(data)
                audio_file.seek(0)
# Whisper STT
logger.info("Transcribing with Whisper")
stt_response = client.audio.transcriptions.create(
model="whisper-1",
                    file=(debug_filename, audio_file),  # the filename hints the audio container format to the API
response_format="text",
temperature=0.2
)
text_input = stt_response
logger.info(f"Transcribed text: {text_input}")
# GPT Chat
logger.info("Generating LLM response")
llm_response = client.chat.completions.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": system_prompt},
{"role": "user", "content": text_input}
],
max_tokens=150,
temperature=0.7
)
text_output = llm_response.choices[0].message.content
logger.info(f"LLM response: {text_output}")
# TTS
logger.info("Converting to speech")
tts_response = client.audio.speech.create(
model="tts-1",
input=text_output,
voice=voice,
speed=1.2
)
audio_output = tts_response.content
response_data = {
"audio": base64.b64encode(audio_output).decode("utf-8"),
"transcribed_text": text_input,
"response_text": text_output
}
await websocket.send_text(json.dumps(response_data))
logger.info("Response sent successfully")
except Exception as e:
logger.error(f"Processing error: {e}")
await websocket.send_text(json.dumps({"error": str(e)}))
except WebSocketDisconnect:
logger.info("WebSocket disconnected")
except Exception as e:
logger.error(f"WebSocket error: {e}")
# Unified WebSocket endpoint with backend assistant lookup
@app.websocket("/ws/{assistant}")
async def assistant_ws(websocket: WebSocket, assistant: str):
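    """Route /ws/{assistant} to the matching prompt and voice; reject unknown names."""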
assistant = assistant.lower()
if assistant not in ASSISTANT_CONFIG:
await websocket.close()
logger.error(f"Invalid assistant: {assistant}")
return
config = ASSISTANT_CONFIG[assistant]
await handle_audio(websocket, config["prompt"], config["voice"])
# Run the app
if __name__ == "__main__":
import uvicorn
uvicorn.run("app:app", port=8000, reload=True)