"""
Chatbot (py.cafe + Solara)
Minimal Solara chat UI that streams responses from OpenAI.
Uses a py.cafe secret called OPENAI_API_KEY when available.
"""
import os
from typing import List, cast

import solara
import solara.lab
from openai import AsyncOpenAI
from openai.types.chat import ChatCompletionMessageParam
from typing_extensions import TypedDict


class MessageDict(TypedDict):
    role: str  # "user" or "assistant"
    content: str


messages: solara.Reactive[List[MessageDict]] = solara.reactive([])
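# Module-level reactive chat history: reassigning `messages.value` triggers a
# re-render of any component that reads it (the Page component below).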

# Read key from py.cafe secret (preferred) or env var (fallback)
OPENAI_API_KEY = None
try:
    import pycafe

    OPENAI_API_KEY = pycafe.get_secret(
        "OPENAI_API_KEY",
        """We need an OpenAI API key to generate text.
Set a py.cafe secret named OPENAI_API_KEY.
""",
    )
except ModuleNotFoundError:
    OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")

openai = AsyncOpenAI(api_key=OPENAI_API_KEY) if OPENAI_API_KEY else None
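# `openai` stays None when no key is configured; prompt_ai() then shows a
# helpful message via no_api_key_message() instead of raising.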


def no_api_key_message():
    messages.value = [
        {
            "role": "assistant",
            "content": "No OpenAI API key found. Set a py.cafe secret called OPENAI_API_KEY.",
        },
    ]


@solara.lab.task
async def prompt_ai(user_message: str):
    if openai is None:
        no_api_key_message()
        return
    # Append user message
    messages.value = [*messages.value, {"role": "user", "content": user_message}]
    # Start streaming completion
    try:
        response = await openai.chat.completions.create(
            model="gpt-5.2",
            messages=cast(List[ChatCompletionMessageParam], messages.value),
            stream=True,
        )
    except Exception as e:
        messages.value = [
            *messages.value,
            {"role": "assistant", "content": f"Request failed: {e}"},
        ]
        return
    # Placeholder assistant message (for live streaming updates)
    messages.value = [*messages.value, {"role": "assistant", "content": ""}]
    try:
        async for chunk in response:
            choice = chunk.choices[0]
            # Stop if done
            if getattr(choice, "finish_reason", None) == "stop":
                return
            delta = choice.delta.content
            if not delta:
                continue
            # Append streamed delta to the last assistant message
            messages.value = [
                *messages.value[:-1],
                {"role": "assistant", "content": messages.value[-1]["content"] + delta},
            ]
    except Exception as e:
        # Replace placeholder with error message
        messages.value = [
            *messages.value[:-1],
            {"role": "assistant", "content": f"Streaming failed: {e}"},
        ]
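# solara.lab.task turns the coroutine above into a task object: the UI passes
# it as a regular callback and reads prompt_ai.pending to show progress while
# a request is running.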


@solara.component
def Page():
    with solara.Column(style={"width": "100%", "height": "60vh"}):
        with solara.lab.ChatBox():
            for item in messages.value:
                with solara.lab.ChatMessage(
                    user=item["role"] == "user",
                    avatar=False,
                    name="ChatGPT" if item["role"] == "assistant" else "User",
                    color="rgba(0,0,0, 0.06)" if item["role"] == "assistant" else "#ff991f",
                    avatar_background_color="primary" if item["role"] == "assistant" else None,
                    border_radius="20px",
                ):
                    solara.Markdown(item["content"])
        if prompt_ai.pending:
            solara.Text("I'm thinking...", style={"font-size": "1rem", "padding-left": "20px"})
            solara.ProgressLinear()
        solara.lab.ChatInput(send_callback=prompt_ai, disabled_send=prompt_ai.pending, autofocus=True).key("input")
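# Solara (and py.cafe) render the `Page` component automatically, so no
# explicit main entry point is needed.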