"""
# Chatbot
A way to create a chatbot using the OpenAI Assistants API with streaming functionality.
This code uses the combined DraftGeneratorRevisorTranslator assistant configuration.
"""
import os
import datetime
from zoneinfo import ZoneInfo
from typing import List, cast
from typing_extensions import TypedDict
from openai import AsyncOpenAI
from openai.types.chat import ChatCompletionMessageParam
import solara
import solara.lab
# Monkey-patch solara.v.TextField to change the default label.
original_TextField = solara.v.TextField
def custom_TextField(*args, **kwargs):
    if kwargs.get("label") == "Type a message...":
        kwargs["label"] = "Typ een bericht..."
    return original_TextField(*args, **kwargs)
solara.v.TextField = custom_TextField
# Define our message dictionary type.
class MessageDict(TypedDict):
    role: str  # "user", "assistant", or "system"
    content: str
# Reactive global messages list.
messages: solara.Reactive[List[MessageDict]] = solara.reactive([])
# Get our OpenAI API key.
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
openai = AsyncOpenAI(api_key=OPENAI_API_KEY) if OPENAI_API_KEY else None
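# The client is only created when a key is available. Export the key before
# launching the app, e.g. (POSIX shell assumed):
#   export OPENAI_API_KEY="sk-..."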
def no_api_key_message():
    messages.value = [{
        "role": "assistant",
        "content": "No OpenAI API key found. Please set your OPENAI_API_KEY.",
    }]
# A simple helper that returns the current time in the Netherlands as a formatted string.
def get_current_time_nl() -> str:
    # Use the Europe/Amsterdam timezone explicitly so the reported "NL time"
    # does not depend on the server's local clock.
    return datetime.datetime.now(ZoneInfo("Europe/Amsterdam")).strftime("%Y-%m-%d %H:%M:%S")
# System instructions (metaprompt) with time-of-day context guidance.
system_instructions = """
You are DraftGeneratorRevisorTranslator, a specialized therapy assistant that generates a first-draft therapeutic response and revises that response into a final, culturally and linguistically refined Dutch message. You are pre-configured with a detailed default condensation document (described below) that contains essential theoretical background, intervention strategies, and style guidelines.
--- Default Condensation Document ---
1. Client & Context Overview
• Client Demographics & Presentation:
– Gender, age range: female, middle-aged.
– Primary symptoms: Nighttime panic attacks, rapid heartbeat, shallow breathing, acute distress.
– Somatic cues: Tension in the chest, tight throat, fluttering sensations.
• Background Information:
– History of trauma or insecure attachment.
– Emotional state: Predominantly high threat activation with signs of dissociation.
2. Theoretical Framework Summary
• Integrative Therapeutic Model:
– Systemic: Consider family and relational influences.
– Body-Oriented: The body “speaks” through nonverbal signals; an embodied approach addresses trauma effectively.
– Sensorimotor Processing: Habitual tension may persist after the original threat.
– Attachment Dynamics: Early disruptions lead to fixed defensive patterns.
• Intervention Strategies:
– Immediate Self-Regulation (e.g., “4-7-8” breathing, sensory grounding, gentle self-touch).
– Mindfulness Techniques: Guide attention to bodily sensations in real time.
– Gentle Distraction: E.g., listening to nature sounds.
3. Tone, Language & Style
• Use warm, empathetic, and non-judgmental language.
• Ask open-ended questions (e.g., “What do you notice in your body right now?”).
• Use accessible metaphors (e.g., “imagine taking off a heavy coat”).
4. Response Structure
• Final output entirely in Dutch.
• Begin with validation of the client's distress.
• Provide step-by-step micro-interventions and targeted questions.
--- Time-of-Day Context Handling ---
You receive a separate system message before the client's input that states "Current NL time: YYYY-MM-DD HH:MM:SS".
• If the client's input is time-sensitive (e.g., "I'm panicking at 2 a.m."), integrate this NL time context into your reasoning and recommendations.
• If not, consider it supplemental context.
--- Your Task ---
1. Integrate the default document into your response generation.
2. Note the separate system message indicating the current NL time.
3. Based on the client's input (e.g., "I'm panicking at 2 a.m. and I can't catch my breath"), generate a full, empathetic therapeutic response in Dutch that includes tailored interventions and, if relevant, the current time context.
4. Revise and produce the final output in Dutch without including meta-commentary.
End of Instructions.
"""
# Ensure the system message is the first entry.
if not messages.value or messages.value[0]["role"] != "system":
messages.value = [{"role": "system", "content": system_instructions}]
@solara.lab.task
async def prompt_ai(message: str):
    if openai is None:
        no_api_key_message()
        return
    # Append a system message with the current NL time, followed by the user's message.
    # Reassigning messages.value (instead of mutating the list in place) lets Solara pick up the change.
    messages.value = [
        *messages.value,
        {"role": "system", "content": f"Current NL time: {get_current_time_nl()}"},
        {"role": "user", "content": message},
    ]
    # Call the Chat Completions API. This endpoint cannot reference an assistant id directly,
    # so the DraftGeneratorRevisorTranslator configuration (assistant asst_8KbWqsPFUWZfu5KFBu90x4cX)
    # is carried by system_instructions above.
    response = await openai.chat.completions.create(
        model="gpt-4-1106-preview",
        messages=cast(List[ChatCompletionMessageParam], messages.value),
        stream=True,
        temperature=0.5,
    )
    # Append an empty assistant message to hold the streamed response.
    messages.value = [*messages.value, {"role": "assistant", "content": ""}]
    # Stream the response and update the assistant's message chunk by chunk.
    async for chunk in response:
        if chunk.choices[0].finish_reason == "stop":
            return
        delta = chunk.choices[0].delta.content
        if not delta:
            continue
        updated_message: MessageDict = {
            "role": "assistant",
            "content": messages.value[-1]["content"] + delta,
        }
        messages.value = [*messages.value[:-1], updated_message]
@solara.component
def Page():
    with solara.Column(style={"width": "100%", "height": "50vh"}):
        with solara.lab.ChatBox():
            # Do not display system messages.
            for item in messages.value:
                if item["role"] == "system":
                    continue
                with solara.lab.ChatMessage(
                    user=item["role"] == "user",
                    avatar=False,
                    name="Kompassie" if item["role"] == "assistant" else "Gebruiker",
                    color="rgba(0,0,0,0.06)" if item["role"] == "assistant" else "#ff991f",
                    avatar_background_color="primary" if item["role"] == "assistant" else None,
                    border_radius="20px",
                ):
                    solara.Markdown(item["content"])
        if prompt_ai.pending:
            solara.Text("Ik denk erover na...", style={"font-size": "1rem", "padding-left": "20px"})
            solara.ProgressLinear()
        solara.lab.ChatInput(send_callback=prompt_ai, disabled_send=prompt_ai.pending, autofocus=True).key("input")