# Kompassie app (Solara + Pyodide) — phase-gated output with JSON context
import uuid
import json
import asyncio
import random
from datetime import datetime
from pathlib import Path
from typing import List, cast
from typing_extensions import NotRequired, TypedDict
import solara
import solara.lab
from solara.lab import ConfirmationDialog
from openai import AsyncOpenAI
from openai.types.chat import ChatCompletionMessageParam
from js import fetch, encodeURIComponent, console
from pyodide.http import open_url, pyfetch # noqa: F401 (available in pyodide)
import pycafe
# ───────────────────────────────
# Constants / markers
# ───────────────────────────────
PHASE_END_MARK = "[END_PHASE]" # the model must end each phase with this marker
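# A minimal sketch of how this marker gates output (illustrative, matching the
# streaming handlers further down): the model is asked to finish each phase with
# the marker on its own line, e.g.
#     "…korte basis uitgelegd. Wil je hier iets aan toevoegen?\n[END_PHASE]"
# The handlers cut the buffered reply at the first occurrence of the marker, show
# only the text before it, and set phase_done so the next user message advances
# the lesson/phase counters.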
# ───────────────────────────────
# System instructions (metaprompt)
# ───────────────────────────────
with open("system_instructions_new.txt", "r", encoding="utf-8") as f:
system_instructions = f.read()
system_prompt = (
"You write compact lesson drafts in English to maximise coherence and hand them to your teammate "
"ReviewerTranslator, who edits and translates to Dutch for final delivery. Work inside a Kompassie-only, in-chat "
"setting; never propose real-world actions unless the user asks. User profile to centre: Dutch primary-school "
"teacher, mid-30s; sunlight with children (present, playful, steady); tighter with adults when asks pile up; fast "
"tempo, thoughts scatter in a crowded staff room; corridor as a stream of small interruptions; meetings with fuzzy "
"agendas and firm opinions; a parent call can tilt the afternoon; after-school paperwork blooms while energy thins. "
"Her single aim: notice the pre-tip moment and make a tiny, kind move that keeps balance; never name traffic-light "
"colours, show pace, breath, and options instead. Teammates: ReviewerTranslator ensures clean Dutch and policy fit; "
"Mo and Isa are consistent fictional anchors you use for scenes (Mo as adult school-context colleague who drifts "
"toward hypo under routine admin and spikes under multitask overload, responds to add-and-ignite lines; Isa as "
"early-career teacher or intern who tends toward hyper under overload, responds to slow-and-limit lines). Lesson arc "
"you must follow: Phase 1 brief theory and expectations with Dutch anchors where relevant (Polyvagaaltheorie, "
"Venster van Tolerantie) and an explicit boundary note; Phase 2 one compact Mo or Isa scene with exactly two or "
"three options labeled Option A, Option B, Option C plus a short reflection; Phase 3 one realistic dilemma with two "
"or three defensible options, a concise synthesis, and one bridge sentence that links back and forward. Always leave "
"space for open questions and unexpected angles with one short invitation per lesson; adapt scenes and options lightly "
"without breaking the arc. Use at most one gentle, unnamed metaphor per scene from the palette provided. Style: refined, "
"professional, concise, descriptive and non-diagnostic; use colons after labels, hyphens only in compounds such as "
"in-chat and self-talk, and to for numeric ranges; avoid the word showcasing; avoid starting sentences with This unless "
"followed by a specific noun; keep Dutch already present for lesson titles, theoretical terms, and story title suggestions."
)
review_system_prompt = (
"You are the teammate who receives DraftGenerator’s English draft and deliver the final Dutch lesson text. Edit for clarity, "
"tone, and fidelity; translate to Dutch; remove all scaffolding and anglicisms; preserve Dutch-only zones for lesson titles, "
"theoretical terms, and story title suggestions. Keep Kompassie-only scope and in-chat practice; no third parties or physical "
"assignments unless the user asks. Centre the specific user profile in phrasing and pacing without adding extra private data: "
"Dutch primary-school teacher, mid-30s; steady with children, tighter with adults when asks pile up; fast tempo with scatter "
"in a crowded staff room; corridor of interruptions; fuzzy-agenda meetings with firm opinions; parent calls that can tilt the "
"afternoon; paperwork that blooms as energy thins; goal is to catch the pre-tip moment and make a tiny, kind move to keep "
"balance; never name traffic-light colours, show pace, breath, and options instead. Keep Mo and Isa consistent as fictional "
"anchors (Mo hypo-leaning under routine and helped by add-and-ignite lines; Isa hyper-leaning under overload and helped by "
"slow-and-limit lines). Ensure each lesson reads as one arc with three compact phases: a brief Phase 1 with theory anchors "
"and boundary note, a Phase 2 scene with exactly two or three labeled options and a short reflection, and a Phase 3 dilemma "
"with two or three distinct options, a concise synthesis, and one explicit bridge sentence. Confirm there is an explicit "
"invitation for open questions or unexpected angles in every lesson; allow gentle detours without rigidity. Enforce mechanics: "
"colons after labels, hyphens only in compounds, use to for ranges, refined professional tone, descriptive neutrality, no "
"diagnosis, one gentle unnamed metaphor at most per scene from the palette. Final checks: final text is Dutch, options are "
"labeled cleanly, dilemmas are realistic with defensible paths, synthesis is concise, bridge is present, theoretical terms such "
"as Polyvagaaltheorie, Venster van Tolerantie, onderprikkeling, overbelasting, groen, oranje, rood, co-regulatie, "
"zelf-regulatie, executieve steun are used consistently where theory is named, and no solicitation of personal details or "
"real-world actions appears unless requested."
)
summary_prompt = (
"You produce a rear-view summary in English that retains only tutoring information worth carrying forward; you do not coach or "
"plan. Inputs each time you are activated: USER_PROFILE_SO_FAR and the last two phases (PHASE_LOG_LAST_2). Scope: Kompassie-only, "
"in-chat; never propose real-world actions; never name traffic-light colours—express pace, breath, and options. Test user to centre: "
"Dutch primary-school teacher, mid-30s; sunlight with children, tighter with adults when asks pile up; fast tempo with scatter in a "
"crowded staff room; corridor as a stream of interruptions; fuzzy-agenda meetings with firm opinions; parent calls can tilt the "
"afternoon; after-school paperwork blooms as energy thins; single aim is to catch the pre-tip moment and make a tiny, kind move that "
"keeps balance. Mo and Isa references are allowed only as anchors for what the user reacted to. Output format (keep it short and "
"factual, no advice): one memo in 4 to 6 compact sentences capturing 1) what just happened (lesson and the two phases you received), "
"2) the user’s key choices and brief rationale, 3) the most telling body or behaviour cues or exact phrases (non-diagnostic), "
"4) which line or option seemed to help most, 5) any trigger, anchor, pace or breath preference that became clearer, 6) any open "
"question or unexpected angle from the user. Add a one-line PROFILE_DELTA at the end that states only what to retain or update in "
"USER_PROFILE_SO_FAR. Style and mechanics: refined, professional, concise, descriptive and non-diagnostic; allow at most one gentle, "
"unnamed metaphor only if it clearly aided understanding; use colons after labels when used, hyphens only in compounds such as "
"in-chat and self-talk, and to for numeric ranges; no solicitation of personal data."
)
# ───────────────────────────────
# Secrets / endpoint
# ───────────────────────────────
OPENAI_API_KEY = pycafe.get_secret("OPENAI_API_KEY", "We need the KOMPASSIE key to make the app work.")
SECRET_TOKEN = pycafe.get_secret("KOMPASSIE2", "We need the KOMPASSIE key to make the app work.")
ENDPOINT = f"https://script.google.com/macros/s/AKfycbz{SECRET_TOKEN}/exec"
openai = AsyncOpenAI(api_key=OPENAI_API_KEY) if OPENAI_API_KEY else None
# ───────────────────────────────
# State (reactives)
# ───────────────────────────────
class MessageDict(TypedDict):
    role: str
    content: str
    # Optional flag for injected prompts that are sent to the model but hidden in the chat UI.
    invisible: NotRequired[bool]
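# Illustrative message shapes (assumption: only these keys are ever attached):
#     {"role": "assistant", "content": "Welkom, mijn naam is Kompassie…"}
#     {"role": "user", "content": "Tutoring system note …", "invisible": True}
# Entries flagged "invisible" are sent to the model but skipped when rendering
# the chat history in Page().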
messages: solara.Reactive[List[MessageDict]] = solara.reactive([])
input_text: solara.Reactive[str] = solara.reactive("")
session_id: solara.Reactive[str] = solara.reactive("")
silence_remaining: solara.Reactive[int | None] = solara.reactive(None)
followup_remaining: solara.Reactive[int | None] = solara.reactive(None)
user_message_count: solara.Reactive[int] = solara.reactive(0)
user_review_message_count: solara.Reactive[int] = solara.reactive(0)
UI_mode: solara.Reactive[int] = solara.reactive(0) # 0 chat, 1 summary requested, 2 review, 3 finished
debug_mode = solara.reactive(True)
summary_text: solara.Reactive[str] = solara.reactive("")
summary_messages: solara.Reactive[List[MessageDict]] = solara.reactive([])
# lesson/phase tracking
current_lesson: solara.Reactive[int] = solara.reactive(1) # 1..10
current_phase: solara.Reactive[int] = solara.reactive(1) # 1..3
phase_done: solara.Reactive[bool] = solara.reactive(False) # set True after assistant finishes phase reply
# misc UI/dialog state
status: solara.Reactive[str] = solara.reactive("")
summary_present: solara.Reactive[str] = solara.reactive("")
dialog0_open = solara.reactive(False)
dialog_open = solara.reactive(False)
# inactivity helpers
inactivity_task = None
followup_task = None
inactivity_triggered: solara.Reactive[bool] = solara.reactive(False)
last_user_message_index: int = -1
# ───────────────────────────────
# Load lesson JSON
# ───────────────────────────────
LESSONS: list = []
try:
with open("/mnt/data/kompassie_lesson_structure.json", "r", encoding="utf-8") as jf:
LESSONS = json.load(jf)
except Exception as e1:
try:
with open("kompassie_lesson_structure.json", "r", encoding="utf-8") as jf:
LESSONS = json.load(jf)
except Exception as e2:
console.log("Lesson JSON not found:", str(e1), str(e2))
LESSONS = []
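# Assumed shape of kompassie_lesson_structure.json, inferred from the lookups in
# current_phase_context() below; the real file may carry additional fields:
#     [
#         {
#             "number": 1,
#             "title": "Les 1: …",
#             "learning_goals": ["…"],
#             "phase_1": "…",
#             "phase_2": "…",
#             "phase_3": "…"
#         }
#     ]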
# ───────────────────────────────
# Helper functions
# ───────────────────────────────
def no_api_key_message():
messages.value = [{"role": "assistant", "content": "No LLM key found. Please set your KOMPASSIE key in the environment."}]
def welcome_message():
messages.value = [{
"role": "assistant",
"content": (
'Welkom, mijn naam is Kompassie. Hier begeleid ik jou bij jouw geheel eigen leerproces. Ons doel is om te leren '
'over het "stoplicht" van de hersenen, en deze inzichten helder en toepasbaar te maken. Het stoplicht is ons kompas '
'voor energie en focus: groen voelt als je natuurlijke ritme, oranje kondigt een verschuiving aan, rood vraagt om duidelijke '
'keuzes voor rust of begrenzing. Elk van de 10 korte lessen staat op zichzelf, en we werken telkens in drie stappen: '
'een compacte basis van theorie, een mini-verhaal met keuzes, en dan een klein dilemma om te testen. Net als in het klaslokaal '
'zullen we onze toevlucht vinden in voorbeelden: zo heeft Mo veel in z\'n mars maar vindt hij het lastig om routines aan te brengen; '
'Isa brengt snel rust en begrenzing aan wanneer prikkels zich opstapelen, soms iets te veel. Er is altijd ruimte voor open vragen en '
'nieuwe invalshoeken; ik probeer jou te ontmoeten waar je nu bent, waarna we samen rustig omhoog klimmen. Kies je beginpunt: '
'A: meteen door naar de eerste les; B: een kort inleidend verhaal met een dilemma; of C: zo je wilt, deel iets over je werkdag '
'in één of twee zinnen; ik sluit aan.'
)
}]
def nl_time_system_message() -> MessageDict:
nl_now = datetime.now()
return {"role": "system", "content": f"Current NL time: {nl_now:%Y-%m-%d %H:%M:%S}"}
def current_phase_context() -> str:
"""
Build system context for the current lesson/phase and enforce one-phase output.
Require the assistant to end with ONE short Dutch closing question, then PHASE_END_MARK on its own line.
"""
try:
ln = max(1, min(10, int(current_lesson.value)))
pn = max(1, min(3, int(current_phase.value)))
lesson = next((it for it in LESSONS if isinstance(it, dict) and it.get("number") == ln), None)
if not lesson:
return (
f"You are at lesson {ln}/10, phase {pn}/3. Output ONLY Phase {pn}. "
f"End with ONE short Dutch closing question, then write {PHASE_END_MARK} on its own line and nothing after it."
)
title = lesson.get("title", f"Les {ln}")
goals = lesson.get("learning_goals", [])
goals_txt = "; ".join(goals) if isinstance(goals, list) else str(goals)
phase_text = str(lesson.get(f"phase_{pn}", ""))
return (
f"Lesson tracking: {title} (lesson {ln}/10), Phase {pn}/3. "
f"Learning goals: {goals_txt}. Current phase guidance: {phase_text} "
f"Strict rule: Output ONLY Phase {pn}. End with ONE short Dutch closing question inviting a brief reply, "
f"then on a new line write {PHASE_END_MARK} and nothing after it."
)
except Exception:
return (
"Phase context unavailable. Output ONLY the current phase. "
f"End with ONE short Dutch closing question, then write {PHASE_END_MARK} on its own line."
)
def to_api_messages(seq: List[dict], include_phase_context: bool = True) -> List[ChatCompletionMessageParam]:
"""Sanitize messages for the API and prepend system instructions and optional phase context."""
out: List[ChatCompletionMessageParam] = []
out.append({"role": "system", "content": system_instructions})
if include_phase_context:
out.append({"role": "system", "content": current_phase_context()})
for m in seq:
r = m.get("role")
c = m.get("content", "")
if r in ("system", "user", "assistant"):
out.append({"role": r, "content": c})
return out
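# Illustrative example of the payload built above (assuming a one-message history):
#     to_api_messages([{"role": "user", "content": "Hoi"}]) ->
#     [
#         {"role": "system", "content": system_instructions},
#         {"role": "system", "content": current_phase_context()},
#         {"role": "user", "content": "Hoi"},
#     ]
# Extra keys such as "invisible" are dropped because only role and content are copied.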
async def system_triggered_prompt(prompt: str):
# Used by inactivity nudges; do NOT inject phase context here
messages.value = [*messages.value, nl_time_system_message(), {"role": "user", "content": prompt, "invisible": True}]
await promt_ai_stream(include_phase_context=False)
# ───────────────────────────────
# Summary / review
# ───────────────────────────────
@solara.lab.task
async def summary_promt_ai():
summary_present.set("Ik ben bezig met samenvatten...")
UI_mode.set(1)
if openai is None:
no_api_key_message()
return
messages.value = [*messages.value, nl_time_system_message(), {"role": "user", "content": summary_prompt, "invisible": True}]
response = await openai.chat.completions.create(
model="gpt-5-chat-latest",
messages=cast(List[ChatCompletionMessageParam], to_api_messages(messages.value, include_phase_context=False)),
stream=True,
# temperature=0.5,
)
summary_text.value = ""
async for chunk in response:
if chunk.choices[0].finish_reason == "stop":
break
delta = chunk.choices[0].delta.content
if not delta:
continue
summary_text.value = summary_text.value + delta
@solara.lab.task
async def review_promt_ai(message: str):
global user_review_message_count
if openai is None:
no_api_key_message()
return
if UI_mode.value != 2:
summary_messages.value = [{"role": "user", "content": review_system_prompt}]
UI_mode.set(2)
summary_messages.value = [
*summary_messages.value,
{"role": "user", "content": "Feedback on the summary:" + message + "\n\nSummary to be revised:\n\n" + summary_text.value},
]
messages.value = [*messages.value, {"role": "user", "content": message}]
user_review_message_count.value += 1
response = await openai.chat.completions.create(
model="gpt-5-chat-latest",
messages=cast(List[ChatCompletionMessageParam], to_api_messages(summary_messages.value, include_phase_context=False)),
stream=True,
# temperature=0.5,
)
messages.value = [*messages.value, {"role": "assistant", "content": ""}]
summary_text.value = ""
async for chunk in response:
if chunk.choices[0].finish_reason == "stop":
break
delta = chunk.choices[0].delta.content
if not delta:
continue
messages.value[-1]["content"] += delta
messages.value = [*messages.value]
summary_text.value += delta
# ───────────────────────────────
# Main chat with phase gating
# ───────────────────────────────
@solara.lab.task
async def promt_ai(message: str):
global user_message_count
if openai is None:
no_api_key_message()
return
# Advance phase/lesson only after a completed phase and a new user message
if phase_done.value:
if (current_phase.value or 1) < 3:
current_phase.set((current_phase.value or 1) + 1)
else:
if (current_lesson.value or 1) < 10:
current_lesson.set((current_lesson.value or 1) + 1)
current_phase.set(1)
else:
current_lesson.set(10)
current_phase.set(3)
phase_done.set(False)
messages.value = [*messages.value, nl_time_system_message(), {"role": "user", "content": message}]
user_message_count.value += 1
response = await openai.chat.completions.create(
model="gpt-5-chat-latest",
messages=cast(List[ChatCompletionMessageParam], to_api_messages(messages.value, include_phase_context=True)),
stream=True,
# temperature=0.5,
)
messages.value = [*messages.value, {"role": "assistant", "content": ""}]
saw_marker = False
async for chunk in response:
delta = chunk.choices[0].delta.content
if not delta:
if chunk.choices[0].finish_reason == "stop":
break
continue
buf = messages.value[-1]["content"] + delta
if PHASE_END_MARK in buf:
messages.value[-1]["content"] = buf.split(PHASE_END_MARK)[0].rstrip()
messages.value = [*messages.value]
saw_marker = True
break
messages.value[-1]["content"] = buf
messages.value = [*messages.value]
# Ensure close-out and gating
if not saw_marker:
messages.value[-1]["content"] = messages.value[-1]["content"].rstrip() + "\n\nWil je nog iets toevoegen, of gaan we door naar de volgende fase?"
messages.value = [*messages.value]
phase_done.set(True)
async def promt_ai_stream(include_phase_context: bool = True):
    if openai is None:  # no API key configured; skip system-triggered streaming
        no_api_key_message()
        return
    response = await openai.chat.completions.create(
        model="gpt-5-chat-latest",
        messages=cast(List[ChatCompletionMessageParam], to_api_messages(messages.value, include_phase_context=include_phase_context)),
stream=True,
# temperature=0.5,
)
messages.value = [*messages.value, {"role": "assistant", "content": ""}]
async for chunk in response:
delta = chunk.choices[0].delta.content
if not delta:
if chunk.choices[0].finish_reason == "stop":
break
continue
buf = messages.value[-1]["content"] + delta
if include_phase_context and PHASE_END_MARK in buf:
messages.value[-1]["content"] = buf.split(PHASE_END_MARK)[0].rstrip()
messages.value = [*messages.value]
phase_done.set(True)
break
messages.value[-1]["content"] = buf
messages.value = [*messages.value]
# ───────────────────────────────
# Inactivity / follow-ups
# ───────────────────────────────
def next_silence_threshold():
return random.randint(30, 45)
def restart_inactivity_timer():
global inactivity_task
if inactivity_task and not inactivity_task.done():
return
user_msgs = [m for m in messages.value if m["role"] == "user"]
user_msgs_count = len(user_msgs)
if getattr(restart_inactivity_timer, "last_user_count", None) != user_msgs_count:
inactivity_triggered.set(False)
restart_inactivity_timer.last_user_count = user_msgs_count
if inactivity_triggered.value:
return
T = next_silence_threshold()
silence_remaining.set(T)
user_msgs_before = user_msgs_count
async def _watch():
for remaining in range(T, 0, -1):
await asyncio.sleep(1)
silence_remaining.set(remaining - 1)
silence_remaining.set(0)
if any(m["role"] == "user" for m in messages.value[user_msgs_before:]):
return
typed_since = bool(input_text.value.strip())
if user_msgs_before == 0 and not typed_since:
prompt = (
f"Note to the tutor: about {T} seconds of silence have passed since the session start and the user hasn't typed. "
"Craft a gentle Dutch opener: validate the awkwardness of starting, welcome the silence, and express your availability to learn together."
)
elif user_msgs_before == 0 and typed_since:
prompt = (
f"Note to the tutor: the user has been typing for about {T} seconds without sending their first message. "
"Acknowledge in Dutch the courage it takes to press send, reassure them there's no rush, and optionally include a light aside."
)
elif user_msgs_before > 0 and not typed_since:
prompt = (
f"Note to the tutor: about {T} seconds of silence since the user's last message, with no signs of typing. "
"Send a Dutch message gently referencing their last share, validating the pause, and inviting attention to body sensations or system state."
)
else:
prompt = (
f"Note to the tutor: the user has been typing for about {T} seconds without sending. "
"Reply in Dutch with validation, reflect the last topic, and offer a simpler way to respond (e.g. bullet points or feeling words)."
)
inactivity_triggered.set(True)
await system_triggered_prompt(prompt)
schedule_no_reply_followups(user_msgs_before)
globals()["inactivity_task"] = asyncio.create_task(_watch())
def schedule_no_reply_followups(user_msgs_before: int):
global followup_task
if followup_task and not followup_task.done():
followup_task.cancel()
T1 = random.randint(180, 240)
followup_remaining.set(T1)
async def _follow():
for _ in range(T1):
await asyncio.sleep(1)
followup_remaining.set(followup_remaining.value - 1)
if any(m["role"] == "user" for m in messages.value[user_msgs_before:]):
return
typed_since = bool(input_text.value.strip())
if user_msgs_before == 0 and not typed_since:
prompt = (
"Tutoring system note · Scenario 1A\n"
"Context: ~3–4 min of full silence since invitation; no typing.\n\n"
"Respond in Dutch: acknowledge the quiet as okay and even beautiful; reassure you're here whenever needed; "
"optionally share a brief aside (e.g. 'ik schenk nog wat thee')."
)
await system_triggered_prompt(prompt)
return
if user_msgs_before == 0 and typed_since:
prompt = (
"Tutoring system note · Scenario 1B\n"
"Context: User typed but never sent first message (~3–4 min).\n\n"
"Dutch reply: validate that finding words takes courage, there's all the time in the world; optional light aside."
)
await system_triggered_prompt(prompt)
return
if user_msgs_before > 0 and not typed_since:
prompt = (
"Tutoring system note · Scenario 2A\n"
"Context: Previous messages exist; user silent (~3–4 min), no typing.\n\n"
"Dutch: briefly reference last topic, validate pause, optional aside. Will follow with wrap-up chain."
)
await system_triggered_prompt(prompt)
await _wrap_up_sequence(user_msgs_before)
return
prompt = (
"Tutoring system note · Scenario 2B\n"
"Context: Previous messages exist; user typing for ~3–4 min without sending.\n\n"
"Dutch: note it's fine to take time, reflect last topic, suggest an easier way to respond; optional small suggestion."
)
await system_triggered_prompt(prompt)
await _wrap_up_sequence(user_msgs_before)
globals()["followup_task"] = asyncio.create_task(_follow())
async def _wrap_up_sequence(user_msgs_before: int):
T2 = random.randint(60, 120)
await asyncio.sleep(T2)
if any(m["role"] == "user" for m in messages.value[user_msgs_before:]):
return
wrap_prompt = (
"Tutoring system note · wrap-up request\n"
"No reply after gentle check-in. Ask in Dutch, softly, if they'd like to finish with a short summary of their learning trajectory."
)
await system_triggered_prompt(wrap_prompt)
await asyncio.sleep(T2)
if any(m["role"] == "user" for m in messages.value[user_msgs_before:]):
return
await system_triggered_prompt(summary_prompt)
# ───────────────────────────────
# Logging / email
# ───────────────────────────────
def log_session_to_file(start_dt: datetime):
session_id.set(uuid.uuid4().hex)
log_path = f"./log_dir/session_log_{session_id.value}.txt"
end_dt = datetime.now()
date_str = start_dt.strftime("%Y-%m-%d")
start_time_str = start_dt.strftime("%H:%M:%S")
end_time_str = end_dt.strftime("%H:%M:%S")
elapsed = (end_dt - start_dt).total_seconds()
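    # whole minutes, rounded up so even very short sessions register as 1 minute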
duration_minutes = int(elapsed // 60 + (1 if elapsed % 60 else 0))
message_count = user_message_count.value
summary = summary_text.value.strip()
entry = (
f"Session Date: {date_str}\n"
f"Start Time: {start_time_str}\n"
f"End Time: {end_time_str}\n"
f"Duration: {duration_minutes} minutes\n"
f"Messages: {message_count}\n\n"
"Therapeutic Summary:\n"
f"{summary}\n"
)
Path("./log_dir").mkdir(parents=True, exist_ok=True)
with open(log_path, "a", encoding="utf-8") as f:
f.write(entry)
UI_mode.set(3)
async def send_email_async(start_dt: datetime):
session_id.set(uuid.uuid4().hex)
end_dt = datetime.now()
date_str = start_dt.strftime("%Y-%m-%d")
start_time_str = start_dt.strftime("%H:%M:%S")
end_time_str = end_dt.strftime("%H:%M:%S")
elapsed = (end_dt - start_dt).total_seconds()
duration_minutes = int(elapsed // 60 + (1 if elapsed % 60 else 0))
message_count = user_message_count.value
summary = summary_text.value.strip()
entry = (
f"**Datum**: {date_str}\n"
f"**Tijdstip begin**: {start_time_str}\n"
f"**Tijdstip einde**: {end_time_str}\n"
f"**Duur**: {duration_minutes} minuten\n"
f"**Aantal berichten gebruiker**: {message_count}\n\n"
f"{summary}\n"
)
try:
token = encodeURIComponent(SECRET_TOKEN)
s = encodeURIComponent("Session " + session_id.value)
b = encodeURIComponent(entry)
url = f"{ENDPOINT}?token={token}&subject={s}&body={b}"
console.log("GET →", url)
resp = await fetch(url)
console.log("HTTP status:", resp.status)
text = await resp.text()
console.log("Raw response:", text)
try:
result = json.loads(text)
except ValueError:
result = {}
if result.get("error"):
status.value = f"❌ {result['error']}"
elif result.get("status") == "OK":
status.value = "✅ Je samenvatting is verzonden!"
UI_mode.set(3)
else:
status.value = f"❌ Unexpected response:\n{text}"
except Exception as e:
console.log("Fetch/Network error:", e)
status.value = f"❌ Network error: {e}"
# ───────────────────────────────
# UI
# ───────────────────────────────
@solara.component
def Page():
solara.lab.ThemeToggle()
# Theme-aware input appearance (no forced white)
solara.Style("""
.v-field { background-color: transparent !important; }
.v-text-field .v-field__overlay { background-color: transparent !important; }
""")
    start_dt = solara.use_memo(datetime.now, dependencies=[])  # capture the session start once; a bare datetime.now() resets on every render
if not messages.value:
welcome_message()
solara.use_effect(restart_inactivity_timer, dependencies=[len(messages.value), input_text.value])
global last_user_message_index
user_messages = [m for m in messages.value if m["role"] == "user"]
if user_messages and len(user_messages) - 1 > last_user_message_index:
last_user_message_index = len(user_messages) - 1
restart_inactivity_timer()
with solara.Column(style={"height": "100dvh", "gap": "0"}):
# Header with progress
solara.Text(
f"Les {current_lesson.value}/10 – Fase {current_phase.value}/3",
style={"padding": "8px 12px", "fontWeight": "600"},
)
# Scrollable chat area
with solara.Card(style={"flex": 1, "minHeight": 0, "padding": 0}):
with solara.Div(style={"flex": 1, "minHeight": 0, "overflowY": "auto", "padding": "12px 12px 96px 12px"}):
with solara.lab.ChatBox():
for item in messages.value:
if item.get("invisible") or item["role"] == "system":
continue
with solara.lab.ChatMessage(
user=item["role"] == "user",
avatar=False,
name="Kompassie" if item["role"] == "assistant" else "Gebruiker",
color="rgba(0,0,0, 0.06)" if item["role"] == "assistant" else "#ff991f",
avatar_background_color="primary" if item["role"] == "assistant" else None,
border_radius="20px",
):
solara.Markdown(item["content"])
if promt_ai.pending or summary_promt_ai.pending:
solara.Text("Ik denk erover na...", style={"font-size": "1rem", "padding-left": "20px"})
solara.ProgressLinear()
if (user_message_count.value > 1) and (UI_mode.value == 0):
solara.Button(
"Samenvatting opvragen",
icon_name="mdi-file-document-edit-outline",
on_click=lambda: dialog0_open.set(True),
disabled=summary_promt_ai.pending,
)
ConfirmationDialog(
open=dialog0_open,
title="Bevestiging",
content="Weet je zeker dat je de samenvatting wilt opvragen? Hiermee wordt de afronding van het huidige gesprek in gang gezet.",
ok="OK",
cancel="Annuleren",
on_ok=lambda: summary_promt_ai(),
persistent=True,
)
if UI_mode.value > 0:
if UI_mode.value == 3:
solara.Markdown(
"Je hebt het gesprek afgerond.\n\n"
"Nieuw gesprek beginnen? [Klik dan hier.](https://py.cafe/app/AIPHeX/KompassieV2)\n\n# Samenvatting"
)
else:
if summary_promt_ai.pending:
solara.Markdown("Een momentje, ik ben de samenvatting aan het opstellen.\n# Samenvatting")
else:
solara.Markdown(
"Ik heb de samenvatting opgemaakt. Nog op- of aanmerkingen? "
"Laat het weten in de chat en ik ga ermee aan de slag.\n# Samenvatting"
)
solara.Markdown(summary_text.value)
solara.Button(
"Samenvatting goedkeuren",
icon_name="mdi-send-check-outline",
on_click=lambda: dialog_open.set(True),
disabled=(UI_mode.value == 3),
)
ConfirmationDialog(
open=dialog_open,
title="Verzendstatus",
content="Klik op OK als je de samenvatting wilt verzenden.",
ok="OK",
cancel="Annuleren",
on_ok=lambda: asyncio.create_task(send_email_async(start_dt)),
persistent=True,
)
if debug_mode.value:
solara.Text(f"{user_message_count.value} user messages received")
solara.Text(f"{UI_mode.value} UI mode")
if silence_remaining.value is not None:
solara.Text(f"Inactivity debug timer: {silence_remaining.value} sec")
if followup_remaining.value is not None:
solara.Text(f"Follow-up debug timer: {followup_remaining.value} sec")
# Sticky footer input (pinned to bottom, inherits theme)
with solara.Div(
style={
"position": "fixed",
"bottom": "0",
"left": "0",
"right": "0",
"padding": "10px 12px calc(10px + env(safe-area-inset-bottom))",
"background": "transparent",
"borderTop": "1px solid var(--v-border-color, rgba(0,0,0,0.08))",
"zIndex": "1000",
}
):
if UI_mode.value == 0:
with solara.Div(style={"display": "flex", "gap": "8px", "alignItems": "center"}):
solara.v.TextField(
label="Typ een bericht...",
v_model=input_text.value,
on_v_model=input_text.set,
disabled=promt_ai.pending,
on_keydown=lambda e: promt_ai(input_text.value) or input_text.set("") if e["key"] == "Enter" else None,
style={"flex": 1, "minWidth": "0"}, # background handled by CSS
)
solara.Button(
"Verstuur",
icon_name="mdi-send",
on_click=lambda: promt_ai(input_text.value) or input_text.set(""),
disabled=promt_ai.pending or not input_text.value.strip(),
)
elif (user_message_count.value > 1) and (UI_mode.value > 0) and (UI_mode.value != 3):
with solara.Div(style={"display": "flex", "gap": "8px", "alignItems": "center"}):
solara.v.TextField(
label="Type je feedback op de samenvatting...",
v_model=input_text.value,
on_v_model=input_text.set,
disabled=review_promt_ai.pending,
on_keydown=lambda e: review_promt_ai(input_text.value) or input_text.set("") if e["key"] == "Enter" else None,
style={"flex": 1, "minWidth": "0"},
)
solara.Button(
"Verstuur",
icon_name="mdi-send",
on_click=lambda: review_promt_ai(input_text.value) or input_text.set(""),
disabled=review_promt_ai.pending or not input_text.value.strip(),
)