Py.Cafe

AIPHeX/

chatbot-3

GPT-4 Chatbot with OpenAI Streaming

Docs | Pricing
  • app.py
  • requirements.txt
app.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
"""
Chatbot (py.cafe + Solara)

Minimal Solara chat UI that streams responses from OpenAI.
Uses a py.cafe secret called OPENAI_API_KEY when available.
"""

import os
from typing import List, Literal, cast

import solara
import solara.lab

from openai import AsyncOpenAI
from openai.types.chat import ChatCompletionMessageParam
from typing_extensions import TypedDict


class MessageDict(TypedDict):
    """One chat message in the reactive history.

    Shaped to be passed straight to the OpenAI chat completions API
    (after a ``cast`` to ``ChatCompletionMessageParam``).
    """

    # Who authored the message; the type now enforces what the old
    # comment ("user" or "assistant") only documented.
    role: Literal["user", "assistant"]
    # Message text; for the assistant it grows incrementally while streaming.
    content: str


# Reactive chat history shared by the task and the UI component; reassigning
# .value (never mutating in place) is what triggers re-renders.
messages: solara.Reactive[List[MessageDict]] = solara.reactive([])

# Read key from py.cafe secret (preferred) or env var (fallback)
OPENAI_API_KEY = None
try:
    # Only available when running on py.cafe; importing it elsewhere raises
    # ModuleNotFoundError, which selects the env-var fallback below.
    import pycafe

    # The second argument is the prompt py.cafe shows the user when the
    # secret is not yet set (runtime string -- do not edit casually).
    OPENAI_API_KEY = pycafe.get_secret(
        "OPENAI_API_KEY",
        """We need an OpenAI API key to generate text.

Set a py.cafe secret named OPENAI_API_KEY.

""",
    )
except ModuleNotFoundError:
    OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")

# Client is None when no key was found; promt_ai checks this and shows a
# friendly message instead of crashing.
openai = AsyncOpenAI(api_key=OPENAI_API_KEY) if OPENAI_API_KEY else None


def no_api_key_message():
    """Replace the chat history with a hint that no API key is configured."""
    hint: MessageDict = {
        "role": "assistant",
        "content": "No OpenAI API key found. Set a py.cafe secret called OPENAI_API_KEY.",
    }
    messages.value = [hint]


@solara.lab.task
async def promt_ai(user_message: str):
    """Send *user_message* to OpenAI and stream the reply into ``messages``.

    Runs as a ``solara.lab.task`` so the UI can observe ``promt_ai.pending``.
    NOTE(review): the misspelled name ("promt") is kept because Page()
    references it; renaming would have to change both sites together.
    """
    if openai is None:
        # No client was built at import time (no API key) -- show a hint
        # instead of making a doomed API call.
        no_api_key_message()
        return

    # Append user message
    messages.value = [*messages.value, {"role": "user", "content": user_message}]

    # Start streaming completion
    try:
        response = await openai.chat.completions.create(
            model="gpt-5.2",  # NOTE(review): confirm this model id is available to the account
            messages=cast(List[ChatCompletionMessageParam], messages.value),
            stream=True,
        )
    except Exception as e:
        # Surface the failure in the chat itself rather than crashing the task.
        messages.value = [
            *messages.value,
            {"role": "assistant", "content": f"Request failed: {e}"},
        ]
        return

    # Placeholder assistant message (for live streaming updates)
    messages.value = [*messages.value, {"role": "assistant", "content": ""}]

    try:
        async for chunk in response:
            choice = chunk.choices[0]

            # Stop if done
            # NOTE(review): if a final chunk ever carried both finish_reason
            # and content, that content would be dropped here -- confirm
            # against the streaming API's chunk contract.
            if getattr(choice, "finish_reason", None) == "stop":
                return

            delta = choice.delta.content
            if not delta:
                continue

            # Append streamed delta to the last assistant message
            # (a fresh list is assigned so the reactive triggers a re-render)
            messages.value = [
                *messages.value[:-1],
                {"role": "assistant", "content": messages.value[-1]["content"] + delta},
            ]
    except Exception as e:
        # Replace placeholder with error message
        messages.value = [
            *messages.value[:-1],
            {"role": "assistant", "content": f"Streaming failed: {e}"},
        ]


@solara.component
def Page():
    """Chat page: message history, a busy indicator, and the input box."""
    with solara.Column(style={"width": "100%", "height": "60vh"}):
        with solara.lab.ChatBox():
            for message in messages.value:
                # Hoist the role checks so each keyword below reads cleanly.
                from_user = message["role"] == "user"
                from_assistant = message["role"] == "assistant"
                with solara.lab.ChatMessage(
                    user=from_user,
                    avatar=False,
                    name="ChatGPT" if from_assistant else "User",
                    color="rgba(0,0,0, 0.06)" if from_assistant else "#ff991f",
                    avatar_background_color="primary" if from_assistant else None,
                    border_radius="20px",
                ):
                    solara.Markdown(message["content"])

        # While the task runs, show a lightweight progress indicator.
        if promt_ai.pending:
            solara.Text(
                "I'm thinking...",
                style={"font-size": "1rem", "padding-left": "20px"},
            )
            solara.ProgressLinear()

        # Stable key so the input widget is reused across re-renders.
        solara.lab.ChatInput(
            send_callback=promt_ai,
            disabled_send=promt_ai.pending,
            autofocus=True,
        ).key("input")