Skip to content

Commit 5be190c

Browse files
committed
Flush console output after every message.
1 parent 3c30d89 commit 5be190c

File tree

1 file changed

+9
-8
lines changed
  • python/packages/autogen-agentchat/src/autogen_agentchat/ui

1 file changed

+9
-8
lines changed

Diff for: python/packages/autogen-agentchat/src/autogen_agentchat/ui/_console.py

+9-8
Original file line number | Diff line number | Diff line change
@@ -75,8 +75,8 @@ def notify_event_received(self, request_id: str) -> None:
7575
self.input_events[request_id] = event
7676

7777

78-
def aprint(output: str, end: str = "\n") -> Awaitable[None]:
79-
return asyncio.to_thread(print, output, end=end)
78+
def aprint(output: str, end: str = "\n", flush: bool = False) -> Awaitable[None]:
79+
return asyncio.to_thread(print, output, end=end, flush=flush)
8080

8181

8282
async def Console(
@@ -126,7 +126,7 @@ async def Console(
126126
f"Total completion tokens: {total_usage.completion_tokens}\n"
127127
f"Duration: {duration:.2f} seconds\n"
128128
)
129-
await aprint(output, end="")
129+
await aprint(output, end="", flush=True)
130130

131131
# mypy ignore
132132
last_processed = message # type: ignore
@@ -141,7 +141,7 @@ async def Console(
141141
output += f"[Prompt tokens: {message.chat_message.models_usage.prompt_tokens}, Completion tokens: {message.chat_message.models_usage.completion_tokens}]\n"
142142
total_usage.completion_tokens += message.chat_message.models_usage.completion_tokens
143143
total_usage.prompt_tokens += message.chat_message.models_usage.prompt_tokens
144-
await aprint(output, end="")
144+
await aprint(output, end="", flush=True)
145145

146146
# Print summary.
147147
if output_stats:
@@ -156,7 +156,7 @@ async def Console(
156156
f"Total completion tokens: {total_usage.completion_tokens}\n"
157157
f"Duration: {duration:.2f} seconds\n"
158158
)
159-
await aprint(output, end="")
159+
await aprint(output, end="", flush=True)
160160

161161
# mypy ignore
162162
last_processed = message # type: ignore
@@ -169,23 +169,24 @@ async def Console(
169169
message = cast(AgentEvent | ChatMessage, message) # type: ignore
170170
if not streaming_chunks:
171171
# Print message sender.
172-
await aprint(f"{'-' * 10} {message.source} {'-' * 10}", end="\n")
172+
await aprint(f"{'-' * 10} {message.source} {'-' * 10}", end="\n", flush=True)
173173
if isinstance(message, ModelClientStreamingChunkEvent):
174174
await aprint(message.content, end="")
175175
streaming_chunks.append(message.content)
176176
else:
177177
if streaming_chunks:
178178
streaming_chunks.clear()
179179
# Chunked messages are already printed, so we just print a newline.
180-
await aprint("", end="\n")
180+
await aprint("", end="\n", flush=True)
181181
else:
182182
# Print message content.
183-
await aprint(_message_to_str(message, render_image_iterm=render_image_iterm), end="\n")
183+
await aprint(_message_to_str(message, render_image_iterm=render_image_iterm), end="\n", flush=True)
184184
if message.models_usage:
185185
if output_stats:
186186
await aprint(
187187
f"[Prompt tokens: {message.models_usage.prompt_tokens}, Completion tokens: {message.models_usage.completion_tokens}]",
188188
end="\n",
189+
flush=True,
189190
)
190191
total_usage.completion_tokens += message.models_usage.completion_tokens
191192
total_usage.prompt_tokens += message.models_usage.prompt_tokens

0 commit comments

Comments (0)