@@ -75,8 +75,8 @@ def notify_event_received(self, request_id: str) -> None:
             self.input_events[request_id] = event
 
 
-def aprint(output: str, end: str = "\n") -> Awaitable[None]:
-    return asyncio.to_thread(print, output, end=end)
+def aprint(output: str, end: str = "\n", flush: bool = False) -> Awaitable[None]:
+    return asyncio.to_thread(print, output, end=end, flush=flush)
 
 
 async def Console(
@@ -126,7 +126,7 @@ async def Console(
                 f"Total completion tokens: {total_usage.completion_tokens}\n"
                 f"Duration: {duration:.2f} seconds\n"
             )
-            await aprint(output, end="")
+            await aprint(output, end="", flush=True)
 
             # mypy ignore
             last_processed = message  # type: ignore
@@ -141,7 +141,7 @@ async def Console(
                     output += f"[Prompt tokens: {message.chat_message.models_usage.prompt_tokens}, Completion tokens: {message.chat_message.models_usage.completion_tokens}]\n"
                 total_usage.completion_tokens += message.chat_message.models_usage.completion_tokens
                 total_usage.prompt_tokens += message.chat_message.models_usage.prompt_tokens
-            await aprint(output, end="")
+            await aprint(output, end="", flush=True)
 
             # Print summary.
             if output_stats:
@@ -156,7 +156,7 @@ async def Console(
                     f"Total completion tokens: {total_usage.completion_tokens}\n"
                     f"Duration: {duration:.2f} seconds\n"
                 )
-                await aprint(output, end="")
+                await aprint(output, end="", flush=True)
 
             # mypy ignore
             last_processed = message  # type: ignore
@@ -169,23 +169,24 @@ async def Console(
             message = cast(AgentEvent | ChatMessage, message)  # type: ignore
             if not streaming_chunks:
                 # Print message sender.
-                await aprint(f"{'-' * 10} {message.source} {'-' * 10}", end="\n")
+                await aprint(f"{'-' * 10} {message.source} {'-' * 10}", end="\n", flush=True)
             if isinstance(message, ModelClientStreamingChunkEvent):
                 await aprint(message.content, end="")
                 streaming_chunks.append(message.content)
             else:
                 if streaming_chunks:
                     streaming_chunks.clear()
                     # Chunked messages are already printed, so we just print a newline.
-                    await aprint("", end="\n")
+                    await aprint("", end="\n", flush=True)
                 else:
                     # Print message content.
-                    await aprint(_message_to_str(message, render_image_iterm=render_image_iterm), end="\n")
+                    await aprint(_message_to_str(message, render_image_iterm=render_image_iterm), end="\n", flush=True)
                 if message.models_usage:
                     if output_stats:
                         await aprint(
                             f"[Prompt tokens: {message.models_usage.prompt_tokens}, Completion tokens: {message.models_usage.completion_tokens}]",
                             end="\n",
+                            flush=True,
                         )
                     total_usage.completion_tokens += message.models_usage.completion_tokens
                     total_usage.prompt_tokens += message.models_usage.prompt_tokens
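
The patch threads a flush flag through aprint so console output is written out immediately rather than sitting in Python's stdout buffer, which matters when stdout is piped or block-buffered and messages arrive incrementally. Below is a minimal, self-contained sketch of the patched helper in use; the demo loop is illustrative only and not part of this diff:

import asyncio
from typing import Awaitable


def aprint(output: str, end: str = "\n", flush: bool = False) -> Awaitable[None]:
    # Run print() in a worker thread so blocking I/O never stalls the event
    # loop; flush=True pushes the text out immediately instead of waiting
    # for the stdout buffer to fill.
    return asyncio.to_thread(print, output, end=end, flush=flush)


async def main() -> None:
    # Simulated streaming chunks: without flush=True, a piped stdout may
    # hold everything back until the program exits.
    for chunk in ["Hello", ", ", "world", "!"]:
        await aprint(chunk, end="", flush=True)
        await asyncio.sleep(0.3)
    await aprint("", end="\n", flush=True)


if __name__ == "__main__":
    asyncio.run(main())

Returning the bare coroutine from asyncio.to_thread (typed as Awaitable[None]) keeps aprint an ordinary function, so callers only pay for the thread hop when they actually await the result.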