Archime committed on
Commit
9d3beef
·
1 Parent(s): 3532025

impl on_additional_outputs

Browse files
Files changed (1) hide show
  1. app.py +161 -323
app.py CHANGED
@@ -5,380 +5,218 @@ import asyncio
5
  from fastrtc.webrtc import WebRTC
6
  from pydub import AudioSegment
7
  import time
8
- import os
9
- import json
10
- import spaces
11
- from app.utils import generate_coturn_config
12
-
13
- from app.session_utils import (
14
- TMP_DIR,
15
- generate_session_id,
16
- register_session,
17
- unregister_session,
18
- get_active_sessions,
19
- stop_file_path,
20
- create_stop_flag,
21
- clear_stop_flag,
22
- reset_all_active_sessions,
23
- on_load,
24
- on_unload
25
 
 
 
 
26
  )
27
 
28
# Reset sessions at startup
# (clears any session bookkeeping left over from a previous process run)
reset_all_active_sessions()

# Bundled sample files; the first one is the default streaming source.
EXAMPLE_FILES = ["data/bonjour.wav", "data/bonjour2.wav"]
DEFAULT_FILE = EXAMPLE_FILES[0]
33
-
34
-
35
# --------------------------------------------------------
# STREAMING
# --------------------------------------------------------
def read_and_stream_audio(filepath_to_stream: str, session_id: str, chunk_seconds: float):
    """Stream audio chunks and save .npz files only when transcription is active.

    Yields ``(sample_rate, np.ndarray of shape (1, n_samples))`` tuples for the
    WebRTC component. Progress is persisted to a per-session JSON file so the
    UI timer can poll it; chunk ``.npz`` files are written only while the
    per-session "transcribe active" flag file exists.
    """
    # Per-session control files (created/removed by the UI callbacks).
    stop_file = os.path.join(TMP_DIR, f"stream_stop_flag_{session_id}.txt")
    transcribe_flag = os.path.join(TMP_DIR, f"transcribe_active_{session_id}.txt")

    logging.debug(f"[{session_id}] read_and_stream_audio() started with file: {filepath_to_stream}")

    if not filepath_to_stream or not os.path.exists(filepath_to_stream):
        logging.error(f"[{session_id}] Audio file not found: {filepath_to_stream}")
        return
    clear_stop_flag(session_id)
    register_session(session_id, filepath_to_stream)
    progress_path = os.path.join(TMP_DIR, f"progress_{session_id}.json")
    chunk_dir = os.path.join(TMP_DIR, f"chunks_{session_id}")
    os.makedirs(chunk_dir, exist_ok=True)

    try:
        segment = AudioSegment.from_file(filepath_to_stream)
        chunk_ms = int(chunk_seconds * 1000)
        # FIX: ceiling division -- the previous `len(segment) // chunk_ms + 1`
        # over-counted by one whenever the duration was an exact multiple of
        # the chunk size, so the progress bar could never reach 100%.
        total_chunks = max(1, -(-len(segment) // chunk_ms))

        logging.info(f"[{session_id}] Streaming {total_chunks} chunks ({chunk_seconds:.2f}s each)...")

        for i, chunk in enumerate(segment[::chunk_ms], start=1):
            if os.path.exists(stop_file):
                logging.info(f"[{session_id}] Stop flag detected at chunk {i}. Ending stream.")
                clear_stop_flag(session_id)
                break
            logging.info(f"[{session_id}] Streaming chunk {i}.")
            iter_start = time.perf_counter()

            # Human-readable elapsed time (hh:mm:ss) for the UI.
            elapsed_s = i * chunk_seconds
            hours, remainder = divmod(int(elapsed_s), 3600)
            minutes, seconds = divmod(remainder, 60)
            elapsed_str = f"{hours:02d}:{minutes:02d}:{seconds:02d}"

            percent = round((i / total_chunks) * 100, 2)
            progress_data = {"value": percent, "elapsed": elapsed_str}
            with open(progress_path, "w") as f:
                json.dump(progress_data, f)
            chunk_array = np.array(chunk.get_array_of_samples(), dtype=np.int16)
            rate = chunk.frame_rate

            # Save only if transcription is active
            if os.path.exists(transcribe_flag):
                npz_path = os.path.join(chunk_dir, f"chunk_{i:05d}.npz")
                np.savez_compressed(npz_path, data=chunk_array, rate=rate)
                logging.debug(f"[{session_id}] Saved chunk {i}/{total_chunks} (transcribe active)")

            # Stream audio to client (mono: shape (1, n_samples)).
            yield (rate, chunk_array.reshape(1, -1))

            # Pace the stream to roughly real time, compensating for the time
            # spent producing this chunk; never sleep less than 10ms.
            process_ms = (time.perf_counter() - iter_start) * 1000
            time.sleep(max(chunk_seconds - (process_ms / 1000.0) - 0.1, 0.01))

        logging.info(f"[{session_id}] Streaming completed successfully.")

    except Exception as e:
        logging.error(f"[{session_id}] Stream error: {e}", exc_info=True)
    finally:
        # Always release the session and remove the progress file, even on error.
        unregister_session(session_id)
        clear_stop_flag(session_id)
        if os.path.exists(progress_path):
            os.remove(progress_path)
102
-
103
-
104
# --------------------------------------------------------
# TRANSCRIPTION
# --------------------------------------------------------
@spaces.GPU
def transcribe(session_id: str):
    """Continuously read and delete .npz chunks while transcription is active.

    Polls the session's chunk directory every 0.25s as long as the
    "transcribe active" flag file exists, processes each chunk in filename
    order, and deletes it afterwards. The empty chunk directory is removed
    on exit.
    """
    active_flag = os.path.join(TMP_DIR, f"transcribe_active_{session_id}.txt")
    chunk_dir = os.path.join(TMP_DIR, f"chunks_{session_id}")

    if not os.path.exists(chunk_dir):
        logging.warning(f"[{session_id}] No chunk directory found for transcription.")
        return

    logging.info(f"[{session_id}] Transcription loop started.")
    try:
        while os.path.exists(active_flag):
            files = sorted(f for f in os.listdir(chunk_dir) if f.endswith(".npz"))
            if not files:
                time.sleep(0.25)
                continue

            for fname in files:
                fpath = os.path.join(chunk_dir, fname)
                try:
                    # FIX: np.load returns an NpzFile that keeps the file open;
                    # the previous code never closed it, leaking a descriptor
                    # per chunk. The context manager closes it before removal.
                    with np.load(fpath) as npz:
                        samples = npz["data"]
                        rate = int(npz["rate"])

                    # Placeholder "transcription" -- only logs chunk metadata.
                    text = f"Transcribed {fname}: {len(samples)} samples @ {rate}Hz"
                    logging.debug(f"[{session_id}] {text}")

                    os.remove(fpath)
                    logging.debug(f"[{session_id}] Deleted processed chunk: {fname}")
                except Exception as e:
                    # Best-effort: a bad chunk must not kill the loop.
                    logging.error(f"[{session_id}] Error processing {fname}: {e}")
                    continue

            time.sleep(0.25)

        logging.info(f"[{session_id}] Transcription loop ended (flag removed).")
    except Exception as e:
        logging.error(f"[{session_id}] Transcription error: {e}", exc_info=True)
    finally:
        try:
            # Remove the chunk dir only when fully drained.
            if os.path.exists(chunk_dir) and not os.listdir(chunk_dir):
                os.rmdir(chunk_dir)
                logging.debug(f"[{session_id}] Cleaned up empty chunk dir.")
        except Exception as e:
            logging.error(f"[{session_id}] Cleanup error: {e}")
        logging.info(f"[{session_id}] Exiting transcription loop.")
155
 
156
 
157
# --------------------------------------------------------
# STOP STREAMING
# --------------------------------------------------------
def stop_streaming(session_id: str):
    """Request that this session's stream stop by dropping a stop-flag file."""
    create_stop_flag(session_id)
    logging.info(f"[{session_id}] Stop button clicked → stop flag created.")
    # Returning None clears the WebRTC output this callback is wired to.
    return None
164
 
 
165
 
166
def get_session_progress(session_id: str):
    """Return (progress percent, elapsed hh:mm:ss) for the session's stream.

    Falls back to an idle reading of ``(0.0, "00:00:00")`` when the progress
    file is missing or unreadable -- this is polled by a UI timer, so it must
    never raise.
    """
    idle = (0.0, "00:00:00")
    progress_path = os.path.join(TMP_DIR, f"progress_{session_id}.json")
    try:
        # EAFP: a missing file simply means no stream is running.
        with open(progress_path, "r") as f:
            payload = json.load(f)
    except Exception:
        return idle
    return payload.get("value", 0.0), payload.get("elapsed", "00:00:00")
179
-
180
-
181
# --------------------------------------------------------
# UI
# --------------------------------------------------------
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown(
        "## 🎧 WebRTC Audio Streamer (Multi-user)\n"
        "Each user controls their own stream. Transcription runs only during streaming."
    )

    # Per-session identity: filled in by on_load when a client connects.
    session_id = gr.State()
    sid_box = gr.Textbox(label="Session ID", interactive=False)
    demo.load(fn=on_load, inputs=None, outputs=[session_id, sid_box])
    demo.unload(on_unload)
    # Path of the audio file currently selected for streaming.
    active_filepath = gr.State(value=DEFAULT_FILE)

    with gr.Row(equal_height=True):
        with gr.Column(elem_id="column_source", scale=1):
            with gr.Group(elem_id="centered_content"):
                main_audio = gr.Audio(
                    label="Audio Source",
                    sources=["upload", "microphone"],
                    type="filepath",
                    value=DEFAULT_FILE,
                )

                chunk_slider = gr.Slider(
                    label="Chunk Duration (seconds)",
                    minimum=0.5,
                    maximum=5.0,
                    value=1.0,
                    step=0.5,
                    interactive=True,
                )

                # Hidden until streaming starts; fed by the 1s polling timer below.
                progress_bar = gr.Slider(
                    label="Streaming Progress (%)",
                    minimum=0,
                    maximum=100,
                    value=0,
                    step=0.1,
                    interactive=False,
                    visible=False,
                )

                progress_text = gr.Textbox(
                    label="Elapsed Time (hh:mm:ss)",
                    interactive=False,
                    visible=False,
                )

                with gr.Row():
                    start_button = gr.Button("▶️ Start Streaming", variant="primary")
                    stop_button = gr.Button("⏹️ Stop Streaming", variant="stop", interactive=False)

        with gr.Column():
            webrtc_stream = WebRTC(
                label="Audio Stream",
                mode="receive",
                modality="audio",
                rtc_configuration=generate_coturn_config(),
                visible=True,
            )

    # --- Transcription Controls ---
    with gr.Row(equal_height=True):
        with gr.Column():
            start_transcribe = gr.Button("🎙️ Start Transcribe", interactive=False)
            stop_transcribe = gr.Button("🛑 Stop Transcribe", interactive=False)

    # --- UI Logic ---
    def start_streaming_ui(session_id):
        # Lock the source controls and reveal the progress widgets while streaming.
        return {
            start_button: gr.Button(interactive=False),
            stop_button: gr.Button(interactive=True),
            start_transcribe: gr.Button(interactive=True),
            stop_transcribe: gr.Button(interactive=False),
            chunk_slider: gr.Slider(interactive=False),
            main_audio: gr.Audio(visible=False),
            progress_bar: gr.Slider(value=0, visible=True),
            progress_text: gr.Textbox(value="00:00:00", visible=True),
        }

    def stop_streaming_ui(session_id):
        # Restore the pre-streaming control state.
        logging.debug(f"[{session_id}] UI: Stop clicked restoring controls.")

        return {
            start_button: gr.Button(interactive=True),
            stop_button: gr.Button(interactive=False),
            start_transcribe: gr.Button(interactive=False),
            stop_transcribe: gr.Button(interactive=False),
            chunk_slider: gr.Slider(interactive=True),
            main_audio: gr.Audio(visible=True),
            progress_bar: gr.Slider(value=0, visible=False),
            progress_text: gr.Textbox(value="00:00:00", visible=False),
        }

    # --- Streaming event ---
    # The generator is triggered by the Start button and feeds the WebRTC sink.
    webrtc_stream.stream(
        fn=read_and_stream_audio,
        inputs=[active_filepath, session_id, chunk_slider],
        outputs=[webrtc_stream],
        trigger=start_button.click,
        concurrency_limit=20,
        concurrency_id="receive"
    )

    start_button.click(fn=start_streaming_ui, inputs=[session_id], outputs=[
        start_button, stop_button, start_transcribe, stop_transcribe,
        chunk_slider, main_audio, progress_bar, progress_text,
    ])

    # Stop first signals the generator (flag file), then restores the UI.
    stop_button.click(fn=stop_streaming, inputs=[session_id], outputs=[webrtc_stream]).then(
        fn=stop_streaming_ui,
        inputs=[session_id],
        outputs=[
            start_button, stop_button, start_transcribe, stop_transcribe,
            chunk_slider, main_audio, progress_bar, progress_text,
        ],
    )

    # --- Transcription control logic ---
    def start_transcribe_ui(session_id: str):
        """Create transcription flag and update UI."""
        start_flag = os.path.join(TMP_DIR, f"transcribe_active_{session_id}.txt")
        with open(start_flag, "w") as f:
            f.write("1")
        logging.info(f"[{session_id}] Transcription started.")
        return {
            start_transcribe: gr.Button(interactive=False),
            stop_transcribe: gr.Button(interactive=True),
            progress_text: gr.Textbox(value="🎙️ Transcription started..."),
        }

    def stop_transcribe_ui(session_id: str):
        """Stop transcription by removing flag and update UI."""
        flag_path = os.path.join(TMP_DIR, f"transcribe_active_{session_id}.txt")
        if os.path.exists(flag_path):
            os.remove(flag_path)
        logging.info(f"[{session_id}] Transcription stopped.")
        return {
            start_transcribe: gr.Button(interactive=True),
            stop_transcribe: gr.Button(interactive=False),
            progress_text: gr.Textbox(value="🛑 Transcription stopped."),
        }

    # --- UI binding ---
    start_transcribe.click(
        fn=start_transcribe_ui,
        inputs=[session_id],
        outputs=[start_transcribe, stop_transcribe, progress_text],
    )

    # 🔥 Actual transcription loop launch
    # NOTE: registered as a second click handler on the same button; it runs
    # alongside the UI update above.
    start_transcribe.click(
        fn=transcribe,
        inputs=[session_id],
        outputs=None,
    )

    stop_transcribe.click(
        fn=stop_transcribe_ui,
        inputs=[session_id],
        outputs=[start_transcribe, stop_transcribe, progress_text],
    )

    # --- Active sessions ---
    with gr.Accordion("📊 Active Sessions", open=False):
        sessions_table = gr.DataFrame(
            headers=["session_id", "file", "start_time", "status"],
            interactive=False,
            wrap=True,
            max_height=200,
        )

    # Background pollers: session table every 3s, per-session progress every 1s.
    gr.Timer(3.0).tick(fn=get_active_sessions, outputs=sessions_table)
    gr.Timer(1.0).tick(fn=get_session_progress, inputs=[session_id], outputs=[progress_bar, progress_text])
357
 
358
# --------------------------------------------------------
# CSS
# --------------------------------------------------------
# Centers the source column vertically and pads its rows.
custom_css = """
#column_source {
    display: flex;
    flex-direction: column;
    justify-content: center;
    align-items: center;
    gap: 1rem;
    margin-top: auto;
    margin-bottom: auto;
}
#column_source .gr-row {
    padding-top: 12px;
    padding-bottom: 12px;
}
"""
demo.css = custom_css


# --------------------------------------------------------
# MAIN
# --------------------------------------------------------
if __name__ == "__main__":

    demo.queue(max_size=20, api_open=False).launch(show_api=False, debug=True)
 
5
  from fastrtc.webrtc import WebRTC
6
  from pydub import AudioSegment
7
  import time
8
import os
from gradio.utils import get_space  # NOTE(review): not used in the visible code -- confirm before removing

from app.logger_config import logger as logging
from app.utils import (
    generate_coturn_config
)

EXAMPLE_FILES = ["data/bonjour.wav", "data/bonjour2.wav"]
DEFAULT_FILE = EXAMPLE_FILES[0]
# Used to signal the streaming stop inside the generator.
# NOTE(review): this gr.State is created at module level, outside any Blocks
# context, so its .value is shared by every connected session -- one user's
# Stop would stop all streams. Confirm this is intended for multi-user use.
stop_stream_state = gr.State(value=False)
21
def read_and_stream_audio(filepath_to_stream: str):
    """Synchronous generator that reads an audio file and streams it to the
    WebRTC sink in 1-second chunks, pacing itself to roughly real time.

    Yields ``(sample_rate, np.ndarray of shape (1, n_samples))`` tuples.
    """

    if not filepath_to_stream or not os.path.exists(filepath_to_stream):
        logging.error(f"Fichier audio non trouvé ou non spécifié : {filepath_to_stream}")
        # Fall back to the bundled default file when the requested one is missing.
        if os.path.exists(DEFAULT_FILE):
            logging.warning(f"Utilisation du fichier par défaut : {DEFAULT_FILE}")
            filepath_to_stream = DEFAULT_FILE
        else:
            logging.error("Fichier par défaut non trouvé. Arrêt du stream.")
            return

    logging.info(f"Préparation du segment audio depuis : {filepath_to_stream}")
    # Reset the shared stop signal on every launch.
    stop_stream_state.value = False

    try:
        segment = AudioSegment.from_file(filepath_to_stream)
        chunk_duree_ms = 1000
        logging.info(f"Début du streaming en chunks de {chunk_duree_ms}ms...")

        for i, chunk in enumerate(segment[::chunk_duree_ms]):
            t0 = time.perf_counter()
            logging.info(f"Envoi du chunk {i+1}...")

            if stop_stream_state.value:
                logging.info("Signal d'arrêt reçu, arrêt de la boucle.")
                break

            # Mono frame for the WebRTC component: shape (1, n_samples).
            yield (
                chunk.frame_rate,
                np.array(chunk.get_array_of_samples()).reshape(1, -1),
            )

            processing_duration_ms = (time.perf_counter() - t0) * 1000

            # Target roughly real-time pacing, minus a small safety margin.
            sleep_duration = (chunk_duree_ms / 1000.0) - (processing_duration_ms / 1000.0) - 0.1
            sleep_duration = 0.01 if sleep_duration < 0 else sleep_duration  # never sleep a negative amount

            logging.debug(f"Temps de traitement: {processing_duration_ms:.2f}ms, Sommeil: {sleep_duration:.2f}s")

            # Sleep in 50ms slices so a stop request interrupts the wait quickly.
            waited = 0.0
            tick = 0.05
            while waited < sleep_duration:
                if stop_stream_state.value:
                    logging.info("Signal d'arrêt reçu pendant l'attente.")
                    break
                step = min(tick, sleep_duration - waited)
                time.sleep(step)
                waited += step
            if stop_stream_state.value:
                break

        logging.info("Streaming terminé.")

    except asyncio.CancelledError:
        # The runtime cancelled us (client disconnect); propagate.
        logging.info("Stream arrêté par l'utilisateur (CancelledError).")
        raise
    except FileNotFoundError:
        logging.error(f"Erreur critique : Fichier non trouvé : {filepath_to_stream}")
    except Exception as e:
        logging.error(f"Erreur pendant le stream: {e}", exc_info=True)
        raise
    finally:
        # Always clear the stop signal for the next run.
        stop_stream_state.value = False
        logging.info("Signal d'arrêt nettoyé.")
95
 
96
 
97
def stop_streaming():
    """Raise the stop signal consumed by the streaming generator."""
    logging.info("Bouton Stop cliqué: envoi du signal d'arrêt.")
    stop_stream_state.value = True
    # Returning None clears the WebRTC output this callback is wired to.
    return None
102
 
103
# --- Gradio interface ---
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown(
        "## Application 'Streamer' WebRTC (Serveur -> Client)\n"
        "Utilisez l'exemple fourni, uploadez un fichier ou enregistrez depuis votre micro, "
        "puis cliquez sur 'Start' pour écouter le stream."
    )

    # 1. Per-session state holding the path of the file to stream.
    active_filepath = gr.State(value=DEFAULT_FILE)

    with gr.Row():
        with gr.Column():
            main_audio = gr.Audio(
                label="Source Audio",
                sources=["upload", "microphone"],  # both upload and mic capture
                type="filepath",
                value=DEFAULT_FILE,  # default to the first bundled example
            )
        with gr.Column():
            webrtc_stream = WebRTC(
                label="Stream Audio",
                mode="receive",
                modality="audio",
                rtc_configuration=generate_coturn_config(),
                visible=True,  # shown from the start
                height=200,
            )
    # 4. Control buttons
    with gr.Row():
        with gr.Column():
            start_button = gr.Button("Start Streaming", variant="primary")
            stop_button = gr.Button("Stop Streaming", variant="stop", interactive=False)
        with gr.Column():
            gr.Text()  # layout spacer

    def set_new_file(filepath):
        """Update the state with the new path, or fall back to the default when cleared."""
        if filepath is None:
            logging.info("Audio effacé, retour au fichier d'exemple par défaut.")
            new_path = DEFAULT_FILE
        else:
            logging.info(f"Nouvelle source audio sélectionnée : {filepath}")
            new_path = filepath
        # Returned value lands in the gr.State.
        return new_path

    # Update the path when the user uploads, clears, or changes the file.
    main_audio.change(
        fn=set_new_file,
        inputs=[main_audio],
        outputs=[active_filepath]
    )

    # Update the path when the user finishes a recording.
    main_audio.stop_recording(
        fn=set_new_file,
        inputs=[main_audio],
        outputs=[active_filepath]
    )

    # UI state helpers
    def start_streaming_ui():
        """Disable controls and hide the source widget while streaming."""
        logging.info("UI : Démarrage du streaming. Désactivation des contrôles.")
        return {
            start_button: gr.Button(interactive=False),
            stop_button: gr.Button(interactive=True),
            main_audio: gr.Audio(visible=False),
        }

    def stop_streaming_ui(current_path):
        """Re-enable controls and restore the source widget after streaming.

        BUG FIX: the previous version read ``active_filepath.value``, which is
        the State's build-time default (DEFAULT_FILE), not the session's
        current value -- stopping a stream reset the audio widget to the
        default file after an upload. The current path now arrives as an
        event input instead.
        """
        logging.info("UI : Arrêt du streaming. Réactivation des contrôles.")
        return {
            start_button: gr.Button(interactive=True),
            stop_button: gr.Button(interactive=False),
            main_audio: gr.Audio(
                label="Source Audio",
                sources=["upload", "microphone"],
                type="filepath",
                value=current_path,
                visible=True
            ),
        }


    ui_components = [
        start_button, stop_button,
        main_audio,
    ]

    stream_event = webrtc_stream.stream(
        fn=read_and_stream_audio,
        inputs=[active_filepath],
        outputs=[webrtc_stream],
        trigger=start_button.click,
        concurrency_id="audio_stream",
        concurrency_limit=10
    )

    # Update the interface on START click.
    start_button.click(
        fn=start_streaming_ui,
        outputs=ui_components
    )

    # Signal the generator to stop, THEN restore the interface.
    stop_button.click(
        fn=stop_streaming,
        outputs=[webrtc_stream],
    ).then(
        fn=stop_streaming_ui,
        inputs=[active_filepath],  # pass the session's current path (see fix above)
        outputs=ui_components
    )


if __name__ == "__main__":
    demo.queue(max_size=10, api_open=False).launch(show_api=False, debug=True)