1#if defined(CONF_VIDEORECORDER)
2
3#include "video.h"
4
5#include <base/log.h>
6
7#include <engine/graphics.h>
8#include <engine/shared/config.h>
9#include <engine/sound.h>
10#include <engine/storage.h>
11
12extern "C" {
13#include <libavutil/avutil.h>
14#include <libavutil/opt.h>
15#include <libswresample/swresample.h>
16#include <libswscale/swscale.h>
17};
18
19#include <chrono>
20#include <memory>
21#include <mutex>
22#include <thread>
23
24using namespace std::chrono_literals;
25
26// This code is mostly stolen from https://github.com/FFmpeg/FFmpeg/blob/master/doc/examples/muxing.c
27
28static const enum AVColorSpace COLOR_SPACE = AVCOL_SPC_BT709;
29// AVCodecContext->colorspace is an enum AVColorSpace but sws_getCoefficients
30// wants an SWS_CS_* macro. Both sets of constants follow H.273 numbering
31// and hence agree, but we assert that they're equal here to be sure.
32static_assert(COLOR_SPACE == SWS_CS_ITU709);
33
34static LEVEL AvLevelToLogLevel(int Level)
35{
36 switch(Level)
37 {
38 case AV_LOG_PANIC:
39 case AV_LOG_FATAL:
40 case AV_LOG_ERROR:
41 return LEVEL_ERROR;
42 case AV_LOG_WARNING:
43 return LEVEL_WARN;
44 case AV_LOG_INFO:
45 return LEVEL_INFO;
46 case AV_LOG_VERBOSE:
47 case AV_LOG_DEBUG:
48 return LEVEL_DEBUG;
49 case AV_LOG_TRACE:
50 return LEVEL_TRACE;
51 default:
52 dbg_assert_failed("invalid log level: %d", Level);
53 }
54}
55
56[[gnu::format(printf, 3, 0)]] static void AvLogCallback(void *pUser, int Level, const char *pFormat, va_list VarArgs)
57{
58 const LEVEL LogLevel = AvLevelToLogLevel(Level);
59 if(LogLevel <= LEVEL_INFO)
60 {
61 char aLog[4096]; // Longest log line length
62 int Length = str_format_v(buffer: aLog, buffer_size: sizeof(aLog), format: pFormat, args: VarArgs);
63 if(Length > 0)
64 {
65 if(aLog[Length - 1] == '\n')
66 {
67 aLog[Length - 1] = '\0';
68 }
69 log_log(level: LogLevel, sys: "videorecorder/libav", fmt: "%s", aLog);
70 }
71 }
72}
73
void CVideo::Init()
{
	// Route all libav logging through the engine logger (see AvLogCallback).
	av_log_set_callback(AvLogCallback);
}
78
79CVideo::CVideo(IGraphics *pGraphics, ISound *pSound, IStorage *pStorage, int Width, int Height, const char *pName) :
80 m_pGraphics(pGraphics),
81 m_pStorage(pStorage),
82 m_pSound(pSound)
83{
84 m_pFormatContext = nullptr;
85 m_pFormat = nullptr;
86 m_pOptDict = nullptr;
87
88 m_pVideoCodec = nullptr;
89 m_pAudioCodec = nullptr;
90
91 m_Width = Width;
92 m_Height = Height;
93 str_copy(dst&: m_aName, src: pName);
94
95 m_FPS = g_Config.m_ClVideoRecorderFPS;
96
97 m_Recording = false;
98 m_Started = false;
99 m_Stopped = false;
100 m_ProcessingVideoFrame = 0;
101 m_ProcessingAudioFrame = 0;
102
103 m_HasAudio = m_pSound->IsSoundEnabled() && g_Config.m_ClVideoSndEnable;
104
105 dbg_assert(ms_pCurrentVideo == nullptr, "ms_pCurrentVideo is NOT set to nullptr while creating a new Video.");
106
107 ms_TickTime = time_freq() / m_FPS;
108 ms_pCurrentVideo = this;
109}
110
CVideo::~CVideo()
{
	// Unregister the global instance. The object itself is deleted via
	// `delete ms_pCurrentVideo` at the end of Stop().
	ms_pCurrentVideo = nullptr;
}
115
// Set up the whole recording pipeline: output file, muxer, codec streams,
// per-thread buffers, encoder threads and colorspace converters, then write
// the container header. Returns false (after logging) on any failure.
// NOTE(review): on mid-function failure the already-created state is not
// torn down here — presumably Stop() is expected to clean up; verify caller.
bool CVideo::Start()
{
	dbg_assert(!m_Started, "Already started");

	// wait for the graphic thread to idle
	m_pGraphics->WaitForIdle();

	m_AudioStream = {};
	m_VideoStream = {};

	// Resolve the output path and verify the file is writable before
	// allocating any libav state.
	char aWholePath[IO_MAX_PATH_LENGTH];
	IOHANDLE File = m_pStorage->OpenFile(m_aName, IOFLAG_WRITE, IStorage::TYPE_SAVE, aWholePath, sizeof(aWholePath));
	if(File)
	{
		io_close(File);
	}
	else
	{
		log_error("videorecorder", "Could not open file '%s'", aWholePath);
		return false;
	}

	const int FormatAllocResult = avformat_alloc_output_context2(&m_pFormatContext, nullptr, "mp4", aWholePath);
	if(FormatAllocResult < 0 || !m_pFormatContext)
	{
		char aError[AV_ERROR_MAX_STRING_SIZE];
		av_strerror(FormatAllocResult, aError, sizeof(aError));
		log_error("videorecorder", "Could not create format context: %s", aError);
		return false;
	}

	m_pFormat = m_pFormatContext->oformat;

#if defined(CONF_ARCH_IA32) || defined(CONF_ARCH_ARM) || defined(CONF_ARCH_WASM)
	// use only the minimum of 2 threads on 32-bit to save memory
	m_VideoThreads = 2;
	m_AudioThreads = 2;
#else
	m_VideoThreads = std::thread::hardware_concurrency() + 2;
	// audio gets a bit less
	m_AudioThreads = (std::thread::hardware_concurrency() / 2) + 2;
#endif

	m_CurVideoThreadIndex = 0;
	m_CurAudioThreadIndex = 0;

	// One RGBA capture buffer per video thread (4 bytes per pixel).
	const size_t VideoBufferSize = (size_t)4 * m_Width * m_Height * sizeof(uint8_t);
	m_vVideoBuffers.resize(m_VideoThreads);
	for(size_t i = 0; i < m_VideoThreads; ++i)
	{
		m_vVideoBuffers[i].m_vBuffer.resize(VideoBufferSize);
	}

	m_vAudioBuffers.resize(m_AudioThreads);

	/* Add the audio and video streams using the default format codecs
	 * and initialize the codecs. */
	if(m_pFormat->video_codec != AV_CODEC_ID_NONE)
	{
		if(!AddStream(&m_VideoStream, m_pFormatContext, &m_pVideoCodec, m_pFormat->video_codec))
			return false;
	}
	else
	{
		log_error("videorecorder", "Could not determine default video stream codec");
		return false;
	}

	if(m_HasAudio)
	{
		if(m_pFormat->audio_codec != AV_CODEC_ID_NONE)
		{
			if(!AddStream(&m_AudioStream, m_pFormatContext, &m_pAudioCodec, m_pFormat->audio_codec))
				return false;
		}
		else
		{
			log_error("videorecorder", "Could not determine default audio stream codec");
			return false;
		}
	}

	// Spawn the encoder threads; each thread waits on its ring-predecessor
	// (ParentThreadIndex) so frames are muxed in order. Block until every
	// worker has signalled m_Started before handing it work.
	m_vpVideoThreads.resize(m_VideoThreads);
	for(size_t i = 0; i < m_VideoThreads; ++i)
	{
		m_vpVideoThreads[i] = std::make_unique<CVideoRecorderThread>();
	}
	for(size_t i = 0; i < m_VideoThreads; ++i)
	{
		std::unique_lock<std::mutex> Lock(m_vpVideoThreads[i]->m_Mutex);
		m_vpVideoThreads[i]->m_Thread = std::thread([this, i]() REQUIRES(!m_WriteLock) { RunVideoThread(i == 0 ? (m_VideoThreads - 1) : (i - 1), i); });
		m_vpVideoThreads[i]->m_Cond.wait(Lock, [this, i]() -> bool { return m_vpVideoThreads[i]->m_Started; });
	}

	m_vpAudioThreads.resize(m_AudioThreads);
	for(size_t i = 0; i < m_AudioThreads; ++i)
	{
		m_vpAudioThreads[i] = std::make_unique<CAudioRecorderThread>();
	}
	for(size_t i = 0; i < m_AudioThreads; ++i)
	{
		std::unique_lock<std::mutex> Lock(m_vpAudioThreads[i]->m_Mutex);
		m_vpAudioThreads[i]->m_Thread = std::thread([this, i]() REQUIRES(!m_WriteLock) { RunAudioThread(i == 0 ? (m_AudioThreads - 1) : (i - 1), i); });
		m_vpAudioThreads[i]->m_Cond.wait(Lock, [this, i]() -> bool { return m_vpAudioThreads[i]->m_Started; });
	}

	/* Now that all the parameters are set, we can open the audio and
	 * video codecs and allocate the necessary encode buffers. */
	if(!OpenVideo())
		return false;

	if(m_HasAudio && !OpenAudio())
		return false;

	/* open the output file, if needed */
	if(!(m_pFormat->flags & AVFMT_NOFILE))
	{
		const int OpenResult = avio_open(&m_pFormatContext->pb, aWholePath, AVIO_FLAG_WRITE);
		if(OpenResult < 0)
		{
			char aError[AV_ERROR_MAX_STRING_SIZE];
			av_strerror(OpenResult, aError, sizeof(aError));
			log_error("videorecorder", "Could not open file '%s': %s", aWholePath, aError);
			return false;
		}
	}

	// One RGBA -> YUV420P conversion context per video thread, all using
	// the shared BT.709 coefficients (see COLOR_SPACE above).
	m_VideoStream.m_vpSwsContexts.reserve(m_VideoThreads);

	for(size_t i = 0; i < m_VideoThreads; ++i)
	{
		if(m_VideoStream.m_vpSwsContexts.size() <= i)
			m_VideoStream.m_vpSwsContexts.emplace_back(nullptr);

		if(!m_VideoStream.m_vpSwsContexts[i])
		{
			m_VideoStream.m_vpSwsContexts[i] = sws_getCachedContext(
				m_VideoStream.m_vpSwsContexts[i],
				m_VideoStream.m_pCodecContext->width, m_VideoStream.m_pCodecContext->height, AV_PIX_FMT_RGBA,
				m_VideoStream.m_pCodecContext->width, m_VideoStream.m_pCodecContext->height, AV_PIX_FMT_YUV420P,
				SWS_FULL_CHR_H_INT | SWS_FULL_CHR_H_INP | SWS_ACCURATE_RND | SWS_BITEXACT, nullptr, nullptr, nullptr);

			const int *pMatrixCoefficients = sws_getCoefficients(COLOR_SPACE);
			sws_setColorspaceDetails(m_VideoStream.m_vpSwsContexts[i], pMatrixCoefficients, 0, pMatrixCoefficients, 0, 0, 1 << 16, 1 << 16);
		}
	}

	/* Write the stream header, if any. */
	const int WriteHeaderResult = avformat_write_header(m_pFormatContext, &m_pOptDict);
	if(WriteHeaderResult < 0)
	{
		char aError[AV_ERROR_MAX_STRING_SIZE];
		av_strerror(WriteHeaderResult, aError, sizeof(aError));
		log_error("videorecorder", "Could not write header: %s", aError);
		return false;
	}

	m_Recording = true;
	m_Started = true;
	m_Stopped = false;
	ms_Time = time_get();
	return true;
}
279
280void CVideo::Pause(bool Pause)
281{
282 if(ms_pCurrentVideo)
283 m_Recording = !Pause;
284}
285
// Tear down the recording pipeline: join all encoder threads, flush the
// encoders, write the container trailer, release libav objects, and finally
// delete this object. WARNING: `delete ms_pCurrentVideo` destroys *this*,
// so nothing may touch members after that point.
void CVideo::Stop()
{
	dbg_assert(!m_Stopped, "Already stopped");
	m_Stopped = true;

	// Make sure the graphics thread is no longer feeding us frames.
	m_pGraphics->WaitForIdle();

	// Tell every worker thread to finish and join it.
	for(auto &pVideoThread : m_vpVideoThreads)
	{
		{
			std::unique_lock<std::mutex> Lock(pVideoThread->m_Mutex);
			pVideoThread->m_Finished = true;
			pVideoThread->m_Cond.notify_all();
		}

		pVideoThread->m_Thread.join();
	}
	m_vpVideoThreads.clear();

	for(auto &pAudioThread : m_vpAudioThreads)
	{
		{
			std::unique_lock<std::mutex> Lock(pAudioThread->m_Mutex);
			pAudioThread->m_Finished = true;
			pAudioThread->m_Cond.notify_all();
		}

		pAudioThread->m_Thread.join();
	}
	m_vpAudioThreads.clear();

	// Wait until no frame is being encoded anymore.
	while(m_ProcessingVideoFrame > 0 || m_ProcessingAudioFrame > 0)
		std::this_thread::sleep_for(10us);

	m_Recording = false;

	// Flush the encoders so buffered frames reach the muxer.
	FinishFrames(&m_VideoStream);

	if(m_HasAudio)
		FinishFrames(&m_AudioStream);

	// Only write the trailer if the header was written (m_Started).
	if(m_pFormatContext && m_Started)
		av_write_trailer(m_pFormatContext);

	CloseStream(&m_VideoStream);

	if(m_HasAudio)
		CloseStream(&m_AudioStream);

	if(m_pFormatContext)
	{
		if(!(m_pFormat->flags & AVFMT_NOFILE))
			avio_closep(&m_pFormatContext->pb);

		avformat_free_context(m_pFormatContext);
	}

	// Keep the sound interface in a local: after `delete ms_pCurrentVideo`
	// (which is `this`) the member m_pSound no longer exists.
	ISound *volatile pSound = m_pSound;

	pSound->PauseAudioDevice();
	delete ms_pCurrentVideo;
	pSound->UnpauseAudioDevice();
}
349
// Capture the currently presented frame and hand it to the next encoder
// thread in round-robin order. Called from the graphics side once per frame.
void CVideo::NextVideoFrameThread()
{
	if(m_Recording)
	{
		// NOTE(review): the first frame (index 1) is skipped — presumably the
		// first presented image is not yet valid; confirm against the caller.
		m_VideoFrameIndex += 1;
		if(m_VideoFrameIndex >= 2)
		{
			m_ProcessingVideoFrame.fetch_add(1);

			size_t NextVideoThreadIndex = m_CurVideoThreadIndex + 1;
			if(NextVideoThreadIndex == m_VideoThreads)
				NextVideoThreadIndex = 0;

			// always wait for the next video thread too, to prevent a dead lock
			{
				auto *pVideoThread = m_vpVideoThreads[NextVideoThreadIndex].get();
				std::unique_lock<std::mutex> Lock(pVideoThread->m_Mutex);

				if(pVideoThread->m_HasVideoFrame)
				{
					pVideoThread->m_Cond.wait(Lock, [&pVideoThread]() -> bool { return !pVideoThread->m_HasVideoFrame; });
				}
			}

			// after reading the graphic libraries' frame buffer, go threaded
			{
				auto *pVideoThread = m_vpVideoThreads[m_CurVideoThreadIndex].get();
				std::unique_lock<std::mutex> Lock(pVideoThread->m_Mutex);

				// Wait until this worker is done with its previous frame.
				if(pVideoThread->m_HasVideoFrame)
				{
					pVideoThread->m_Cond.wait(Lock, [&pVideoThread]() -> bool { return !pVideoThread->m_HasVideoFrame; });
				}

				// Copy the presented image into this worker's RGBA buffer.
				UpdateVideoBufferFromGraphics(m_CurVideoThreadIndex);

				// Publish the frame: set the flag and the ordering ticket
				// (m_VideoFrameToFill) used by the worker chain, then wake it.
				pVideoThread->m_HasVideoFrame = true;
				{
					std::unique_lock<std::mutex> LockParent(pVideoThread->m_VideoFillMutex);
					pVideoThread->m_VideoFrameToFill = m_VideoFrameIndex;
				}
				pVideoThread->m_Cond.notify_all();
			}

			// Advance the round-robin index.
			++m_CurVideoThreadIndex;
			if(m_CurVideoThreadIndex == m_VideoThreads)
				m_CurVideoThreadIndex = 0;
		}
	}
}
400
401void CVideo::NextVideoFrame()
402{
403 if(m_Recording)
404 {
405 ms_Time += ms_TickTime;
406 ms_LocalTime = (ms_Time - ms_LocalStartTime) / (float)time_freq();
407 }
408}
409
410void CVideo::NextAudioFrameTimeline(ISoundMixFunc Mix)
411{
412 if(m_Recording && m_HasAudio)
413 {
414 double SamplesPerFrame = (double)m_AudioStream.m_pCodecContext->sample_rate / m_FPS;
415 while(m_AudioStream.m_SamplesFrameCount >= m_AudioStream.m_SamplesCount)
416 {
417 NextAudioFrame(Mix);
418 }
419 m_AudioStream.m_SamplesFrameCount += SamplesPerFrame;
420 }
421}
422
// Mix one buffer of game audio and hand it to the next audio encoder thread
// in round-robin order, mirroring NextVideoFrameThread().
void CVideo::NextAudioFrame(ISoundMixFunc Mix)
{
	if(m_Recording && m_HasAudio)
	{
		m_AudioFrameIndex += 1;

		m_ProcessingAudioFrame.fetch_add(1);

		size_t NextAudioThreadIndex = m_CurAudioThreadIndex + 1;
		if(NextAudioThreadIndex == m_AudioThreads)
			NextAudioThreadIndex = 0;

		// always wait for the next Audio thread too, to prevent a dead lock

		{
			auto *pAudioThread = m_vpAudioThreads[NextAudioThreadIndex].get();
			std::unique_lock<std::mutex> Lock(pAudioThread->m_Mutex);

			if(pAudioThread->m_HasAudioFrame)
			{
				pAudioThread->m_Cond.wait(Lock, [&pAudioThread]() -> bool { return !pAudioThread->m_HasAudioFrame; });
			}
		}

		// after reading the graphic libraries' frame buffer, go threaded
		{
			auto *pAudioThread = m_vpAudioThreads[m_CurAudioThreadIndex].get();

			std::unique_lock<std::mutex> Lock(pAudioThread->m_Mutex);

			// Wait until this worker is done with its previous frame.
			if(pAudioThread->m_HasAudioFrame)
			{
				pAudioThread->m_Cond.wait(Lock, [&pAudioThread]() -> bool { return !pAudioThread->m_HasAudioFrame; });
			}

			// Mix interleaved S16 stereo samples into this worker's buffer.
			Mix(m_vAudioBuffers[m_CurAudioThreadIndex].m_aBuffer, std::size(m_vAudioBuffers[m_CurAudioThreadIndex].m_aBuffer) / 2 / 2); // two channels

			// Output sample count the resampler will produce for this frame,
			// including samples still buffered inside the resampler.
			int64_t DstNbSamples = av_rescale_rnd(
				swr_get_delay(m_AudioStream.m_vpSwrContexts[m_CurAudioThreadIndex], m_AudioStream.m_pCodecContext->sample_rate) +
					m_AudioStream.m_vpFrames[m_CurAudioThreadIndex]->nb_samples,
				m_AudioStream.m_pCodecContext->sample_rate,
				m_AudioStream.m_pCodecContext->sample_rate, AV_ROUND_UP);

			// Remember where this frame starts in the overall sample stream;
			// the worker derives the pts from it.
			pAudioThread->m_SampleCountStart = m_AudioStream.m_SamplesCount;
			m_AudioStream.m_SamplesCount += DstNbSamples;

			// Publish the frame and its ordering ticket, then wake the worker.
			pAudioThread->m_HasAudioFrame = true;
			{
				std::unique_lock<std::mutex> LockParent(pAudioThread->m_AudioFillMutex);
				pAudioThread->m_AudioFrameToFill = m_AudioFrameIndex;
			}
			pAudioThread->m_Cond.notify_all();
		}

		// Advance the round-robin index.
		++m_CurAudioThreadIndex;
		if(m_CurAudioThreadIndex == m_AudioThreads)
			m_CurAudioThreadIndex = 0;
	}
}
482
// Worker loop for one audio encoder thread. Converts mixed samples into the
// codec format, then waits for its ring-predecessor (ParentThreadIndex) to
// finish its earlier frame before muxing, so frames are written in order.
void CVideo::RunAudioThread(size_t ParentThreadIndex, size_t ThreadIndex)
{
	auto *pThreadData = m_vpAudioThreads[ThreadIndex].get();
	auto *pParentThreadData = m_vpAudioThreads[ParentThreadIndex].get();
	std::unique_lock<std::mutex> Lock(pThreadData->m_Mutex);
	// Signal Start() that this worker is up.
	pThreadData->m_Started = true;
	pThreadData->m_Cond.notify_all();

	while(!pThreadData->m_Finished)
	{
		pThreadData->m_Cond.wait(Lock, [&pThreadData]() -> bool { return pThreadData->m_HasAudioFrame || pThreadData->m_Finished; });
		pThreadData->m_Cond.notify_all();

		if(pThreadData->m_HasAudioFrame)
		{
			FillAudioFrame(ThreadIndex);
			// check if we need to wait for the parent to finish
			{
				std::unique_lock<std::mutex> LockParent(pParentThreadData->m_AudioFillMutex);
				if(pParentThreadData->m_AudioFrameToFill != 0 && pThreadData->m_AudioFrameToFill >= pParentThreadData->m_AudioFrameToFill)
				{
					// wait for the parent to finish its frame
					pParentThreadData->m_AudioFillCond.wait(LockParent, [&pParentThreadData]() -> bool { return pParentThreadData->m_AudioFrameToFill == 0; });
				}
			}
			{
				std::unique_lock<std::mutex> LockAudio(pThreadData->m_AudioFillMutex);

				{
					// Muxer access must be serialized across all worker threads.
					const CLockScope LockScope(m_WriteLock);
					m_AudioStream.m_vpFrames[ThreadIndex]->pts = av_rescale_q(pThreadData->m_SampleCountStart, AVRational{1, m_AudioStream.m_pCodecContext->sample_rate}, m_AudioStream.m_pCodecContext->time_base);
					WriteFrame(&m_AudioStream, ThreadIndex);
				}

				// Mark this frame done (ticket 0) and wake the successor thread.
				pThreadData->m_AudioFrameToFill = 0;
				pThreadData->m_AudioFillCond.notify_all();
				pThreadData->m_Cond.notify_all();
			}
			m_ProcessingAudioFrame.fetch_sub(1);

			pThreadData->m_HasAudioFrame = false;
		}
	}
}
527
// Wrap the freshly mixed S16 stereo samples in the temporary frame and
// resample/convert them into the encoder frame for the given worker thread.
// Errors are logged and abort the conversion; the frame is then left as-is.
void CVideo::FillAudioFrame(size_t ThreadIndex)
{
	AVFrame *pTmpFrame = m_AudioStream.m_vpTmpFrames[ThreadIndex];
	AVFrame *pFrame = m_AudioStream.m_vpFrames[ThreadIndex];
	char aError[AV_ERROR_MAX_STRING_SIZE];

	// Point the temporary frame's data planes at the mixed sample buffer
	// (no copy is made).
	const int FillArrayResult = av_samples_fill_arrays(
		(uint8_t **)pTmpFrame->data,
		nullptr, // pointer to linesize (int*)
		(const uint8_t *)m_vAudioBuffers[ThreadIndex].m_aBuffer,
		2, // channels
		pTmpFrame->nb_samples,
		AV_SAMPLE_FMT_S16,
		0 // align
	);
	if(FillArrayResult < 0)
	{
		av_strerror(FillArrayResult, aError, sizeof(aError));
		log_error("videorecorder", "Could not fill audio frame: %s", aError);
		return;
	}

	const int MakeWriteableResult = av_frame_make_writable(pFrame);
	if(MakeWriteableResult < 0)
	{
		av_strerror(MakeWriteableResult, aError, sizeof(aError));
		log_error("videorecorder", "Could not make audio frame writeable: %s", aError);
		return;
	}

	/* convert to destination format */
	const int ConvertResult = swr_convert(
		m_AudioStream.m_vpSwrContexts[ThreadIndex],
		pFrame->data,
		pFrame->nb_samples,
		(const uint8_t **)pTmpFrame->data,
		pTmpFrame->nb_samples);
	if(ConvertResult < 0)
	{
		av_strerror(ConvertResult, aError, sizeof(aError));
		log_error("videorecorder", "Could not convert audio frame: %s", aError);
	}
}
571
// Worker loop for one video encoder thread. Converts the captured RGBA
// buffer to YUV, then waits for its ring-predecessor (ParentThreadIndex) to
// finish its earlier frame before muxing, so frames are written in order.
void CVideo::RunVideoThread(size_t ParentThreadIndex, size_t ThreadIndex)
{
	auto *pThreadData = m_vpVideoThreads[ThreadIndex].get();
	auto *pParentThreadData = m_vpVideoThreads[ParentThreadIndex].get();
	std::unique_lock<std::mutex> Lock(pThreadData->m_Mutex);
	// Signal Start() that this worker is up.
	pThreadData->m_Started = true;
	pThreadData->m_Cond.notify_all();

	while(!pThreadData->m_Finished)
	{
		pThreadData->m_Cond.wait(Lock, [&pThreadData]() -> bool { return pThreadData->m_HasVideoFrame || pThreadData->m_Finished; });
		pThreadData->m_Cond.notify_all();

		if(pThreadData->m_HasVideoFrame)
		{
			FillVideoFrame(ThreadIndex);
			// check if we need to wait for the parent to finish
			{
				std::unique_lock<std::mutex> LockParent(pParentThreadData->m_VideoFillMutex);
				if(pParentThreadData->m_VideoFrameToFill != 0 && pThreadData->m_VideoFrameToFill >= pParentThreadData->m_VideoFrameToFill)
				{
					// wait for the parent to finish its frame
					pParentThreadData->m_VideoFillCond.wait(LockParent, [&pParentThreadData]() -> bool { return pParentThreadData->m_VideoFrameToFill == 0; });
				}
			}
			{
				std::unique_lock<std::mutex> LockVideo(pThreadData->m_VideoFillMutex);
				{
					// Muxer access must be serialized across all worker threads.
					const CLockScope LockScope(m_WriteLock);
					// The codec context's frame counter field was renamed in libavcodec 60.
#if LIBAVCODEC_VERSION_MAJOR >= 60
					m_VideoStream.m_vpFrames[ThreadIndex]->pts = m_VideoStream.m_pCodecContext->frame_num;
#else
					m_VideoStream.m_vpFrames[ThreadIndex]->pts = m_VideoStream.m_pCodecContext->frame_number;
#endif
					WriteFrame(&m_VideoStream, ThreadIndex);
				}

				// Mark this frame done (ticket 0) and wake the successor thread.
				pThreadData->m_VideoFrameToFill = 0;
				pThreadData->m_VideoFillCond.notify_all();
				pThreadData->m_Cond.notify_all();
			}
			m_ProcessingVideoFrame.fetch_sub(1);

			pThreadData->m_HasVideoFrame = false;
		}
	}
}
619
// Convert the worker thread's captured RGBA buffer into the codec's YUV
// frame using the pre-configured scaling context for that thread.
void CVideo::FillVideoFrame(size_t ThreadIndex)
{
	AVFrame *pFrame = m_VideoStream.m_vpFrames[ThreadIndex];
	const uint8_t *pRgbaData = m_vVideoBuffers[ThreadIndex].m_vBuffer.data();
	const int InLineSize = 4 * m_VideoStream.m_pCodecContext->width; // 4 bytes per RGBA pixel
	sws_scale(m_VideoStream.m_vpSwsContexts[ThreadIndex], (const uint8_t *const *)&pRgbaData, &InLineSize, 0,
		m_VideoStream.m_pCodecContext->height, pFrame->data, pFrame->linesize);
}
627
// Read the last presented image from the graphics backend into the given
// worker thread's RGBA buffer; asserts the size and format match what the
// recorder was created with.
void CVideo::UpdateVideoBufferFromGraphics(size_t ThreadIndex)
{
	uint32_t Width;
	uint32_t Height;
	CImageInfo::EImageFormat Format;
	m_pGraphics->GetReadPresentedImageDataFuncUnsafe()(Width, Height, Format, m_vVideoBuffers[ThreadIndex].m_vBuffer);
	dbg_assert((int)Width == m_Width && (int)Height == m_Height, "Size mismatch between video (%d x %d) and graphics (%d x %d)", m_Width, m_Height, Width, Height);
	dbg_assert(Format == CImageInfo::FORMAT_RGBA, "Unexpected image format %d", (int)Format);
}
637
638AVFrame *CVideo::AllocPicture(enum AVPixelFormat PixFmt, int Width, int Height)
639{
640 AVFrame *pPicture = av_frame_alloc();
641 if(!pPicture)
642 {
643 log_error("videorecorder", "Could not allocate video frame");
644 return nullptr;
645 }
646
647 pPicture->format = PixFmt;
648 pPicture->width = Width;
649 pPicture->height = Height;
650
651 /* allocate the buffers for the frame data */
652 const int FrameBufferAllocResult = av_frame_get_buffer(frame: pPicture, align: 32);
653 if(FrameBufferAllocResult < 0)
654 {
655 char aError[AV_ERROR_MAX_STRING_SIZE];
656 av_strerror(errnum: FrameBufferAllocResult, errbuf: aError, errbuf_size: sizeof(aError));
657 log_error("videorecorder", "Could not allocate video frame buffer: %s", aError);
658 return nullptr;
659 }
660
661 return pPicture;
662}
663
664AVFrame *CVideo::AllocAudioFrame(enum AVSampleFormat SampleFmt, uint64_t ChannelLayout, int SampleRate, int NbSamples)
665{
666 AVFrame *pFrame = av_frame_alloc();
667 if(!pFrame)
668 {
669 log_error("videorecorder", "Could not allocate audio frame");
670 return nullptr;
671 }
672
673 pFrame->format = SampleFmt;
674#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(59, 24, 100)
675 dbg_assert(av_channel_layout_from_mask(&pFrame->ch_layout, ChannelLayout) == 0, "Failed to set channel layout");
676#else
677 pFrame->channel_layout = ChannelLayout;
678#endif
679 pFrame->sample_rate = SampleRate;
680 pFrame->nb_samples = NbSamples;
681
682 if(NbSamples)
683 {
684 const int FrameBufferAllocResult = av_frame_get_buffer(frame: pFrame, align: 0);
685 if(FrameBufferAllocResult < 0)
686 {
687 char aError[AV_ERROR_MAX_STRING_SIZE];
688 av_strerror(errnum: FrameBufferAllocResult, errbuf: aError, errbuf_size: sizeof(aError));
689 log_error("videorecorder", "Could not allocate audio frame buffer: %s", aError);
690 return nullptr;
691 }
692 }
693
694 return pFrame;
695}
696
// Open the video codec, allocate the per-thread encode frames and copy the
// codec parameters to the muxer stream. Returns false (after logging) on
// failure; already-allocated frames are released later by CloseStream().
bool CVideo::OpenVideo()
{
	AVCodecContext *pContext = m_VideoStream.m_pCodecContext;
	AVDictionary *pOptions = nullptr;
	av_dict_copy(&pOptions, m_pOptDict, 0);

	/* open the codec */
	const int VideoOpenResult = avcodec_open2(pContext, m_pVideoCodec, &pOptions);
	av_dict_free(&pOptions);
	if(VideoOpenResult < 0)
	{
		char aError[AV_ERROR_MAX_STRING_SIZE];
		av_strerror(VideoOpenResult, aError, sizeof(aError));
		log_error("videorecorder", "Could not open video codec: %s", aError);
		return false;
	}

	m_VideoStream.m_vpFrames.clear();
	m_VideoStream.m_vpFrames.reserve(m_VideoThreads);

	/* allocate and init a re-usable frame */
	for(size_t i = 0; i < m_VideoThreads; ++i)
	{
		m_VideoStream.m_vpFrames.emplace_back(nullptr);
		m_VideoStream.m_vpFrames[i] = AllocPicture(pContext->pix_fmt, pContext->width, pContext->height);
		if(!m_VideoStream.m_vpFrames[i])
		{
			return false;
		}
	}

	/* If the output format is not YUV420P, then a temporary YUV420P
	 * picture is needed too. It is then converted to the required
	 * output format. */
	m_VideoStream.m_vpTmpFrames.clear();
	m_VideoStream.m_vpTmpFrames.reserve(m_VideoThreads);

	if(pContext->pix_fmt != AV_PIX_FMT_YUV420P)
	{
		/* allocate and init a re-usable frame */
		for(size_t i = 0; i < m_VideoThreads; ++i)
		{
			m_VideoStream.m_vpTmpFrames.emplace_back(nullptr);
			m_VideoStream.m_vpTmpFrames[i] = AllocPicture(AV_PIX_FMT_YUV420P, pContext->width, pContext->height);
			if(!m_VideoStream.m_vpTmpFrames[i])
			{
				return false;
			}
		}
	}

	/* copy the stream parameters to the muxer */
	// Fix: local was misnamed "AudioStreamCopyResult" (copy-paste from OpenAudio).
	const int VideoStreamCopyResult = avcodec_parameters_from_context(m_VideoStream.m_pStream->codecpar, pContext);
	if(VideoStreamCopyResult < 0)
	{
		char aError[AV_ERROR_MAX_STRING_SIZE];
		av_strerror(VideoStreamCopyResult, aError, sizeof(aError));
		log_error("videorecorder", "Could not copy video stream parameters: %s", aError);
		return false;
	}
	m_VideoFrameIndex = 0;
	return true;
}
760
// Open the audio codec, allocate the per-thread encode and mixing frames,
// copy the codec parameters to the muxer stream and set up one resampling
// context per audio thread (S16 stereo at the engine mixing rate -> codec
// format/rate). Returns false (after logging) on failure.
bool CVideo::OpenAudio()
{
	AVCodecContext *pContext = m_AudioStream.m_pCodecContext;
	AVDictionary *pOptions = nullptr;
	av_dict_copy(&pOptions, m_pOptDict, 0);

	/* open it */
	const int AudioOpenResult = avcodec_open2(pContext, m_pAudioCodec, &pOptions);
	av_dict_free(&pOptions);
	if(AudioOpenResult < 0)
	{
		char aError[AV_ERROR_MAX_STRING_SIZE];
		av_strerror(AudioOpenResult, aError, sizeof(aError));
		log_error("videorecorder", "Could not open audio codec: %s", aError);
		return false;
	}

	// Codecs with variable frame size accept any sample count; pick a large
	// fixed chunk in that case, otherwise use the codec's required frame size.
	int NbSamples;
	if(pContext->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE)
		NbSamples = 10000;
	else
		NbSamples = pContext->frame_size;

	m_AudioStream.m_vpFrames.clear();
	m_AudioStream.m_vpFrames.reserve(m_AudioThreads);

	m_AudioStream.m_vpTmpFrames.clear();
	m_AudioStream.m_vpTmpFrames.reserve(m_AudioThreads);

	/* allocate and init a re-usable frame */
	for(size_t i = 0; i < m_AudioThreads; ++i)
	{
		// Encoder-side frame in the codec's own format/layout/rate.
		m_AudioStream.m_vpFrames.emplace_back(nullptr);
		// The channel layout API changed in libavcodec 59.24.100 (FFmpeg 5.1).
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(59, 24, 100)
		m_AudioStream.m_vpFrames[i] = AllocAudioFrame(pContext->sample_fmt, pContext->ch_layout.u.mask, pContext->sample_rate, NbSamples);
#else
		m_AudioStream.m_vpFrames[i] = AllocAudioFrame(pContext->sample_fmt, pContext->channel_layout, pContext->sample_rate, NbSamples);
#endif
		if(!m_AudioStream.m_vpFrames[i])
		{
			return false;
		}

		// Source-side frame matching the engine mixer: S16 stereo at MixingRate.
		m_AudioStream.m_vpTmpFrames.emplace_back(nullptr);
		m_AudioStream.m_vpTmpFrames[i] = AllocAudioFrame(AV_SAMPLE_FMT_S16, AV_CH_LAYOUT_STEREO, m_pSound->MixingRate(), NbSamples);
		if(!m_AudioStream.m_vpTmpFrames[i])
		{
			return false;
		}
	}

	/* copy the stream parameters to the muxer */
	const int AudioStreamCopyResult = avcodec_parameters_from_context(m_AudioStream.m_pStream->codecpar, pContext);
	if(AudioStreamCopyResult < 0)
	{
		char aError[AV_ERROR_MAX_STRING_SIZE];
		av_strerror(AudioStreamCopyResult, aError, sizeof(aError));
		log_error("videorecorder", "Could not copy audio stream parameters: %s", aError);
		return false;
	}

	/* create resampling context */
	m_AudioStream.m_vpSwrContexts.clear();
	m_AudioStream.m_vpSwrContexts.resize(m_AudioThreads);
	for(size_t i = 0; i < m_AudioThreads; ++i)
	{
		m_AudioStream.m_vpSwrContexts[i] = swr_alloc();
		if(!m_AudioStream.m_vpSwrContexts[i])
		{
			log_error("videorecorder", "Could not allocate resampling context");
			return false;
		}

		/* set options */
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(59, 24, 100)
		dbg_assert(av_opt_set_chlayout(m_AudioStream.m_vpSwrContexts[i], "in_chlayout", &pContext->ch_layout, 0) == 0, "invalid option");
#else
		dbg_assert(av_opt_set_int(m_AudioStream.m_vpSwrContexts[i], "in_channel_count", pContext->channels, 0) == 0, "invalid option");
#endif
		if(av_opt_set_int(m_AudioStream.m_vpSwrContexts[i], "in_sample_rate", m_pSound->MixingRate(), 0) != 0)
		{
			log_error("videorecorder", "Could not set audio sample rate to %d", m_pSound->MixingRate());
			return false;
		}
		dbg_assert(av_opt_set_sample_fmt(m_AudioStream.m_vpSwrContexts[i], "in_sample_fmt", AV_SAMPLE_FMT_S16, 0) == 0, "invalid option");
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(59, 24, 100)
		dbg_assert(av_opt_set_chlayout(m_AudioStream.m_vpSwrContexts[i], "out_chlayout", &pContext->ch_layout, 0) == 0, "invalid option");
#else
		dbg_assert(av_opt_set_int(m_AudioStream.m_vpSwrContexts[i], "out_channel_count", pContext->channels, 0) == 0, "invalid option");
#endif
		dbg_assert(av_opt_set_int(m_AudioStream.m_vpSwrContexts[i], "out_sample_rate", pContext->sample_rate, 0) == 0, "invalid option");
		dbg_assert(av_opt_set_sample_fmt(m_AudioStream.m_vpSwrContexts[i], "out_sample_fmt", pContext->sample_fmt, 0) == 0, "invalid option");

		/* initialize the resampling context */
		const int ResamplingContextInitResult = swr_init(m_AudioStream.m_vpSwrContexts[i]);
		if(ResamplingContextInitResult < 0)
		{
			char aError[AV_ERROR_MAX_STRING_SIZE];
			av_strerror(ResamplingContextInitResult, aError, sizeof(aError));
			log_error("videorecorder", "Could not initialize resampling context: %s", aError);
			return false;
		}
	}

	m_AudioFrameIndex = 0;
	return true;
}
868
869/* Add an output stream. */
870bool CVideo::AddStream(COutputStream *pStream, AVFormatContext *pFormatContext, const AVCodec **ppCodec, enum AVCodecID CodecId) const
871{
872 /* find the encoder */
873 *ppCodec = avcodec_find_encoder(id: CodecId);
874 if(!(*ppCodec))
875 {
876 log_error("videorecorder", "Could not find encoder for codec '%s'", avcodec_get_name(CodecId));
877 return false;
878 }
879
880 pStream->m_pStream = avformat_new_stream(s: pFormatContext, c: nullptr);
881 if(!pStream->m_pStream)
882 {
883 log_error("videorecorder", "Could not allocate stream");
884 return false;
885 }
886 pStream->m_pStream->id = pFormatContext->nb_streams - 1;
887 AVCodecContext *pContext = avcodec_alloc_context3(codec: *ppCodec);
888 if(!pContext)
889 {
890 log_error("videorecorder", "Could not allocate encoding context");
891 return false;
892 }
893 pStream->m_pCodecContext = pContext;
894
895#if defined(CONF_ARCH_IA32) || defined(CONF_ARCH_ARM) || defined(CONF_ARCH_WASM)
896 // use only 1 ffmpeg thread on 32-bit to save memory
897 pContext->thread_count = 1;
898#endif
899
900 switch((*ppCodec)->type)
901 {
902 case AVMEDIA_TYPE_AUDIO:
903 {
904 const AVSampleFormat *pSampleFormats = nullptr;
905 const int *pSampleRates = nullptr;
906#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(61, 13, 100)
907 avcodec_get_supported_config(avctx: pContext, codec: *ppCodec, config: AV_CODEC_CONFIG_SAMPLE_FORMAT, flags: 0, out_configs: (const void **)&pSampleFormats, out_num_configs: nullptr);
908 avcodec_get_supported_config(avctx: pContext, codec: *ppCodec, config: AV_CODEC_CONFIG_SAMPLE_RATE, flags: 0, out_configs: (const void **)&pSampleRates, out_num_configs: nullptr);
909#else
910 pSampleFormats = (*ppCodec)->sample_fmts;
911 pSampleRates = (*ppCodec)->supported_samplerates;
912#endif
913 pContext->sample_fmt = pSampleFormats ? pSampleFormats[0] : AV_SAMPLE_FMT_FLTP;
914 if(pSampleRates)
915 {
916 pContext->sample_rate = pSampleRates[0];
917 for(int i = 0; pSampleRates[i]; i++)
918 {
919 if(pSampleRates[i] == m_pSound->MixingRate())
920 {
921 pContext->sample_rate = m_pSound->MixingRate();
922 break;
923 }
924 }
925 }
926 else
927 {
928 pContext->sample_rate = m_pSound->MixingRate();
929 }
930 pContext->bit_rate = pContext->sample_rate * 2 * 16;
931#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(59, 24, 100)
932 dbg_assert(av_channel_layout_from_mask(&pContext->ch_layout, AV_CH_LAYOUT_STEREO) == 0, "Failed to set channel layout");
933#else
934 pContext->channels = 2;
935 pContext->channel_layout = AV_CH_LAYOUT_STEREO;
936#endif
937
938 pStream->m_pStream->time_base.num = 1;
939 pStream->m_pStream->time_base.den = pContext->sample_rate;
940 break;
941 }
942
943 case AVMEDIA_TYPE_VIDEO:
944 pContext->codec_id = CodecId;
945
946 pContext->bit_rate = 400000;
947 /* Resolution must be a multiple of two. */
948 pContext->width = m_Width;
949 pContext->height = m_Height % 2 == 0 ? m_Height : m_Height - 1;
950 /* timebase: This is the fundamental unit of time (in seconds) in terms
951 * of which frame timestamps are represented. For fixed-fps content,
952 * timebase should be 1/framerate and timestamp increments should be
953 * identical to 1. */
954 pStream->m_pStream->time_base.num = 1;
955 pStream->m_pStream->time_base.den = m_FPS;
956 pContext->time_base = pStream->m_pStream->time_base;
957
958 pContext->gop_size = 12; /* emit one intra frame every twelve frames at most */
959 pContext->pix_fmt = AV_PIX_FMT_YUV420P;
960 pContext->colorspace = COLOR_SPACE;
961 if(pContext->codec_id == AV_CODEC_ID_MPEG2VIDEO)
962 {
963 /* just for testing, we also add B-frames */
964 pContext->max_b_frames = 2;
965 }
966 if(pContext->codec_id == AV_CODEC_ID_MPEG1VIDEO)
967 {
968 /* Needed to avoid using macroblocks in which some coeffs overflow.
969 * This does not happen with normal video, it just happens here as
970 * the motion of the chroma plane does not match the luma plane. */
971 pContext->mb_decision = 2;
972 }
973 if(CodecId == AV_CODEC_ID_H264)
974 {
975 static const char *s_apPresets[10] = {"ultrafast", "superfast", "veryfast", "faster", "fast", "medium", "slow", "slower", "veryslow", "placebo"};
976 dbg_assert(g_Config.m_ClVideoX264Preset < (int)std::size(s_apPresets), "preset index invalid: %d", g_Config.m_ClVideoX264Preset);
977 dbg_assert(av_opt_set(pContext->priv_data, "preset", s_apPresets[g_Config.m_ClVideoX264Preset], 0) == 0, "invalid option");
978 dbg_assert(av_opt_set_int(pContext->priv_data, "crf", g_Config.m_ClVideoX264Crf, 0) == 0, "invalid option");
979 }
980 break;
981
982 default:
983 break;
984 }
985
986 /* Some formats want stream headers to be separate. */
987 if(pFormatContext->oformat->flags & AVFMT_GLOBALHEADER)
988 pContext->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
989
990 return true;
991}
992
993void CVideo::WriteFrame(COutputStream *pStream, size_t ThreadIndex)
994{
995 AVPacket *pPacket = av_packet_alloc();
996 if(pPacket == nullptr)
997 {
998 log_error("videorecorder", "Could not allocate packet");
999 return;
1000 }
1001
1002 pPacket->data = nullptr;
1003 pPacket->size = 0;
1004
1005 avcodec_send_frame(avctx: pStream->m_pCodecContext, frame: pStream->m_vpFrames[ThreadIndex]);
1006 int RecvResult = 0;
1007 do
1008 {
1009 RecvResult = avcodec_receive_packet(avctx: pStream->m_pCodecContext, avpkt: pPacket);
1010 if(!RecvResult)
1011 {
1012 /* rescale output packet timestamp values from codec to stream timebase */
1013 av_packet_rescale_ts(pkt: pPacket, tb_src: pStream->m_pCodecContext->time_base, tb_dst: pStream->m_pStream->time_base);
1014 pPacket->stream_index = pStream->m_pStream->index;
1015
1016 const int WriteFrameResult = av_interleaved_write_frame(s: m_pFormatContext, pkt: pPacket);
1017 if(WriteFrameResult < 0)
1018 {
1019 char aError[AV_ERROR_MAX_STRING_SIZE];
1020 av_strerror(errnum: WriteFrameResult, errbuf: aError, errbuf_size: sizeof(aError));
1021 log_error("videorecorder", "Could not write video frame: %s", aError);
1022 }
1023 }
1024 else
1025 break;
1026 } while(true);
1027
1028 if(RecvResult && RecvResult != AVERROR(EAGAIN))
1029 {
1030 char aError[AV_ERROR_MAX_STRING_SIZE];
1031 av_strerror(errnum: RecvResult, errbuf: aError, errbuf_size: sizeof(aError));
1032 log_error("videorecorder", "Could not encode video frame: %s", aError);
1033 }
1034
1035 av_packet_free(pkt: &pPacket);
1036}
1037
1038void CVideo::FinishFrames(COutputStream *pStream)
1039{
1040 if(!pStream->m_pCodecContext || !avcodec_is_open(s: pStream->m_pCodecContext))
1041 return;
1042
1043 AVPacket *pPacket = av_packet_alloc();
1044 if(pPacket == nullptr)
1045 {
1046 log_error("videorecorder", "Could not allocate packet");
1047 return;
1048 }
1049
1050 pPacket->data = nullptr;
1051 pPacket->size = 0;
1052
1053 avcodec_send_frame(avctx: pStream->m_pCodecContext, frame: nullptr);
1054 int RecvResult = 0;
1055 do
1056 {
1057 RecvResult = avcodec_receive_packet(avctx: pStream->m_pCodecContext, avpkt: pPacket);
1058 if(!RecvResult)
1059 {
1060 /* rescale output packet timestamp values from codec to stream timebase */
1061 av_packet_rescale_ts(pkt: pPacket, tb_src: pStream->m_pCodecContext->time_base, tb_dst: pStream->m_pStream->time_base);
1062 pPacket->stream_index = pStream->m_pStream->index;
1063
1064 const int WriteFrameResult = av_interleaved_write_frame(s: m_pFormatContext, pkt: pPacket);
1065 if(WriteFrameResult < 0)
1066 {
1067 char aError[AV_ERROR_MAX_STRING_SIZE];
1068 av_strerror(errnum: WriteFrameResult, errbuf: aError, errbuf_size: sizeof(aError));
1069 log_error("videorecorder", "Could not write video frame: %s", aError);
1070 }
1071 }
1072 else
1073 break;
1074 } while(true);
1075
1076 if(RecvResult && RecvResult != AVERROR_EOF)
1077 {
1078 char aError[AV_ERROR_MAX_STRING_SIZE];
1079 av_strerror(errnum: RecvResult, errbuf: aError, errbuf_size: sizeof(aError));
1080 log_error("videorecorder", "Could not finish recording: %s", aError);
1081 }
1082
1083 av_packet_free(pkt: &pPacket);
1084}
1085
1086void CVideo::CloseStream(COutputStream *pStream)
1087{
1088 avcodec_free_context(avctx: &pStream->m_pCodecContext);
1089
1090 for(auto *pFrame : pStream->m_vpFrames)
1091 av_frame_free(frame: &pFrame);
1092 pStream->m_vpFrames.clear();
1093
1094 for(auto *pFrame : pStream->m_vpTmpFrames)
1095 av_frame_free(frame: &pFrame);
1096 pStream->m_vpTmpFrames.clear();
1097
1098 for(auto *pSwsContext : pStream->m_vpSwsContexts)
1099 sws_freeContext(swsContext: pSwsContext);
1100 pStream->m_vpSwsContexts.clear();
1101
1102 for(auto *pSwrContext : pStream->m_vpSwrContexts)
1103 swr_free(s: &pSwrContext);
1104 pStream->m_vpSwrContexts.clear();
1105}
1106
1107#endif
1108