1#if defined(CONF_VIDEORECORDER)
2
3#include "video.h"
4
5#include <base/dbg.h>
6#include <base/io.h>
7#include <base/log.h>
8#include <base/str.h>
9
10#include <engine/graphics.h>
11#include <engine/shared/config.h>
12#include <engine/sound.h>
13#include <engine/storage.h>
14
15extern "C" {
16#include <libavutil/avutil.h>
17#include <libavutil/opt.h>
18#include <libswresample/swresample.h>
19#include <libswscale/swscale.h>
20};
21
22#include <chrono>
23#include <memory>
24#include <mutex>
25#include <thread>
26
27using namespace std::chrono_literals;
28
29// This code is mostly stolen from https://github.com/FFmpeg/FFmpeg/blob/master/doc/examples/muxing.c
30
31static const enum AVColorSpace COLOR_SPACE = AVCOL_SPC_BT709;
32// AVCodecContext->colorspace is an enum AVColorSpace but sws_getCoefficients
33// wants an SWS_CS_* macro. Both sets of constants follow H.273 numbering
34// and hence agree, but we assert that they're equal here to be sure.
35static_assert(COLOR_SPACE == SWS_CS_ITU709);
36
37static LEVEL AvLevelToLogLevel(int Level)
38{
39 switch(Level)
40 {
41 case AV_LOG_PANIC:
42 case AV_LOG_FATAL:
43 case AV_LOG_ERROR:
44 return LEVEL_ERROR;
45 case AV_LOG_WARNING:
46 return LEVEL_WARN;
47 case AV_LOG_INFO:
48 return LEVEL_INFO;
49 case AV_LOG_VERBOSE:
50 case AV_LOG_DEBUG:
51 return LEVEL_DEBUG;
52 case AV_LOG_TRACE:
53 return LEVEL_TRACE;
54 default:
55 dbg_assert_failed("invalid log level: %d", Level);
56 }
57}
58
59[[gnu::format(printf, 3, 0)]] static void AvLogCallback(void *pUser, int Level, const char *pFormat, va_list VarArgs)
60{
61 const LEVEL LogLevel = AvLevelToLogLevel(Level);
62 if(LogLevel <= LEVEL_INFO)
63 {
64 char aLog[4096]; // Longest log line length
65 int Length = str_format_v(buffer: aLog, buffer_size: sizeof(aLog), format: pFormat, args: VarArgs);
66 if(Length > 0)
67 {
68 if(aLog[Length - 1] == '\n')
69 {
70 aLog[Length - 1] = '\0';
71 }
72 log_log(level: LogLevel, sys: "videorecorder/libav", fmt: "%s", aLog);
73 }
74 }
75}
76
// Installs the libav log callback so FFmpeg output goes through the
// engine log. Call once at startup, before any CVideo is created.
void CVideo::Init()
{
	av_log_set_callback(AvLogCallback);
}
81
// Constructs the recorder for the given output resolution and filename.
// Only plain state is initialized here; all FFmpeg/thread setup is
// deferred to Start(). Registers itself as the single active recorder.
CVideo::CVideo(IGraphics *pGraphics, ISound *pSound, IStorage *pStorage, int Width, int Height, const char *pName) :
	m_pGraphics(pGraphics),
	m_pStorage(pStorage),
	m_pSound(pSound)
{
	m_pFormatContext = nullptr;
	m_pFormat = nullptr;
	m_pOptDict = nullptr;

	m_pVideoCodec = nullptr;
	m_pAudioCodec = nullptr;

	m_Width = Width;
	m_Height = Height;
	str_copy(m_aName, pName);

	m_FPS = g_Config.m_ClVideoRecorderFPS;

	m_Recording = false;
	m_Started = false;
	m_Stopped = false;
	m_ProcessingVideoFrame = 0;
	m_ProcessingAudioFrame = 0;

	// Audio is only recorded when the sound system is up and the user enabled it.
	m_HasAudio = m_pSound->IsSoundEnabled() && g_Config.m_ClVideoSndEnable;

	// Only one recording may be active at a time.
	dbg_assert(ms_pCurrentVideo == nullptr, "ms_pCurrentVideo is NOT set to nullptr while creating a new Video.");

	// Duration of one video frame in time_freq() units.
	ms_TickTime = time_freq() / m_FPS;
	ms_pCurrentVideo = this;
}
113
// Unregisters the active recorder. Encoder/muxer teardown happens in
// Stop(), not here.
CVideo::~CVideo()
{
	ms_pCurrentVideo = nullptr;
}
118
// Sets up the whole recording pipeline: output file, mp4 muxer, video and
// (optionally) audio streams, worker threads, scaler contexts and the
// container header. Returns false on any failure; resources allocated up
// to that point are released by Stop().
bool CVideo::Start()
{
	dbg_assert(!m_Started, "Already started");

	// wait for the graphic thread to idle
	m_pGraphics->WaitForIdle();

	m_AudioStream = {};
	m_VideoStream = {};

	// Open and immediately close the output file once, to resolve the full
	// path (aWholePath) and verify it is writable before libav uses it.
	char aWholePath[IO_MAX_PATH_LENGTH];
	IOHANDLE File = m_pStorage->OpenFile(m_aName, IOFLAG_WRITE, IStorage::TYPE_SAVE, aWholePath, sizeof(aWholePath));
	if(File)
	{
		io_close(File);
	}
	else
	{
		log_error("videorecorder", "Could not open file '%s'", aWholePath);
		return false;
	}

	const int FormatAllocResult = avformat_alloc_output_context2(&m_pFormatContext, nullptr, "mp4", aWholePath);
	if(FormatAllocResult < 0 || !m_pFormatContext)
	{
		char aError[AV_ERROR_MAX_STRING_SIZE];
		av_strerror(FormatAllocResult, aError, sizeof(aError));
		log_error("videorecorder", "Could not create format context: %s", aError);
		return false;
	}

	m_pFormat = m_pFormatContext->oformat;

#if defined(CONF_ARCH_IA32) || defined(CONF_ARCH_ARM) || defined(CONF_ARCH_WASM)
	// use only the minimum of 2 threads on 32-bit to save memory
	m_VideoThreads = 2;
	m_AudioThreads = 2;
#else
	m_VideoThreads = std::thread::hardware_concurrency() + 2;
	// audio gets a bit less
	m_AudioThreads = (std::thread::hardware_concurrency() / 2) + 2;
#endif

	m_CurVideoThreadIndex = 0;
	m_CurAudioThreadIndex = 0;

	// One RGBA capture buffer per video worker thread (4 bytes per pixel).
	const size_t VideoBufferSize = (size_t)4 * m_Width * m_Height * sizeof(uint8_t);
	m_vVideoBuffers.resize(m_VideoThreads);
	for(size_t i = 0; i < m_VideoThreads; ++i)
	{
		m_vVideoBuffers[i].m_vBuffer.resize(VideoBufferSize);
	}

	m_vAudioBuffers.resize(m_AudioThreads);

	/* Add the audio and video streams using the default format codecs
	 * and initialize the codecs. */
	if(m_pFormat->video_codec != AV_CODEC_ID_NONE)
	{
		if(!AddStream(&m_VideoStream, m_pFormatContext, &m_pVideoCodec, m_pFormat->video_codec))
			return false;
	}
	else
	{
		log_error("videorecorder", "Could not determine default video stream codec");
		return false;
	}

	if(m_HasAudio)
	{
		if(m_pFormat->audio_codec != AV_CODEC_ID_NONE)
		{
			if(!AddStream(&m_AudioStream, m_pFormatContext, &m_pAudioCodec, m_pFormat->audio_codec))
				return false;
		}
		else
		{
			log_error("videorecorder", "Could not determine default audio stream codec");
			return false;
		}
	}

	// Spawn the video worker threads; each one waits for the previous
	// thread (ring order) so frames are written in sequence. The wait on
	// m_Started ensures the thread is running before we continue.
	m_vpVideoThreads.resize(m_VideoThreads);
	for(size_t i = 0; i < m_VideoThreads; ++i)
	{
		m_vpVideoThreads[i] = std::make_unique<CVideoRecorderThread>();
	}
	for(size_t i = 0; i < m_VideoThreads; ++i)
	{
		std::unique_lock<std::mutex> Lock(m_vpVideoThreads[i]->m_Mutex);
		m_vpVideoThreads[i]->m_Thread = std::thread([this, i]() REQUIRES(!m_WriteLock) { RunVideoThread(i == 0 ? (m_VideoThreads - 1) : (i - 1), i); });
		m_vpVideoThreads[i]->m_Cond.wait(Lock, [this, i]() -> bool { return m_vpVideoThreads[i]->m_Started; });
	}

	// Same ring-ordered startup for the audio worker threads.
	m_vpAudioThreads.resize(m_AudioThreads);
	for(size_t i = 0; i < m_AudioThreads; ++i)
	{
		m_vpAudioThreads[i] = std::make_unique<CAudioRecorderThread>();
	}
	for(size_t i = 0; i < m_AudioThreads; ++i)
	{
		std::unique_lock<std::mutex> Lock(m_vpAudioThreads[i]->m_Mutex);
		m_vpAudioThreads[i]->m_Thread = std::thread([this, i]() REQUIRES(!m_WriteLock) { RunAudioThread(i == 0 ? (m_AudioThreads - 1) : (i - 1), i); });
		m_vpAudioThreads[i]->m_Cond.wait(Lock, [this, i]() -> bool { return m_vpAudioThreads[i]->m_Started; });
	}

	/* Now that all the parameters are set, we can open the audio and
	 * video codecs and allocate the necessary encode buffers. */
	if(!OpenVideo())
		return false;

	if(m_HasAudio && !OpenAudio())
		return false;

	/* open the output file, if needed */
	if(!(m_pFormat->flags & AVFMT_NOFILE))
	{
		const int OpenResult = avio_open(&m_pFormatContext->pb, aWholePath, AVIO_FLAG_WRITE);
		if(OpenResult < 0)
		{
			char aError[AV_ERROR_MAX_STRING_SIZE];
			av_strerror(OpenResult, aError, sizeof(aError));
			log_error("videorecorder", "Could not open file '%s': %s", aWholePath, aError);
			return false;
		}
	}

	// One RGBA->YUV420P scaler context per video thread, all configured
	// with the same color space coefficients (see COLOR_SPACE above).
	m_VideoStream.m_vpSwsContexts.reserve(m_VideoThreads);

	for(size_t i = 0; i < m_VideoThreads; ++i)
	{
		if(m_VideoStream.m_vpSwsContexts.size() <= i)
			m_VideoStream.m_vpSwsContexts.emplace_back(nullptr);

		if(!m_VideoStream.m_vpSwsContexts[i])
		{
			m_VideoStream.m_vpSwsContexts[i] = sws_getCachedContext(
				m_VideoStream.m_vpSwsContexts[i],
				m_VideoStream.m_pCodecContext->width, m_VideoStream.m_pCodecContext->height, AV_PIX_FMT_RGBA,
				m_VideoStream.m_pCodecContext->width, m_VideoStream.m_pCodecContext->height, AV_PIX_FMT_YUV420P,
				SWS_FULL_CHR_H_INT | SWS_FULL_CHR_H_INP | SWS_ACCURATE_RND | SWS_BITEXACT, nullptr, nullptr, nullptr);

			const int *pMatrixCoefficients = sws_getCoefficients(COLOR_SPACE);
			sws_setColorspaceDetails(m_VideoStream.m_vpSwsContexts[i], pMatrixCoefficients, 0, pMatrixCoefficients, 0, 0, 1 << 16, 1 << 16);
		}
	}

	/* Write the stream header, if any. */
	const int WriteHeaderResult = avformat_write_header(m_pFormatContext, &m_pOptDict);
	if(WriteHeaderResult < 0)
	{
		char aError[AV_ERROR_MAX_STRING_SIZE];
		av_strerror(WriteHeaderResult, aError, sizeof(aError));
		log_error("videorecorder", "Could not write header: %s", aError);
		return false;
	}

	m_Recording = true;
	m_Started = true;
	m_Stopped = false;
	ms_Time = time_get();
	return true;
}
282
283void CVideo::Pause(bool Pause)
284{
285 if(ms_pCurrentVideo)
286 m_Recording = !Pause;
287}
288
// Tears down the recording pipeline: joins all worker threads, flushes
// the encoders, writes the container trailer, closes the output file and
// finally deletes this object.
// WARNING: `delete ms_pCurrentVideo` destroys `this`; no member may be
// accessed afterwards (hence the local pSound copy).
void CVideo::Stop()
{
	dbg_assert(!m_Stopped, "Already stopped");
	m_Stopped = true;

	m_pGraphics->WaitForIdle();

	// Signal every video worker to finish and wait for it to exit.
	for(auto &pVideoThread : m_vpVideoThreads)
	{
		{
			std::unique_lock<std::mutex> Lock(pVideoThread->m_Mutex);
			pVideoThread->m_Finished = true;
			pVideoThread->m_Cond.notify_all();
		}

		pVideoThread->m_Thread.join();
	}
	m_vpVideoThreads.clear();

	// Same shutdown sequence for the audio workers.
	for(auto &pAudioThread : m_vpAudioThreads)
	{
		{
			std::unique_lock<std::mutex> Lock(pAudioThread->m_Mutex);
			pAudioThread->m_Finished = true;
			pAudioThread->m_Cond.notify_all();
		}

		pAudioThread->m_Thread.join();
	}
	m_vpAudioThreads.clear();

	// Busy-wait until all in-flight frames have been fully encoded.
	while(m_ProcessingVideoFrame > 0 || m_ProcessingAudioFrame > 0)
		std::this_thread::sleep_for(10us);

	m_Recording = false;

	// Flush delayed frames out of the encoders before writing the trailer.
	FinishFrames(&m_VideoStream);

	if(m_HasAudio)
		FinishFrames(&m_AudioStream);

	// Only write the trailer if the header was written (Start() succeeded).
	if(m_pFormatContext && m_Started)
		av_write_trailer(m_pFormatContext);

	CloseStream(&m_VideoStream);

	if(m_HasAudio)
		CloseStream(&m_AudioStream);

	if(m_pFormatContext)
	{
		if(!(m_pFormat->flags & AVFMT_NOFILE))
			avio_closep(&m_pFormatContext->pb);

		avformat_free_context(m_pFormatContext);
	}

	ISound *volatile pSound = m_pSound;

	// Pause audio while this object is destroyed; the destructor clears
	// ms_pCurrentVideo.
	pSound->PauseAudioDevice();
	delete ms_pCurrentVideo;
	pSound->UnpauseAudioDevice();
}
352
// Captures the current frame buffer and hands it to the next video worker
// thread in the ring. Blocks until both the current and the following
// worker are free, so frames are encoded in order without deadlocking.
void CVideo::NextVideoFrameThread()
{
	if(m_Recording)
	{
		m_VideoFrameIndex += 1;
		// The very first frame is skipped — presumably the presented image
		// is not yet valid at that point; TODO confirm.
		if(m_VideoFrameIndex >= 2)
		{
			// Counts in-flight frames; decremented by the worker when done.
			m_ProcessingVideoFrame.fetch_add(1);

			size_t NextVideoThreadIndex = m_CurVideoThreadIndex + 1;
			if(NextVideoThreadIndex == m_VideoThreads)
				NextVideoThreadIndex = 0;

			// always wait for the next video thread too, to prevent a dead lock
			{
				auto *pVideoThread = m_vpVideoThreads[NextVideoThreadIndex].get();
				std::unique_lock<std::mutex> Lock(pVideoThread->m_Mutex);

				if(pVideoThread->m_HasVideoFrame)
				{
					pVideoThread->m_Cond.wait(Lock, [&pVideoThread]() -> bool { return !pVideoThread->m_HasVideoFrame; });
				}
			}

			// after reading the graphic libraries' frame buffer, go threaded
			{
				auto *pVideoThread = m_vpVideoThreads[m_CurVideoThreadIndex].get();
				std::unique_lock<std::mutex> Lock(pVideoThread->m_Mutex);

				if(pVideoThread->m_HasVideoFrame)
				{
					pVideoThread->m_Cond.wait(Lock, [&pVideoThread]() -> bool { return !pVideoThread->m_HasVideoFrame; });
				}

				// Copy the presented image into this worker's RGBA buffer.
				UpdateVideoBufferFromGraphics(m_CurVideoThreadIndex);

				pVideoThread->m_HasVideoFrame = true;
				{
					std::unique_lock<std::mutex> LockParent(pVideoThread->m_VideoFillMutex);
					pVideoThread->m_VideoFrameToFill = m_VideoFrameIndex;
				}
				pVideoThread->m_Cond.notify_all();
			}

			// Advance to the next worker in the ring.
			++m_CurVideoThreadIndex;
			if(m_CurVideoThreadIndex == m_VideoThreads)
				m_CurVideoThreadIndex = 0;
		}
	}
}
403
404void CVideo::NextVideoFrame()
405{
406 if(m_Recording)
407 {
408 ms_Time += ms_TickTime;
409 ms_LocalTime = (ms_Time - ms_LocalStartTime) / (float)time_freq();
410 }
411}
412
413void CVideo::NextAudioFrameTimeline(ISoundMixFunc Mix)
414{
415 if(m_Recording && m_HasAudio)
416 {
417 double SamplesPerFrame = (double)m_AudioStream.m_pCodecContext->sample_rate / m_FPS;
418 while(m_AudioStream.m_SamplesFrameCount >= m_AudioStream.m_SamplesCount)
419 {
420 NextAudioFrame(Mix);
421 }
422 m_AudioStream.m_SamplesFrameCount += SamplesPerFrame;
423 }
424}
425
// Mixes one chunk of game audio into the current audio worker's buffer
// and hands it to that worker for resampling/encoding. Mirrors the ring
// hand-off logic of NextVideoFrameThread().
void CVideo::NextAudioFrame(ISoundMixFunc Mix)
{
	if(m_Recording && m_HasAudio)
	{
		m_AudioFrameIndex += 1;

		// Counts in-flight frames; decremented by the worker when done.
		m_ProcessingAudioFrame.fetch_add(1);

		size_t NextAudioThreadIndex = m_CurAudioThreadIndex + 1;
		if(NextAudioThreadIndex == m_AudioThreads)
			NextAudioThreadIndex = 0;

		// always wait for the next Audio thread too, to prevent a dead lock

		{
			auto *pAudioThread = m_vpAudioThreads[NextAudioThreadIndex].get();
			std::unique_lock<std::mutex> Lock(pAudioThread->m_Mutex);

			if(pAudioThread->m_HasAudioFrame)
			{
				pAudioThread->m_Cond.wait(Lock, [&pAudioThread]() -> bool { return !pAudioThread->m_HasAudioFrame; });
			}
		}

		// after reading the graphic libraries' frame buffer, go threaded
		{
			auto *pAudioThread = m_vpAudioThreads[m_CurAudioThreadIndex].get();

			std::unique_lock<std::mutex> Lock(pAudioThread->m_Mutex);

			if(pAudioThread->m_HasAudioFrame)
			{
				pAudioThread->m_Cond.wait(Lock, [&pAudioThread]() -> bool { return !pAudioThread->m_HasAudioFrame; });
			}

			Mix(m_vAudioBuffers[m_CurAudioThreadIndex].m_aBuffer, std::size(m_vAudioBuffers[m_CurAudioThreadIndex].m_aBuffer) / 2 / 2); // two channels

			// Number of output samples this frame will produce, accounting
			// for samples still buffered inside the resampler.
			int64_t DstNbSamples = av_rescale_rnd(
				swr_get_delay(m_AudioStream.m_vpSwrContexts[m_CurAudioThreadIndex], m_AudioStream.m_pCodecContext->sample_rate) +
					m_AudioStream.m_vpFrames[m_CurAudioThreadIndex]->nb_samples,
				m_AudioStream.m_pCodecContext->sample_rate,
				m_AudioStream.m_pCodecContext->sample_rate, AV_ROUND_UP);

			// The worker uses m_SampleCountStart to compute this frame's pts.
			pAudioThread->m_SampleCountStart = m_AudioStream.m_SamplesCount;
			m_AudioStream.m_SamplesCount += DstNbSamples;

			pAudioThread->m_HasAudioFrame = true;
			{
				std::unique_lock<std::mutex> LockParent(pAudioThread->m_AudioFillMutex);
				pAudioThread->m_AudioFrameToFill = m_AudioFrameIndex;
			}
			pAudioThread->m_Cond.notify_all();
		}

		// Advance to the next worker in the ring.
		++m_CurAudioThreadIndex;
		if(m_CurAudioThreadIndex == m_AudioThreads)
			m_CurAudioThreadIndex = 0;
	}
}
485
// Audio worker thread body. Waits for a frame hand-off, resamples it,
// then waits for the preceding (parent) worker in the ring so frames are
// written to the muxer in order. m_WriteLock serializes WriteFrame calls.
void CVideo::RunAudioThread(size_t ParentThreadIndex, size_t ThreadIndex)
{
	auto *pThreadData = m_vpAudioThreads[ThreadIndex].get();
	auto *pParentThreadData = m_vpAudioThreads[ParentThreadIndex].get();
	std::unique_lock<std::mutex> Lock(pThreadData->m_Mutex);
	// Tell Start() that this thread is up and running.
	pThreadData->m_Started = true;
	pThreadData->m_Cond.notify_all();

	while(!pThreadData->m_Finished)
	{
		pThreadData->m_Cond.wait(Lock, [&pThreadData]() -> bool { return pThreadData->m_HasAudioFrame || pThreadData->m_Finished; });
		pThreadData->m_Cond.notify_all();

		if(pThreadData->m_HasAudioFrame)
		{
			FillAudioFrame(ThreadIndex);
			// check if we need to wait for the parent to finish
			{
				std::unique_lock<std::mutex> LockParent(pParentThreadData->m_AudioFillMutex);
				if(pParentThreadData->m_AudioFrameToFill != 0 && pThreadData->m_AudioFrameToFill >= pParentThreadData->m_AudioFrameToFill)
				{
					// wait for the parent to finish its frame
					pParentThreadData->m_AudioFillCond.wait(LockParent, [&pParentThreadData]() -> bool { return pParentThreadData->m_AudioFrameToFill == 0; });
				}
			}
			{
				std::unique_lock<std::mutex> LockAudio(pThreadData->m_AudioFillMutex);

				{
					const CLockScope LockScope(m_WriteLock);
					// pts in codec time base, derived from the running sample count.
					m_AudioStream.m_vpFrames[ThreadIndex]->pts = av_rescale_q(pThreadData->m_SampleCountStart, AVRational{1, m_AudioStream.m_pCodecContext->sample_rate}, m_AudioStream.m_pCodecContext->time_base);
					WriteFrame(&m_AudioStream, ThreadIndex);
				}

				// Mark this frame done and wake any child waiting on us.
				pThreadData->m_AudioFrameToFill = 0;
				pThreadData->m_AudioFillCond.notify_all();
				pThreadData->m_Cond.notify_all();
			}
			m_ProcessingAudioFrame.fetch_sub(1);

			pThreadData->m_HasAudioFrame = false;
		}
	}
}
530
// Converts the worker's mixed S16 stereo buffer into the encoder's sample
// format: wraps the raw buffer in the temporary frame, then resamples it
// into the encoder frame via swresample. Errors are logged and the frame
// is dropped (the function returns without filling it).
void CVideo::FillAudioFrame(size_t ThreadIndex)
{
	const int FillArrayResult = av_samples_fill_arrays(
		(uint8_t **)m_AudioStream.m_vpTmpFrames[ThreadIndex]->data,
		nullptr, // pointer to linesize (int*)
		(const uint8_t *)m_vAudioBuffers[ThreadIndex].m_aBuffer,
		2, // channels
		m_AudioStream.m_vpTmpFrames[ThreadIndex]->nb_samples,
		AV_SAMPLE_FMT_S16,
		0 // align
	);
	if(FillArrayResult < 0)
	{
		char aError[AV_ERROR_MAX_STRING_SIZE];
		av_strerror(FillArrayResult, aError, sizeof(aError));
		log_error("videorecorder", "Could not fill audio frame: %s", aError);
		return;
	}

	// The destination frame may still be referenced by the encoder.
	const int MakeWriteableResult = av_frame_make_writable(m_AudioStream.m_vpFrames[ThreadIndex]);
	if(MakeWriteableResult < 0)
	{
		char aError[AV_ERROR_MAX_STRING_SIZE];
		av_strerror(MakeWriteableResult, aError, sizeof(aError));
		log_error("videorecorder", "Could not make audio frame writeable: %s", aError);
		return;
	}

	/* convert to destination format */
	const int ConvertResult = swr_convert(
		m_AudioStream.m_vpSwrContexts[ThreadIndex],
		m_AudioStream.m_vpFrames[ThreadIndex]->data,
		m_AudioStream.m_vpFrames[ThreadIndex]->nb_samples,
		(const uint8_t **)m_AudioStream.m_vpTmpFrames[ThreadIndex]->data,
		m_AudioStream.m_vpTmpFrames[ThreadIndex]->nb_samples);
	if(ConvertResult < 0)
	{
		char aError[AV_ERROR_MAX_STRING_SIZE];
		av_strerror(ConvertResult, aError, sizeof(aError));
		log_error("videorecorder", "Could not convert audio frame: %s", aError);
		return;
	}
}
574
// Video worker thread body. Waits for a frame hand-off, converts it to
// the encoder's pixel format, then waits for the preceding (parent)
// worker so frames hit the muxer in order. m_WriteLock serializes
// WriteFrame calls across all workers.
void CVideo::RunVideoThread(size_t ParentThreadIndex, size_t ThreadIndex)
{
	auto *pThreadData = m_vpVideoThreads[ThreadIndex].get();
	auto *pParentThreadData = m_vpVideoThreads[ParentThreadIndex].get();
	std::unique_lock<std::mutex> Lock(pThreadData->m_Mutex);
	// Tell Start() that this thread is up and running.
	pThreadData->m_Started = true;
	pThreadData->m_Cond.notify_all();

	while(!pThreadData->m_Finished)
	{
		pThreadData->m_Cond.wait(Lock, [&pThreadData]() -> bool { return pThreadData->m_HasVideoFrame || pThreadData->m_Finished; });
		pThreadData->m_Cond.notify_all();

		if(pThreadData->m_HasVideoFrame)
		{
			FillVideoFrame(ThreadIndex);
			// check if we need to wait for the parent to finish
			{
				std::unique_lock<std::mutex> LockParent(pParentThreadData->m_VideoFillMutex);
				if(pParentThreadData->m_VideoFrameToFill != 0 && pThreadData->m_VideoFrameToFill >= pParentThreadData->m_VideoFrameToFill)
				{
					// wait for the parent to finish its frame
					pParentThreadData->m_VideoFillCond.wait(LockParent, [&pParentThreadData]() -> bool { return pParentThreadData->m_VideoFrameToFill == 0; });
				}
			}
			{
				std::unique_lock<std::mutex> LockVideo(pThreadData->m_VideoFillMutex);
				{
					const CLockScope LockScope(m_WriteLock);
					// pts equals the number of frames already encoded; the
					// field name changed in libavcodec 60.
#if LIBAVCODEC_VERSION_MAJOR >= 60
					m_VideoStream.m_vpFrames[ThreadIndex]->pts = m_VideoStream.m_pCodecContext->frame_num;
#else
					m_VideoStream.m_vpFrames[ThreadIndex]->pts = m_VideoStream.m_pCodecContext->frame_number;
#endif
					WriteFrame(&m_VideoStream, ThreadIndex);
				}

				// Mark this frame done and wake any child waiting on us.
				pThreadData->m_VideoFrameToFill = 0;
				pThreadData->m_VideoFillCond.notify_all();
				pThreadData->m_Cond.notify_all();
			}
			m_ProcessingVideoFrame.fetch_sub(1);

			pThreadData->m_HasVideoFrame = false;
		}
	}
}
622
// Converts the worker's RGBA capture buffer (4 bytes per pixel, hence the
// input line size) into the encoder frame using the per-thread scaler
// context configured in Start().
void CVideo::FillVideoFrame(size_t ThreadIndex)
{
	const int InLineSize = 4 * m_VideoStream.m_pCodecContext->width;
	auto *pRGBAData = m_vVideoBuffers[ThreadIndex].m_vBuffer.data();
	sws_scale(m_VideoStream.m_vpSwsContexts[ThreadIndex], (const uint8_t *const *)&pRGBAData, &InLineSize, 0,
		m_VideoStream.m_pCodecContext->height, m_VideoStream.m_vpFrames[ThreadIndex]->data, m_VideoStream.m_vpFrames[ThreadIndex]->linesize);
}
630
// Reads the currently presented image from the graphics backend into the
// given worker's buffer and verifies it matches the recording dimensions
// and the expected RGBA format.
void CVideo::UpdateVideoBufferFromGraphics(size_t ThreadIndex)
{
	uint32_t Width;
	uint32_t Height;
	CImageInfo::EImageFormat Format;
	m_pGraphics->GetReadPresentedImageDataFuncUnsafe()(Width, Height, Format, m_vVideoBuffers[ThreadIndex].m_vBuffer);
	dbg_assert((int)Width == m_Width && (int)Height == m_Height, "Size mismatch between video (%d x %d) and graphics (%d x %d)", m_Width, m_Height, Width, Height);
	dbg_assert(Format == CImageInfo::FORMAT_RGBA, "Unexpected image format %d", (int)Format);
}
640
641AVFrame *CVideo::AllocPicture(enum AVPixelFormat PixFmt, int Width, int Height)
642{
643 AVFrame *pPicture = av_frame_alloc();
644 if(!pPicture)
645 {
646 log_error("videorecorder", "Could not allocate video frame");
647 return nullptr;
648 }
649
650 pPicture->format = PixFmt;
651 pPicture->width = Width;
652 pPicture->height = Height;
653
654 /* allocate the buffers for the frame data */
655 const int FrameBufferAllocResult = av_frame_get_buffer(frame: pPicture, align: 32);
656 if(FrameBufferAllocResult < 0)
657 {
658 char aError[AV_ERROR_MAX_STRING_SIZE];
659 av_strerror(errnum: FrameBufferAllocResult, errbuf: aError, errbuf_size: sizeof(aError));
660 log_error("videorecorder", "Could not allocate video frame buffer: %s", aError);
661 return nullptr;
662 }
663
664 return pPicture;
665}
666
667AVFrame *CVideo::AllocAudioFrame(enum AVSampleFormat SampleFmt, uint64_t ChannelLayout, int SampleRate, int NbSamples)
668{
669 AVFrame *pFrame = av_frame_alloc();
670 if(!pFrame)
671 {
672 log_error("videorecorder", "Could not allocate audio frame");
673 return nullptr;
674 }
675
676 pFrame->format = SampleFmt;
677#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(59, 24, 100)
678 dbg_assert(av_channel_layout_from_mask(&pFrame->ch_layout, ChannelLayout) == 0, "Failed to set channel layout");
679#else
680 pFrame->channel_layout = ChannelLayout;
681#endif
682 pFrame->sample_rate = SampleRate;
683 pFrame->nb_samples = NbSamples;
684
685 if(NbSamples)
686 {
687 const int FrameBufferAllocResult = av_frame_get_buffer(frame: pFrame, align: 0);
688 if(FrameBufferAllocResult < 0)
689 {
690 char aError[AV_ERROR_MAX_STRING_SIZE];
691 av_strerror(errnum: FrameBufferAllocResult, errbuf: aError, errbuf_size: sizeof(aError));
692 log_error("videorecorder", "Could not allocate audio frame buffer: %s", aError);
693 return nullptr;
694 }
695 }
696
697 return pFrame;
698}
699
700bool CVideo::OpenVideo()
701{
702 AVCodecContext *pContext = m_VideoStream.m_pCodecContext;
703 AVDictionary *pOptions = nullptr;
704 av_dict_copy(dst: &pOptions, src: m_pOptDict, flags: 0);
705
706 /* open the codec */
707 const int VideoOpenResult = avcodec_open2(avctx: pContext, codec: m_pVideoCodec, options: &pOptions);
708 av_dict_free(m: &pOptions);
709 if(VideoOpenResult < 0)
710 {
711 char aError[AV_ERROR_MAX_STRING_SIZE];
712 av_strerror(errnum: VideoOpenResult, errbuf: aError, errbuf_size: sizeof(aError));
713 log_error("videorecorder", "Could not open video codec: %s", aError);
714 return false;
715 }
716
717 m_VideoStream.m_vpFrames.clear();
718 m_VideoStream.m_vpFrames.reserve(n: m_VideoThreads);
719
720 /* allocate and init a re-usable frame */
721 for(size_t i = 0; i < m_VideoThreads; ++i)
722 {
723 m_VideoStream.m_vpFrames.emplace_back(args: nullptr);
724 m_VideoStream.m_vpFrames[i] = AllocPicture(PixFmt: pContext->pix_fmt, Width: pContext->width, Height: pContext->height);
725 if(!m_VideoStream.m_vpFrames[i])
726 {
727 return false;
728 }
729 }
730
731 /* If the output format is not YUV420P, then a temporary YUV420P
732 * picture is needed too. It is then converted to the required
733 * output format. */
734 m_VideoStream.m_vpTmpFrames.clear();
735 m_VideoStream.m_vpTmpFrames.reserve(n: m_VideoThreads);
736
737 if(pContext->pix_fmt != AV_PIX_FMT_YUV420P)
738 {
739 /* allocate and init a re-usable frame */
740 for(size_t i = 0; i < m_VideoThreads; ++i)
741 {
742 m_VideoStream.m_vpTmpFrames.emplace_back(args: nullptr);
743 m_VideoStream.m_vpTmpFrames[i] = AllocPicture(PixFmt: AV_PIX_FMT_YUV420P, Width: pContext->width, Height: pContext->height);
744 if(!m_VideoStream.m_vpTmpFrames[i])
745 {
746 return false;
747 }
748 }
749 }
750
751 /* copy the stream parameters to the muxer */
752 const int AudioStreamCopyResult = avcodec_parameters_from_context(par: m_VideoStream.m_pStream->codecpar, codec: pContext);
753 if(AudioStreamCopyResult < 0)
754 {
755 char aError[AV_ERROR_MAX_STRING_SIZE];
756 av_strerror(errnum: AudioStreamCopyResult, errbuf: aError, errbuf_size: sizeof(aError));
757 log_error("videorecorder", "Could not copy video stream parameters: %s", aError);
758 return false;
759 }
760 m_VideoFrameIndex = 0;
761 return true;
762}
763
// Opens the audio codec, allocates per-worker encoder frames and S16
// stereo staging frames, copies the codec parameters into the muxer
// stream and creates one configured swresample context per audio worker.
// Resets the audio frame counter. Returns false and logs on any failure.
bool CVideo::OpenAudio()
{
	AVCodecContext *pContext = m_AudioStream.m_pCodecContext;
	AVDictionary *pOptions = nullptr;
	av_dict_copy(&pOptions, m_pOptDict, 0);

	/* open it */
	const int AudioOpenResult = avcodec_open2(pContext, m_pAudioCodec, &pOptions);
	av_dict_free(&pOptions);
	if(AudioOpenResult < 0)
	{
		char aError[AV_ERROR_MAX_STRING_SIZE];
		av_strerror(AudioOpenResult, aError, sizeof(aError));
		log_error("videorecorder", "Could not open audio codec: %s", aError);
		return false;
	}

	// Variable-frame-size codecs get an arbitrary large frame; otherwise
	// use the codec's fixed frame size.
	int NbSamples;
	if(pContext->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE)
		NbSamples = 10000;
	else
		NbSamples = pContext->frame_size;

	m_AudioStream.m_vpFrames.clear();
	m_AudioStream.m_vpFrames.reserve(m_AudioThreads);

	m_AudioStream.m_vpTmpFrames.clear();
	m_AudioStream.m_vpTmpFrames.reserve(m_AudioThreads);

	/* allocate and init a re-usable frame */
	for(size_t i = 0; i < m_AudioThreads; ++i)
	{
		// Encoder-format frame (channel layout API changed in 59.24.100).
		m_AudioStream.m_vpFrames.emplace_back(nullptr);
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(59, 24, 100)
		m_AudioStream.m_vpFrames[i] = AllocAudioFrame(pContext->sample_fmt, pContext->ch_layout.u.mask, pContext->sample_rate, NbSamples);
#else
		m_AudioStream.m_vpFrames[i] = AllocAudioFrame(pContext->sample_fmt, pContext->channel_layout, pContext->sample_rate, NbSamples);
#endif
		if(!m_AudioStream.m_vpFrames[i])
		{
			return false;
		}

		// Staging frame matching the game's mixed output: S16 stereo at the
		// sound engine's mixing rate.
		m_AudioStream.m_vpTmpFrames.emplace_back(nullptr);
		m_AudioStream.m_vpTmpFrames[i] = AllocAudioFrame(AV_SAMPLE_FMT_S16, AV_CH_LAYOUT_STEREO, m_pSound->MixingRate(), NbSamples);
		if(!m_AudioStream.m_vpTmpFrames[i])
		{
			return false;
		}
	}

	/* copy the stream parameters to the muxer */
	const int AudioStreamCopyResult = avcodec_parameters_from_context(m_AudioStream.m_pStream->codecpar, pContext);
	if(AudioStreamCopyResult < 0)
	{
		char aError[AV_ERROR_MAX_STRING_SIZE];
		av_strerror(AudioStreamCopyResult, aError, sizeof(aError));
		log_error("videorecorder", "Could not copy audio stream parameters: %s", aError);
		return false;
	}

	/* create resampling context */
	m_AudioStream.m_vpSwrContexts.clear();
	m_AudioStream.m_vpSwrContexts.resize(m_AudioThreads);
	for(size_t i = 0; i < m_AudioThreads; ++i)
	{
		m_AudioStream.m_vpSwrContexts[i] = swr_alloc();
		if(!m_AudioStream.m_vpSwrContexts[i])
		{
			log_error("videorecorder", "Could not allocate resampling context");
			return false;
		}

		/* set options */
		// Input: the game's S16 mix at MixingRate; output: the encoder's
		// format and rate. Channel option names depend on the libav version.
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(59, 24, 100)
		dbg_assert(av_opt_set_chlayout(m_AudioStream.m_vpSwrContexts[i], "in_chlayout", &pContext->ch_layout, 0) == 0, "invalid option");
#else
		dbg_assert(av_opt_set_int(m_AudioStream.m_vpSwrContexts[i], "in_channel_count", pContext->channels, 0) == 0, "invalid option");
#endif
		if(av_opt_set_int(m_AudioStream.m_vpSwrContexts[i], "in_sample_rate", m_pSound->MixingRate(), 0) != 0)
		{
			log_error("videorecorder", "Could not set audio sample rate to %d", m_pSound->MixingRate());
			return false;
		}
		dbg_assert(av_opt_set_sample_fmt(m_AudioStream.m_vpSwrContexts[i], "in_sample_fmt", AV_SAMPLE_FMT_S16, 0) == 0, "invalid option");
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(59, 24, 100)
		dbg_assert(av_opt_set_chlayout(m_AudioStream.m_vpSwrContexts[i], "out_chlayout", &pContext->ch_layout, 0) == 0, "invalid option");
#else
		dbg_assert(av_opt_set_int(m_AudioStream.m_vpSwrContexts[i], "out_channel_count", pContext->channels, 0) == 0, "invalid option");
#endif
		dbg_assert(av_opt_set_int(m_AudioStream.m_vpSwrContexts[i], "out_sample_rate", pContext->sample_rate, 0) == 0, "invalid option");
		dbg_assert(av_opt_set_sample_fmt(m_AudioStream.m_vpSwrContexts[i], "out_sample_fmt", pContext->sample_fmt, 0) == 0, "invalid option");

		/* initialize the resampling context */
		const int ResamplingContextInitResult = swr_init(m_AudioStream.m_vpSwrContexts[i]);
		if(ResamplingContextInitResult < 0)
		{
			char aError[AV_ERROR_MAX_STRING_SIZE];
			av_strerror(ResamplingContextInitResult, aError, sizeof(aError));
			log_error("videorecorder", "Could not initialize resampling context: %s", aError);
			return false;
		}
	}

	m_AudioFrameIndex = 0;
	return true;
}
871
/* Add an output stream (audio or video) to the muxer.
 *
 * Finds the encoder for CodecId, creates a new AVStream in pFormatContext and
 * allocates a codec context whose parameters are configured here; the context
 * is stored in pStream->m_pCodecContext (released later by CloseStream) and
 * the encoder is returned through ppCodec.
 *
 * Returns false and logs an error if the encoder, stream or codec context
 * cannot be created; true otherwise. */
bool CVideo::AddStream(COutputStream *pStream, AVFormatContext *pFormatContext, const AVCodec **ppCodec, enum AVCodecID CodecId) const
{
	/* find the encoder */
	*ppCodec = avcodec_find_encoder(id: CodecId);
	if(!(*ppCodec))
	{
		log_error("videorecorder", "Could not find encoder for codec '%s'", avcodec_get_name(CodecId));
		return false;
	}

	pStream->m_pStream = avformat_new_stream(s: pFormatContext, c: nullptr);
	if(!pStream->m_pStream)
	{
		log_error("videorecorder", "Could not allocate stream");
		return false;
	}
	// Stream ids are sequential; nb_streams was just incremented by avformat_new_stream.
	pStream->m_pStream->id = pFormatContext->nb_streams - 1;
	AVCodecContext *pContext = avcodec_alloc_context3(codec: *ppCodec);
	if(!pContext)
	{
		log_error("videorecorder", "Could not allocate encoding context");
		return false;
	}
	pStream->m_pCodecContext = pContext;

#if defined(CONF_ARCH_IA32) || defined(CONF_ARCH_ARM) || defined(CONF_ARCH_WASM)
	// use only 1 ffmpeg thread on 32-bit to save memory
	pContext->thread_count = 1;
#endif

	switch((*ppCodec)->type)
	{
	case AVMEDIA_TYPE_AUDIO:
	{
		// Query the encoder's supported sample formats and rates. The direct
		// sample_fmts/supported_samplerates fields were deprecated in favor of
		// avcodec_get_supported_config in libavcodec 61.13.100.
		const AVSampleFormat *pSampleFormats = nullptr;
		const int *pSampleRates = nullptr;
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(61, 13, 100)
		avcodec_get_supported_config(avctx: pContext, codec: *ppCodec, config: AV_CODEC_CONFIG_SAMPLE_FORMAT, flags: 0, out_configs: (const void **)&pSampleFormats, out_num_configs: nullptr);
		avcodec_get_supported_config(avctx: pContext, codec: *ppCodec, config: AV_CODEC_CONFIG_SAMPLE_RATE, flags: 0, out_configs: (const void **)&pSampleRates, out_num_configs: nullptr);
#else
		pSampleFormats = (*ppCodec)->sample_fmts;
		pSampleRates = (*ppCodec)->supported_samplerates;
#endif
		pContext->sample_fmt = pSampleFormats ? pSampleFormats[0] : AV_SAMPLE_FMT_FLTP;
		// Prefer the engine's mixing rate if the encoder supports it; otherwise
		// fall back to the first rate in the encoder's 0-terminated list.
		if(pSampleRates)
		{
			pContext->sample_rate = pSampleRates[0];
			for(int i = 0; pSampleRates[i]; i++)
			{
				if(pSampleRates[i] == m_pSound->MixingRate())
				{
					pContext->sample_rate = m_pSound->MixingRate();
					break;
				}
			}
		}
		else
		{
			pContext->sample_rate = m_pSound->MixingRate();
		}
		// 2 channels * 16 bits per sample, per second of audio.
		pContext->bit_rate = pContext->sample_rate * 2 * 16;
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(59, 24, 100)
		dbg_assert(av_channel_layout_from_mask(&pContext->ch_layout, AV_CH_LAYOUT_STEREO) == 0, "Failed to set channel layout");
#else
		// channels/channel_layout were replaced by ch_layout in libavcodec 59.24.100.
		pContext->channels = 2;
		pContext->channel_layout = AV_CH_LAYOUT_STEREO;
#endif

		// Audio timestamps are expressed in samples: time base = 1/sample_rate.
		pStream->m_pStream->time_base.num = 1;
		pStream->m_pStream->time_base.den = pContext->sample_rate;
		break;
	}

	case AVMEDIA_TYPE_VIDEO:
		pContext->codec_id = CodecId;

		pContext->bit_rate = 400000;
		/* Resolution must be a multiple of two. */
		// NOTE(review): only the height is rounded down to an even value here;
		// presumably m_Width is guaranteed even by the caller — confirm where
		// the recording dimensions are set.
		pContext->width = m_Width;
		pContext->height = m_Height % 2 == 0 ? m_Height : m_Height - 1;
		/* timebase: This is the fundamental unit of time (in seconds) in terms
		 * of which frame timestamps are represented. For fixed-fps content,
		 * timebase should be 1/framerate and timestamp increments should be
		 * identical to 1. */
		pStream->m_pStream->time_base.num = 1;
		pStream->m_pStream->time_base.den = m_FPS;
		pContext->time_base = pStream->m_pStream->time_base;

		pContext->gop_size = 12; /* emit one intra frame every twelve frames at most */
		pContext->pix_fmt = AV_PIX_FMT_YUV420P;
		// COLOR_SPACE is BT.709; see the static_assert at the top of the file.
		pContext->colorspace = COLOR_SPACE;
		if(pContext->codec_id == AV_CODEC_ID_MPEG2VIDEO)
		{
			/* just for testing, we also add B-frames */
			pContext->max_b_frames = 2;
		}
		if(pContext->codec_id == AV_CODEC_ID_MPEG1VIDEO)
		{
			/* Needed to avoid using macroblocks in which some coeffs overflow.
			 * This does not happen with normal video, it just happens here as
			 * the motion of the chroma plane does not match the luma plane. */
			pContext->mb_decision = 2;
		}
		if(CodecId == AV_CODEC_ID_H264)
		{
			// x264 private options: speed/size preset and constant rate factor,
			// both taken from the user's config.
			static const char *s_apPresets[10] = {"ultrafast", "superfast", "veryfast", "faster", "fast", "medium", "slow", "slower", "veryslow", "placebo"};
			dbg_assert(g_Config.m_ClVideoX264Preset < (int)std::size(s_apPresets), "preset index invalid: %d", g_Config.m_ClVideoX264Preset);
			dbg_assert(av_opt_set(pContext->priv_data, "preset", s_apPresets[g_Config.m_ClVideoX264Preset], 0) == 0, "invalid option");
			dbg_assert(av_opt_set_int(pContext->priv_data, "crf", g_Config.m_ClVideoX264Crf, 0) == 0, "invalid option");
		}
		break;

	default:
		break;
	}

	/* Some formats want stream headers to be separate. */
	if(pFormatContext->oformat->flags & AVFMT_GLOBALHEADER)
		pContext->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

	return true;
}
995
996void CVideo::WriteFrame(COutputStream *pStream, size_t ThreadIndex)
997{
998 AVPacket *pPacket = av_packet_alloc();
999 if(pPacket == nullptr)
1000 {
1001 log_error("videorecorder", "Could not allocate packet");
1002 return;
1003 }
1004
1005 pPacket->data = nullptr;
1006 pPacket->size = 0;
1007
1008 avcodec_send_frame(avctx: pStream->m_pCodecContext, frame: pStream->m_vpFrames[ThreadIndex]);
1009 int RecvResult = 0;
1010 do
1011 {
1012 RecvResult = avcodec_receive_packet(avctx: pStream->m_pCodecContext, avpkt: pPacket);
1013 if(!RecvResult)
1014 {
1015 /* rescale output packet timestamp values from codec to stream timebase */
1016 av_packet_rescale_ts(pkt: pPacket, tb_src: pStream->m_pCodecContext->time_base, tb_dst: pStream->m_pStream->time_base);
1017 pPacket->stream_index = pStream->m_pStream->index;
1018
1019 const int WriteFrameResult = av_interleaved_write_frame(s: m_pFormatContext, pkt: pPacket);
1020 if(WriteFrameResult < 0)
1021 {
1022 char aError[AV_ERROR_MAX_STRING_SIZE];
1023 av_strerror(errnum: WriteFrameResult, errbuf: aError, errbuf_size: sizeof(aError));
1024 log_error("videorecorder", "Could not write video frame: %s", aError);
1025 }
1026 }
1027 else
1028 break;
1029 } while(true);
1030
1031 if(RecvResult && RecvResult != AVERROR(EAGAIN))
1032 {
1033 char aError[AV_ERROR_MAX_STRING_SIZE];
1034 av_strerror(errnum: RecvResult, errbuf: aError, errbuf_size: sizeof(aError));
1035 log_error("videorecorder", "Could not encode video frame: %s", aError);
1036 }
1037
1038 av_packet_free(pkt: &pPacket);
1039}
1040
1041void CVideo::FinishFrames(COutputStream *pStream)
1042{
1043 if(!pStream->m_pCodecContext || !avcodec_is_open(s: pStream->m_pCodecContext))
1044 return;
1045
1046 AVPacket *pPacket = av_packet_alloc();
1047 if(pPacket == nullptr)
1048 {
1049 log_error("videorecorder", "Could not allocate packet");
1050 return;
1051 }
1052
1053 pPacket->data = nullptr;
1054 pPacket->size = 0;
1055
1056 avcodec_send_frame(avctx: pStream->m_pCodecContext, frame: nullptr);
1057 int RecvResult = 0;
1058 do
1059 {
1060 RecvResult = avcodec_receive_packet(avctx: pStream->m_pCodecContext, avpkt: pPacket);
1061 if(!RecvResult)
1062 {
1063 /* rescale output packet timestamp values from codec to stream timebase */
1064 av_packet_rescale_ts(pkt: pPacket, tb_src: pStream->m_pCodecContext->time_base, tb_dst: pStream->m_pStream->time_base);
1065 pPacket->stream_index = pStream->m_pStream->index;
1066
1067 const int WriteFrameResult = av_interleaved_write_frame(s: m_pFormatContext, pkt: pPacket);
1068 if(WriteFrameResult < 0)
1069 {
1070 char aError[AV_ERROR_MAX_STRING_SIZE];
1071 av_strerror(errnum: WriteFrameResult, errbuf: aError, errbuf_size: sizeof(aError));
1072 log_error("videorecorder", "Could not write video frame: %s", aError);
1073 }
1074 }
1075 else
1076 break;
1077 } while(true);
1078
1079 if(RecvResult && RecvResult != AVERROR_EOF)
1080 {
1081 char aError[AV_ERROR_MAX_STRING_SIZE];
1082 av_strerror(errnum: RecvResult, errbuf: aError, errbuf_size: sizeof(aError));
1083 log_error("videorecorder", "Could not finish recording: %s", aError);
1084 }
1085
1086 av_packet_free(pkt: &pPacket);
1087}
1088
1089void CVideo::CloseStream(COutputStream *pStream)
1090{
1091 avcodec_free_context(avctx: &pStream->m_pCodecContext);
1092
1093 for(auto *pFrame : pStream->m_vpFrames)
1094 av_frame_free(frame: &pFrame);
1095 pStream->m_vpFrames.clear();
1096
1097 for(auto *pFrame : pStream->m_vpTmpFrames)
1098 av_frame_free(frame: &pFrame);
1099 pStream->m_vpTmpFrames.clear();
1100
1101 for(auto *pSwsContext : pStream->m_vpSwsContexts)
1102 sws_freeContext(swsContext: pSwsContext);
1103 pStream->m_vpSwsContexts.clear();
1104
1105 for(auto *pSwrContext : pStream->m_vpSwrContexts)
1106 swr_free(s: &pSwrContext);
1107 pStream->m_vpSwrContexts.clear();
1108}
1109
1110#endif
1111