1#if defined(CONF_VIDEORECORDER)
2
3#include "video.h"
4
5#include <base/dbg.h>
6#include <base/io.h>
7#include <base/log.h>
8#include <base/str.h>
9#include <base/time.h>
10
11#include <engine/graphics.h>
12#include <engine/shared/config.h>
13#include <engine/sound.h>
14#include <engine/storage.h>
15
16extern "C" {
17#include <libavutil/avutil.h>
18#include <libavutil/opt.h>
19#include <libswresample/swresample.h>
20#include <libswscale/swscale.h>
21};
22
23#include <chrono>
24#include <memory>
25#include <mutex>
26#include <thread>
27
28using namespace std::chrono_literals;
29
30// This code is mostly stolen from https://github.com/FFmpeg/FFmpeg/blob/master/doc/examples/muxing.c
31
32static const enum AVColorSpace COLOR_SPACE = AVCOL_SPC_BT709;
33// AVCodecContext->colorspace is an enum AVColorSpace but sws_getCoefficients
34// wants an SWS_CS_* macro. Both sets of constants follow H.273 numbering
35// and hence agree, but we assert that they're equal here to be sure.
36static_assert(COLOR_SPACE == SWS_CS_ITU709);
37
38static LEVEL AvLevelToLogLevel(int Level)
39{
40 switch(Level)
41 {
42 case AV_LOG_PANIC:
43 case AV_LOG_FATAL:
44 case AV_LOG_ERROR:
45 return LEVEL_ERROR;
46 case AV_LOG_WARNING:
47 return LEVEL_WARN;
48 case AV_LOG_INFO:
49 return LEVEL_INFO;
50 case AV_LOG_VERBOSE:
51 case AV_LOG_DEBUG:
52 return LEVEL_DEBUG;
53 case AV_LOG_TRACE:
54 return LEVEL_TRACE;
55 default:
56 dbg_assert_failed("invalid log level: %d", Level);
57 }
58}
59
// Bridges libav's va_list-based log callback into the engine logger.
// Registered once via av_log_set_callback() in CVideo::Init().
[[gnu::format(printf, 3, 0)]] static void AvLogCallback(void *pUser, int Level, const char *pFormat, va_list VarArgs)
{
 const LEVEL LogLevel = AvLevelToLogLevel(Level);
 // Forward only up to LEVEL_INFO (assumes LEVEL values grow with
 // verbosity, i.e. ERROR < WARN < INFO < DEBUG < TRACE), suppressing
 // libav's debug/trace chatter.
 if(LogLevel <= LEVEL_INFO)
 {
  char aLog[4096]; // Longest log line length
  int Length = str_format_v(buffer: aLog, buffer_size: sizeof(aLog), format: pFormat, args: VarArgs);
  if(Length > 0)
  {
   // libav messages typically end with '\n'; strip it since the engine
   // logger terminates lines itself.
   if(aLog[Length - 1] == '\n')
   {
    aLog[Length - 1] = '\0';
   }
   log_log(level: LogLevel, sys: "videorecorder/libav", fmt: "%s", aLog);
  }
 }
}
77
// One-time global setup: routes all libav logging through AvLogCallback.
void CVideo::Init()
{
 av_log_set_callback(callback: AvLogCallback);
}
82
// Creates the recorder for the given output dimensions and file name.
// Only one recorder may exist at a time: the instance registers itself as
// ms_pCurrentVideo (asserted below). Recording starts with Start(), not here.
CVideo::CVideo(IGraphics *pGraphics, ISound *pSound, IStorage *pStorage, int Width, int Height, int64_t LocalStartTime, const char *pName) :
 m_pGraphics(pGraphics),
 m_pStorage(pStorage),
 m_pSound(pSound)
{
 // libav handles are created lazily in Start().
 m_pFormatContext = nullptr;
 m_pFormat = nullptr;
 m_pOptDict = nullptr;

 m_pVideoCodec = nullptr;
 m_pAudioCodec = nullptr;

 m_Width = Width;
 m_Height = Height;
 str_copy(dst&: m_aName, src: pName);

 // Frame pacing: one encoded frame per m_TickTime engine-clock units.
 m_FPS = g_Config.m_ClVideoRecorderFPS;
 m_TickTime = time_freq() / m_FPS;
 m_LocalStartTime = LocalStartTime;

 m_Recording = false;
 m_Started = false;
 m_Stopped = false;
 // Atomic counters of frames currently being encoded by worker threads;
 // Stop() spins until both reach zero.
 m_ProcessingVideoFrame = 0;
 m_ProcessingAudioFrame = 0;

 // Audio is only recorded when the sound system is active and the user
 // enabled it in the config.
 m_HasAudio = m_pSound->IsSoundEnabled() && g_Config.m_ClVideoSndEnable;

 dbg_assert(ms_pCurrentVideo == nullptr, "ms_pCurrentVideo is NOT set to nullptr while creating a new Video.");
 ms_pCurrentVideo = this;
}
114
// Unregisters the singleton pointer. The instance itself is destroyed by
// Stop() via `delete ms_pCurrentVideo`.
CVideo::~CVideo()
{
 ms_pCurrentVideo = nullptr;
}
119
// Sets up the muxer, codecs, conversion contexts and worker threads, and
// writes the container header. Returns false (after logging) on any failure;
// callers are expected not to record in that case.
bool CVideo::Start()
{
 dbg_assert(!m_Started, "Already started");

 // wait for the graphic thread to idle
 m_pGraphics->WaitForIdle();

 m_AudioStream = {};
 m_VideoStream = {};

 // Open and immediately close the target file once: this resolves the full
 // path into aWholePath and verifies it is writable. libav re-opens the
 // file itself later via avio_open().
 char aWholePath[IO_MAX_PATH_LENGTH];
 IOHANDLE File = m_pStorage->OpenFile(pFilename: m_aName, Flags: IOFLAG_WRITE, Type: IStorage::TYPE_SAVE, pBuffer: aWholePath, BufferSize: sizeof(aWholePath));
 if(File)
 {
  io_close(io: File);
 }
 else
 {
  log_error("videorecorder", "Could not open file '%s'", aWholePath);
  return false;
 }

 // MP4 container with the format's default codecs.
 const int FormatAllocResult = avformat_alloc_output_context2(ctx: &m_pFormatContext, oformat: nullptr, format_name: "mp4", filename: aWholePath);
 if(FormatAllocResult < 0 || !m_pFormatContext)
 {
  char aError[AV_ERROR_MAX_STRING_SIZE];
  av_strerror(errnum: FormatAllocResult, errbuf: aError, errbuf_size: sizeof(aError));
  log_error("videorecorder", "Could not create format context: %s", aError);
  return false;
 }

 m_pFormat = m_pFormatContext->oformat;

#if defined(CONF_ARCH_IA32) || defined(CONF_ARCH_ARM) || defined(CONF_ARCH_WASM)
 // use only the minimum of 2 threads on 32-bit to save memory
 m_VideoThreads = 2;
 m_AudioThreads = 2;
#else
 m_VideoThreads = std::thread::hardware_concurrency() + 2;
 // audio gets a bit less
 m_AudioThreads = (std::thread::hardware_concurrency() / 2) + 2;
#endif

 m_CurVideoThreadIndex = 0;
 m_CurAudioThreadIndex = 0;

 // One RGBA capture buffer (4 bytes per pixel) per video worker thread.
 const size_t VideoBufferSize = (size_t)4 * m_Width * m_Height * sizeof(uint8_t);
 m_vVideoBuffers.resize(new_size: m_VideoThreads);
 for(size_t i = 0; i < m_VideoThreads; ++i)
 {
  m_vVideoBuffers[i].m_vBuffer.resize(new_size: VideoBufferSize);
 }

 m_vAudioBuffers.resize(new_size: m_AudioThreads);

 /* Add the audio and video streams using the default format codecs
  * and initialize the codecs. */
 if(m_pFormat->video_codec != AV_CODEC_ID_NONE)
 {
  if(!AddStream(pStream: &m_VideoStream, pFormatContext: m_pFormatContext, ppCodec: &m_pVideoCodec, CodecId: m_pFormat->video_codec))
   return false;
 }
 else
 {
  log_error("videorecorder", "Could not determine default video stream codec");
  return false;
 }

 if(m_HasAudio)
 {
  if(m_pFormat->audio_codec != AV_CODEC_ID_NONE)
  {
   if(!AddStream(pStream: &m_AudioStream, pFormatContext: m_pFormatContext, ppCodec: &m_pAudioCodec, CodecId: m_pFormat->audio_codec))
    return false;
  }
  else
  {
   log_error("videorecorder", "Could not determine default audio stream codec");
   return false;
  }
 }

 // Spawn the video encoder workers. Each worker is given the index of its
 // "parent" (the previous thread in the ring) so that frames are muxed in
 // capture order, and Start() blocks on each worker's condition variable
 // until it has signalled m_Started.
 m_vpVideoThreads.resize(new_size: m_VideoThreads);
 for(size_t i = 0; i < m_VideoThreads; ++i)
 {
  m_vpVideoThreads[i] = std::make_unique<CVideoRecorderThread>();
 }
 for(size_t i = 0; i < m_VideoThreads; ++i)
 {
  std::unique_lock<std::mutex> Lock(m_vpVideoThreads[i]->m_Mutex);
  m_vpVideoThreads[i]->m_Thread = std::thread([this, i]() REQUIRES(!m_WriteLock) { RunVideoThread(ParentThreadIndex: i == 0 ? (m_VideoThreads - 1) : (i - 1), ThreadIndex: i); });
  m_vpVideoThreads[i]->m_Cond.wait(lock&: Lock, p: [this, i]() -> bool { return m_vpVideoThreads[i]->m_Started; });
 }

 // Same ring-of-workers setup for audio.
 m_vpAudioThreads.resize(new_size: m_AudioThreads);
 for(size_t i = 0; i < m_AudioThreads; ++i)
 {
  m_vpAudioThreads[i] = std::make_unique<CAudioRecorderThread>();
 }
 for(size_t i = 0; i < m_AudioThreads; ++i)
 {
  std::unique_lock<std::mutex> Lock(m_vpAudioThreads[i]->m_Mutex);
  m_vpAudioThreads[i]->m_Thread = std::thread([this, i]() REQUIRES(!m_WriteLock) { RunAudioThread(ParentThreadIndex: i == 0 ? (m_AudioThreads - 1) : (i - 1), ThreadIndex: i); });
  m_vpAudioThreads[i]->m_Cond.wait(lock&: Lock, p: [this, i]() -> bool { return m_vpAudioThreads[i]->m_Started; });
 }

 /* Now that all the parameters are set, we can open the audio and
  * video codecs and allocate the necessary encode buffers. */
 if(!OpenVideo())
  return false;

 if(m_HasAudio && !OpenAudio())
  return false;

 /* open the output file, if needed */
 if(!(m_pFormat->flags & AVFMT_NOFILE))
 {
  const int OpenResult = avio_open(s: &m_pFormatContext->pb, url: aWholePath, AVIO_FLAG_WRITE);
  if(OpenResult < 0)
  {
   char aError[AV_ERROR_MAX_STRING_SIZE];
   av_strerror(errnum: OpenResult, errbuf: aError, errbuf_size: sizeof(aError));
   log_error("videorecorder", "Could not open file '%s': %s", aWholePath, aError);
   return false;
  }
 }

 // One RGBA -> YUV420P scaler per video worker, using the BT.709
 // coefficients declared at the top of this file.
 m_VideoStream.m_vpSwsContexts.reserve(n: m_VideoThreads);

 for(size_t i = 0; i < m_VideoThreads; ++i)
 {
  if(m_VideoStream.m_vpSwsContexts.size() <= i)
   m_VideoStream.m_vpSwsContexts.emplace_back(args: nullptr);

  if(!m_VideoStream.m_vpSwsContexts[i])
  {
   m_VideoStream.m_vpSwsContexts[i] = sws_getCachedContext(
    context: m_VideoStream.m_vpSwsContexts[i],
    srcW: m_VideoStream.m_pCodecContext->width, srcH: m_VideoStream.m_pCodecContext->height, srcFormat: AV_PIX_FMT_RGBA,
    dstW: m_VideoStream.m_pCodecContext->width, dstH: m_VideoStream.m_pCodecContext->height, dstFormat: AV_PIX_FMT_YUV420P,
    flags: SWS_FULL_CHR_H_INT | SWS_FULL_CHR_H_INP | SWS_ACCURATE_RND | SWS_BITEXACT, srcFilter: nullptr, dstFilter: nullptr, param: nullptr);

   const int *pMatrixCoefficients = sws_getCoefficients(colorspace: COLOR_SPACE);
   sws_setColorspaceDetails(c: m_VideoStream.m_vpSwsContexts[i], inv_table: pMatrixCoefficients, srcRange: 0, table: pMatrixCoefficients, dstRange: 0, brightness: 0, contrast: 1 << 16, saturation: 1 << 16);
  }
 }

 /* Write the stream header, if any. */
 const int WriteHeaderResult = avformat_write_header(s: m_pFormatContext, options: &m_pOptDict);
 if(WriteHeaderResult < 0)
 {
  char aError[AV_ERROR_MAX_STRING_SIZE];
  av_strerror(errnum: WriteHeaderResult, errbuf: aError, errbuf_size: sizeof(aError));
  log_error("videorecorder", "Could not write header: %s", aError);
  return false;
 }

 // Everything is set up; begin pacing frames from the current engine time.
 m_Recording = true;
 m_Started = true;
 m_Stopped = false;
 m_Time = time_get();
 m_LocalTime = (m_Time - m_LocalStartTime) / (float)time_freq();
 return true;
}
284
285void CVideo::Pause(bool Pause)
286{
287 if(ms_pCurrentVideo)
288 m_Recording = !Pause;
289}
290
// Shuts down recording: joins all worker threads, drains in-flight frames,
// flushes the encoders, finalizes the container and destroys this object.
// NOTE: `delete ms_pCurrentVideo` below deletes *this* (the constructor set
// ms_pCurrentVideo = this), so no member may be touched afterwards.
void CVideo::Stop()
{
 dbg_assert(!m_Stopped, "Already stopped");
 m_Stopped = true;

 m_pGraphics->WaitForIdle();

 // Tell each video worker to finish and wait for it to exit.
 for(auto &pVideoThread : m_vpVideoThreads)
 {
  {
   std::unique_lock<std::mutex> Lock(pVideoThread->m_Mutex);
   pVideoThread->m_Finished = true;
   pVideoThread->m_Cond.notify_all();
  }

  pVideoThread->m_Thread.join();
 }
 m_vpVideoThreads.clear();

 // Same for the audio workers.
 for(auto &pAudioThread : m_vpAudioThreads)
 {
  {
   std::unique_lock<std::mutex> Lock(pAudioThread->m_Mutex);
   pAudioThread->m_Finished = true;
   pAudioThread->m_Cond.notify_all();
  }

  pAudioThread->m_Thread.join();
 }
 m_vpAudioThreads.clear();

 // Spin until every frame that was handed to a worker has been written.
 while(m_ProcessingVideoFrame > 0 || m_ProcessingAudioFrame > 0)
  std::this_thread::sleep_for(rtime: 10us);

 m_Recording = false;

 // Flush any frames still buffered inside the encoders.
 FinishFrames(pStream: &m_VideoStream);

 if(m_HasAudio)
  FinishFrames(pStream: &m_AudioStream);

 // Only write the trailer if the header was written (m_Started).
 if(m_pFormatContext && m_Started)
  av_write_trailer(s: m_pFormatContext);

 CloseStream(pStream: &m_VideoStream);

 if(m_HasAudio)
  CloseStream(pStream: &m_AudioStream);

 if(m_pFormatContext)
 {
  if(!(m_pFormat->flags & AVFMT_NOFILE))
   avio_closep(s: &m_pFormatContext->pb);

  avformat_free_context(s: m_pFormatContext);
 }

 // Keep a local sound pointer: deleting ms_pCurrentVideo destroys this
 // object, so m_pSound must not be read after the delete.
 ISound *volatile pSound = m_pSound;

 pSound->PauseAudioDevice();
 delete ms_pCurrentVideo;
 pSound->UnpauseAudioDevice();
}
354
// Captures the current frame buffer and hands it to the next video worker in
// the ring. Called from the graphics side once per recorded frame.
void CVideo::NextVideoFrameThread()
{
 if(m_Recording)
 {
  // m_VideoFrameIndex starts at 0 (OpenVideo), so the very first frame
  // (index 1) is not encoded.
  m_VideoFrameIndex += 1;
  if(m_VideoFrameIndex >= 2)
  {
   // Balanced by fetch_sub in RunVideoThread once the frame is written.
   m_ProcessingVideoFrame.fetch_add(i: 1);

   size_t NextVideoThreadIndex = m_CurVideoThreadIndex + 1;
   if(NextVideoThreadIndex == m_VideoThreads)
    NextVideoThreadIndex = 0;

   // always wait for the next video thread too, to prevent a dead lock
   {
    auto *pVideoThread = m_vpVideoThreads[NextVideoThreadIndex].get();
    std::unique_lock<std::mutex> Lock(pVideoThread->m_Mutex);

    if(pVideoThread->m_HasVideoFrame)
    {
     pVideoThread->m_Cond.wait(lock&: Lock, p: [&pVideoThread]() -> bool { return !pVideoThread->m_HasVideoFrame; });
    }
   }

   // after reading the graphic libraries' frame buffer, go threaded
   {
    auto *pVideoThread = m_vpVideoThreads[m_CurVideoThreadIndex].get();
    std::unique_lock<std::mutex> Lock(pVideoThread->m_Mutex);

    // Wait until the worker has finished its previous frame before
    // overwriting its capture buffer.
    if(pVideoThread->m_HasVideoFrame)
    {
     pVideoThread->m_Cond.wait(lock&: Lock, p: [&pVideoThread]() -> bool { return !pVideoThread->m_HasVideoFrame; });
    }

    // Copy the presented image into this worker's RGBA buffer.
    UpdateVideoBufferFromGraphics(ThreadIndex: m_CurVideoThreadIndex);

    pVideoThread->m_HasVideoFrame = true;
    {
     // m_VideoFrameToFill tells the worker (and its child in the
     // ring) which frame number it is responsible for.
     std::unique_lock<std::mutex> LockParent(pVideoThread->m_VideoFillMutex);
     pVideoThread->m_VideoFrameToFill = m_VideoFrameIndex;
    }
    pVideoThread->m_Cond.notify_all();
   }

   // Advance the ring cursor.
   ++m_CurVideoThreadIndex;
   if(m_CurVideoThreadIndex == m_VideoThreads)
    m_CurVideoThreadIndex = 0;
  }
 }
}
405
406void CVideo::NextVideoFrame()
407{
408 if(m_Recording)
409 {
410 m_Time += m_TickTime;
411 m_LocalTime = (m_Time - m_LocalStartTime) / (float)time_freq();
412 }
413}
414
// Keeps the audio timeline in sync with the video frame clock: emits audio
// frames until the number of encoded samples (m_SamplesCount) has caught up
// with the sample position implied by the frames recorded so far
// (m_SamplesFrameCount), then advances that position by one video frame.
void CVideo::NextAudioFrameTimeline(ISoundMixFunc Mix)
{
 if(m_Recording && m_HasAudio)
 {
  // Number of audio samples that one video frame spans.
  double SamplesPerFrame = (double)m_AudioStream.m_pCodecContext->sample_rate / m_FPS;
  while(m_AudioStream.m_SamplesFrameCount >= m_AudioStream.m_SamplesCount)
  {
   NextAudioFrame(Mix);
  }
  m_AudioStream.m_SamplesFrameCount += SamplesPerFrame;
 }
}
427
// Mixes one buffer of audio via the provided callback and hands it to the
// next audio worker in the ring. Mirrors NextVideoFrameThread's handshake.
void CVideo::NextAudioFrame(ISoundMixFunc Mix)
{
 if(m_Recording && m_HasAudio)
 {
  m_AudioFrameIndex += 1;

  // Balanced by fetch_sub in RunAudioThread once the frame is written.
  m_ProcessingAudioFrame.fetch_add(i: 1);

  size_t NextAudioThreadIndex = m_CurAudioThreadIndex + 1;
  if(NextAudioThreadIndex == m_AudioThreads)
   NextAudioThreadIndex = 0;

  // always wait for the next Audio thread too, to prevent a dead lock

  {
   auto *pAudioThread = m_vpAudioThreads[NextAudioThreadIndex].get();
   std::unique_lock<std::mutex> Lock(pAudioThread->m_Mutex);

   if(pAudioThread->m_HasAudioFrame)
   {
    pAudioThread->m_Cond.wait(lock&: Lock, p: [&pAudioThread]() -> bool { return !pAudioThread->m_HasAudioFrame; });
   }
  }

  // after reading the graphic libraries' frame buffer, go threaded
  {
   auto *pAudioThread = m_vpAudioThreads[m_CurAudioThreadIndex].get();

   std::unique_lock<std::mutex> Lock(pAudioThread->m_Mutex);

   // Wait until the worker is done with its previous frame before
   // overwriting its mix buffer.
   if(pAudioThread->m_HasAudioFrame)
   {
    pAudioThread->m_Cond.wait(lock&: Lock, p: [&pAudioThread]() -> bool { return !pAudioThread->m_HasAudioFrame; });
   }

   Mix(m_vAudioBuffers[m_CurAudioThreadIndex].m_aBuffer, std::size(m_vAudioBuffers[m_CurAudioThreadIndex].m_aBuffer) / 2 / 2); // two channels

   // Output sample count for this frame: resampler delay plus the
   // frame's nominal sample count, rounded up (src and dst rate are the
   // codec's sample rate here).
   int64_t DstNbSamples = av_rescale_rnd(
    a: swr_get_delay(s: m_AudioStream.m_vpSwrContexts[m_CurAudioThreadIndex], base: m_AudioStream.m_pCodecContext->sample_rate) +
     m_AudioStream.m_vpFrames[m_CurAudioThreadIndex]->nb_samples,
    b: m_AudioStream.m_pCodecContext->sample_rate,
    c: m_AudioStream.m_pCodecContext->sample_rate, rnd: AV_ROUND_UP);

   // Remember the absolute sample position; the worker derives the
   // frame's pts from it.
   pAudioThread->m_SampleCountStart = m_AudioStream.m_SamplesCount;
   m_AudioStream.m_SamplesCount += DstNbSamples;

   pAudioThread->m_HasAudioFrame = true;
   {
    std::unique_lock<std::mutex> LockParent(pAudioThread->m_AudioFillMutex);
    pAudioThread->m_AudioFrameToFill = m_AudioFrameIndex;
   }
   pAudioThread->m_Cond.notify_all();
  }

  // Advance the ring cursor.
  ++m_CurAudioThreadIndex;
  if(m_CurAudioThreadIndex == m_AudioThreads)
   m_CurAudioThreadIndex = 0;
 }
}
487
// Audio encoder worker loop. ParentThreadIndex is the previous worker in the
// ring: this thread must wait for the parent's frame to be muxed before
// writing its own, so frames reach the muxer in capture order.
void CVideo::RunAudioThread(size_t ParentThreadIndex, size_t ThreadIndex)
{
 auto *pThreadData = m_vpAudioThreads[ThreadIndex].get();
 auto *pParentThreadData = m_vpAudioThreads[ParentThreadIndex].get();
 std::unique_lock<std::mutex> Lock(pThreadData->m_Mutex);
 // Signal Start() that this worker is up.
 pThreadData->m_Started = true;
 pThreadData->m_Cond.notify_all();

 while(!pThreadData->m_Finished)
 {
  // Sleep until the main thread hands us a frame or Stop() finishes us.
  pThreadData->m_Cond.wait(lock&: Lock, p: [&pThreadData]() -> bool { return pThreadData->m_HasAudioFrame || pThreadData->m_Finished; });
  pThreadData->m_Cond.notify_all();

  if(pThreadData->m_HasAudioFrame)
  {
   // Resample/convert this thread's mix buffer into its AVFrame.
   FillAudioFrame(ThreadIndex);
   // check if we need to wait for the parent to finish
   {
    std::unique_lock<std::mutex> LockParent(pParentThreadData->m_AudioFillMutex);
    if(pParentThreadData->m_AudioFrameToFill != 0 && pThreadData->m_AudioFrameToFill >= pParentThreadData->m_AudioFrameToFill)
    {
     // wait for the parent to finish its frame
     pParentThreadData->m_AudioFillCond.wait(lock&: LockParent, p: [&pParentThreadData]() -> bool { return pParentThreadData->m_AudioFrameToFill == 0; });
    }
   }
   {
    std::unique_lock<std::mutex> LockAudio(pThreadData->m_AudioFillMutex);

    {
     // Muxer access is serialized across all workers via m_WriteLock.
     const CLockScope LockScope(m_WriteLock);
     // pts = absolute sample position (captured in NextAudioFrame)
     // rescaled from 1/sample_rate into the codec's time base.
     m_AudioStream.m_vpFrames[ThreadIndex]->pts = av_rescale_q(a: pThreadData->m_SampleCountStart, bq: AVRational{.num: 1, .den: m_AudioStream.m_pCodecContext->sample_rate}, cq: m_AudioStream.m_pCodecContext->time_base);
     WriteFrame(pStream: &m_AudioStream, ThreadIndex);
    }

    // Mark this frame as written and wake the child waiting on us.
    pThreadData->m_AudioFrameToFill = 0;
    pThreadData->m_AudioFillCond.notify_all();
    pThreadData->m_Cond.notify_all();
   }
   m_ProcessingAudioFrame.fetch_sub(i: 1);

   pThreadData->m_HasAudioFrame = false;
  }
 }
}
532
// Converts this thread's mixed audio buffer (interleaved stereo S16) into
// the codec's sample format, writing into the thread's AVFrame. Errors are
// logged and the frame is left unfilled.
void CVideo::FillAudioFrame(size_t ThreadIndex)
{
 // Point the tmp frame's data pointers at the raw mix buffer (no copy).
 const int FillArrayResult = av_samples_fill_arrays(
  audio_data: (uint8_t **)m_AudioStream.m_vpTmpFrames[ThreadIndex]->data,
  linesize: nullptr, // pointer to linesize (int*)
  buf: (const uint8_t *)m_vAudioBuffers[ThreadIndex].m_aBuffer,
  nb_channels: 2, // channels
  nb_samples: m_AudioStream.m_vpTmpFrames[ThreadIndex]->nb_samples,
  sample_fmt: AV_SAMPLE_FMT_S16,
  align: 0 // align
 );
 if(FillArrayResult < 0)
 {
  char aError[AV_ERROR_MAX_STRING_SIZE];
  av_strerror(errnum: FillArrayResult, errbuf: aError, errbuf_size: sizeof(aError));
  log_error("videorecorder", "Could not fill audio frame: %s", aError);
  return;
 }

 // The destination frame may still be referenced by the encoder; make it
 // writable before converting into it.
 const int MakeWriteableResult = av_frame_make_writable(frame: m_AudioStream.m_vpFrames[ThreadIndex]);
 if(MakeWriteableResult < 0)
 {
  char aError[AV_ERROR_MAX_STRING_SIZE];
  av_strerror(errnum: MakeWriteableResult, errbuf: aError, errbuf_size: sizeof(aError));
  log_error("videorecorder", "Could not make audio frame writeable: %s", aError);
  return;
 }

 /* convert to destination format */
 const int ConvertResult = swr_convert(
  s: m_AudioStream.m_vpSwrContexts[ThreadIndex],
  out: m_AudioStream.m_vpFrames[ThreadIndex]->data,
  out_count: m_AudioStream.m_vpFrames[ThreadIndex]->nb_samples,
  in: (const uint8_t **)m_AudioStream.m_vpTmpFrames[ThreadIndex]->data,
  in_count: m_AudioStream.m_vpTmpFrames[ThreadIndex]->nb_samples);
 if(ConvertResult < 0)
 {
  char aError[AV_ERROR_MAX_STRING_SIZE];
  av_strerror(errnum: ConvertResult, errbuf: aError, errbuf_size: sizeof(aError));
  log_error("videorecorder", "Could not convert audio frame: %s", aError);
  return;
 }
}
576
// Video encoder worker loop; same ordering protocol as RunAudioThread:
// convert our frame, wait for the parent (previous ring member) to have
// muxed its frame, then write ours under the shared write lock.
void CVideo::RunVideoThread(size_t ParentThreadIndex, size_t ThreadIndex)
{
 auto *pThreadData = m_vpVideoThreads[ThreadIndex].get();
 auto *pParentThreadData = m_vpVideoThreads[ParentThreadIndex].get();
 std::unique_lock<std::mutex> Lock(pThreadData->m_Mutex);
 // Signal Start() that this worker is up.
 pThreadData->m_Started = true;
 pThreadData->m_Cond.notify_all();

 while(!pThreadData->m_Finished)
 {
  // Sleep until a frame is handed over or Stop() finishes us.
  pThreadData->m_Cond.wait(lock&: Lock, p: [&pThreadData]() -> bool { return pThreadData->m_HasVideoFrame || pThreadData->m_Finished; });
  pThreadData->m_Cond.notify_all();

  if(pThreadData->m_HasVideoFrame)
  {
   // RGBA -> YUV conversion into this thread's AVFrame.
   FillVideoFrame(ThreadIndex);
   // check if we need to wait for the parent to finish
   {
    std::unique_lock<std::mutex> LockParent(pParentThreadData->m_VideoFillMutex);
    if(pParentThreadData->m_VideoFrameToFill != 0 && pThreadData->m_VideoFrameToFill >= pParentThreadData->m_VideoFrameToFill)
    {
     // wait for the parent to finish its frame
     pParentThreadData->m_VideoFillCond.wait(lock&: LockParent, p: [&pParentThreadData]() -> bool { return pParentThreadData->m_VideoFrameToFill == 0; });
    }
   }
   {
    std::unique_lock<std::mutex> LockVideo(pThreadData->m_VideoFillMutex);
    {
     // Muxer access is serialized across all workers via m_WriteLock.
     const CLockScope LockScope(m_WriteLock);
     // pts is simply the encoder's running frame counter (fixed fps);
     // the field was renamed in libavcodec 60.
#if LIBAVCODEC_VERSION_MAJOR >= 60
     m_VideoStream.m_vpFrames[ThreadIndex]->pts = m_VideoStream.m_pCodecContext->frame_num;
#else
     m_VideoStream.m_vpFrames[ThreadIndex]->pts = m_VideoStream.m_pCodecContext->frame_number;
#endif
     WriteFrame(pStream: &m_VideoStream, ThreadIndex);
    }

    // Mark this frame as written and wake the child waiting on us.
    pThreadData->m_VideoFrameToFill = 0;
    pThreadData->m_VideoFillCond.notify_all();
    pThreadData->m_Cond.notify_all();
   }
   m_ProcessingVideoFrame.fetch_sub(i: 1);

   pThreadData->m_HasVideoFrame = false;
  }
 }
}
624
// Converts this thread's captured RGBA buffer into the codec's frame using
// the per-thread sws context created in Start().
void CVideo::FillVideoFrame(size_t ThreadIndex)
{
 // Source stride: 4 bytes per RGBA pixel times the frame width.
 const int InLineSize = 4 * m_VideoStream.m_pCodecContext->width;
 auto *pRGBAData = m_vVideoBuffers[ThreadIndex].m_vBuffer.data();
 sws_scale(c: m_VideoStream.m_vpSwsContexts[ThreadIndex], srcSlice: (const uint8_t *const *)&pRGBAData, srcStride: &InLineSize, srcSliceY: 0,
  srcSliceH: m_VideoStream.m_pCodecContext->height, dst: m_VideoStream.m_vpFrames[ThreadIndex]->data, dstStride: m_VideoStream.m_vpFrames[ThreadIndex]->linesize);
}
632
633void CVideo::UpdateVideoBufferFromGraphics(size_t ThreadIndex)
634{
635 uint32_t Width;
636 uint32_t Height;
637 CImageInfo::EImageFormat Format;
638 m_pGraphics->GetReadPresentedImageDataFuncUnsafe()(Width, Height, Format, m_vVideoBuffers[ThreadIndex].m_vBuffer);
639 dbg_assert((int)Width == m_Width && (int)Height == m_Height, "Size mismatch between video (%d x %d) and graphics (%d x %d)", m_Width, m_Height, Width, Height);
640 dbg_assert(Format == CImageInfo::FORMAT_RGBA, "Unexpected image format %d", (int)Format);
641}
642
643AVFrame *CVideo::AllocPicture(enum AVPixelFormat PixFmt, int Width, int Height)
644{
645 AVFrame *pPicture = av_frame_alloc();
646 if(!pPicture)
647 {
648 log_error("videorecorder", "Could not allocate video frame");
649 return nullptr;
650 }
651
652 pPicture->format = PixFmt;
653 pPicture->width = Width;
654 pPicture->height = Height;
655
656 /* allocate the buffers for the frame data */
657 const int FrameBufferAllocResult = av_frame_get_buffer(frame: pPicture, align: 32);
658 if(FrameBufferAllocResult < 0)
659 {
660 char aError[AV_ERROR_MAX_STRING_SIZE];
661 av_strerror(errnum: FrameBufferAllocResult, errbuf: aError, errbuf_size: sizeof(aError));
662 log_error("videorecorder", "Could not allocate video frame buffer: %s", aError);
663 return nullptr;
664 }
665
666 return pPicture;
667}
668
669AVFrame *CVideo::AllocAudioFrame(enum AVSampleFormat SampleFmt, uint64_t ChannelLayout, int SampleRate, int NbSamples)
670{
671 AVFrame *pFrame = av_frame_alloc();
672 if(!pFrame)
673 {
674 log_error("videorecorder", "Could not allocate audio frame");
675 return nullptr;
676 }
677
678 pFrame->format = SampleFmt;
679#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(59, 24, 100)
680 dbg_assert(av_channel_layout_from_mask(&pFrame->ch_layout, ChannelLayout) == 0, "Failed to set channel layout");
681#else
682 pFrame->channel_layout = ChannelLayout;
683#endif
684 pFrame->sample_rate = SampleRate;
685 pFrame->nb_samples = NbSamples;
686
687 if(NbSamples)
688 {
689 const int FrameBufferAllocResult = av_frame_get_buffer(frame: pFrame, align: 0);
690 if(FrameBufferAllocResult < 0)
691 {
692 char aError[AV_ERROR_MAX_STRING_SIZE];
693 av_strerror(errnum: FrameBufferAllocResult, errbuf: aError, errbuf_size: sizeof(aError));
694 log_error("videorecorder", "Could not allocate audio frame buffer: %s", aError);
695 return nullptr;
696 }
697 }
698
699 return pFrame;
700}
701
702bool CVideo::OpenVideo()
703{
704 AVCodecContext *pContext = m_VideoStream.m_pCodecContext;
705 AVDictionary *pOptions = nullptr;
706 av_dict_copy(dst: &pOptions, src: m_pOptDict, flags: 0);
707
708 /* open the codec */
709 const int VideoOpenResult = avcodec_open2(avctx: pContext, codec: m_pVideoCodec, options: &pOptions);
710 av_dict_free(m: &pOptions);
711 if(VideoOpenResult < 0)
712 {
713 char aError[AV_ERROR_MAX_STRING_SIZE];
714 av_strerror(errnum: VideoOpenResult, errbuf: aError, errbuf_size: sizeof(aError));
715 log_error("videorecorder", "Could not open video codec: %s", aError);
716 return false;
717 }
718
719 m_VideoStream.m_vpFrames.clear();
720 m_VideoStream.m_vpFrames.reserve(n: m_VideoThreads);
721
722 /* allocate and init a re-usable frame */
723 for(size_t i = 0; i < m_VideoThreads; ++i)
724 {
725 m_VideoStream.m_vpFrames.emplace_back(args: nullptr);
726 m_VideoStream.m_vpFrames[i] = AllocPicture(PixFmt: pContext->pix_fmt, Width: pContext->width, Height: pContext->height);
727 if(!m_VideoStream.m_vpFrames[i])
728 {
729 return false;
730 }
731 }
732
733 /* If the output format is not YUV420P, then a temporary YUV420P
734 * picture is needed too. It is then converted to the required
735 * output format. */
736 m_VideoStream.m_vpTmpFrames.clear();
737 m_VideoStream.m_vpTmpFrames.reserve(n: m_VideoThreads);
738
739 if(pContext->pix_fmt != AV_PIX_FMT_YUV420P)
740 {
741 /* allocate and init a re-usable frame */
742 for(size_t i = 0; i < m_VideoThreads; ++i)
743 {
744 m_VideoStream.m_vpTmpFrames.emplace_back(args: nullptr);
745 m_VideoStream.m_vpTmpFrames[i] = AllocPicture(PixFmt: AV_PIX_FMT_YUV420P, Width: pContext->width, Height: pContext->height);
746 if(!m_VideoStream.m_vpTmpFrames[i])
747 {
748 return false;
749 }
750 }
751 }
752
753 /* copy the stream parameters to the muxer */
754 const int AudioStreamCopyResult = avcodec_parameters_from_context(par: m_VideoStream.m_pStream->codecpar, codec: pContext);
755 if(AudioStreamCopyResult < 0)
756 {
757 char aError[AV_ERROR_MAX_STRING_SIZE];
758 av_strerror(errnum: AudioStreamCopyResult, errbuf: aError, errbuf_size: sizeof(aError));
759 log_error("videorecorder", "Could not copy video stream parameters: %s", aError);
760 return false;
761 }
762 m_VideoFrameIndex = 0;
763 return true;
764}
765
// Opens the audio codec, allocates per-worker frames (codec format) and
// temporary frames (engine format: interleaved stereo S16 at the mixer rate),
// and creates one resampling context per worker. Returns false (after
// logging) on any failure.
bool CVideo::OpenAudio()
{
 AVCodecContext *pContext = m_AudioStream.m_pCodecContext;
 AVDictionary *pOptions = nullptr;
 av_dict_copy(dst: &pOptions, src: m_pOptDict, flags: 0);

 /* open it */
 const int AudioOpenResult = avcodec_open2(avctx: pContext, codec: m_pAudioCodec, options: &pOptions);
 av_dict_free(m: &pOptions);
 if(AudioOpenResult < 0)
 {
  char aError[AV_ERROR_MAX_STRING_SIZE];
  av_strerror(errnum: AudioOpenResult, errbuf: aError, errbuf_size: sizeof(aError));
  log_error("videorecorder", "Could not open audio codec: %s", aError);
  return false;
 }

 // Codecs with a variable frame size get an arbitrary large frame;
 // otherwise the codec dictates the number of samples per frame.
 int NbSamples;
 if(pContext->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE)
  NbSamples = 10000;
 else
  NbSamples = pContext->frame_size;

 m_AudioStream.m_vpFrames.clear();
 m_AudioStream.m_vpFrames.reserve(n: m_AudioThreads);

 m_AudioStream.m_vpTmpFrames.clear();
 m_AudioStream.m_vpTmpFrames.reserve(n: m_AudioThreads);

 /* allocate and init a re-usable frame */
 for(size_t i = 0; i < m_AudioThreads; ++i)
 {
  // Destination frame in the codec's own sample format and rate.
  m_AudioStream.m_vpFrames.emplace_back(args: nullptr);
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(59, 24, 100)
  m_AudioStream.m_vpFrames[i] = AllocAudioFrame(SampleFmt: pContext->sample_fmt, ChannelLayout: pContext->ch_layout.u.mask, SampleRate: pContext->sample_rate, NbSamples);
#else
  m_AudioStream.m_vpFrames[i] = AllocAudioFrame(pContext->sample_fmt, pContext->channel_layout, pContext->sample_rate, NbSamples);
#endif
  if(!m_AudioStream.m_vpFrames[i])
  {
   return false;
  }

  // Source frame matching the engine mixer output: stereo S16 at the
  // sound system's mixing rate.
  m_AudioStream.m_vpTmpFrames.emplace_back(args: nullptr);
  m_AudioStream.m_vpTmpFrames[i] = AllocAudioFrame(SampleFmt: AV_SAMPLE_FMT_S16, AV_CH_LAYOUT_STEREO, SampleRate: m_pSound->MixingRate(), NbSamples);
  if(!m_AudioStream.m_vpTmpFrames[i])
  {
   return false;
  }
 }

 /* copy the stream parameters to the muxer */
 const int AudioStreamCopyResult = avcodec_parameters_from_context(par: m_AudioStream.m_pStream->codecpar, codec: pContext);
 if(AudioStreamCopyResult < 0)
 {
  char aError[AV_ERROR_MAX_STRING_SIZE];
  av_strerror(errnum: AudioStreamCopyResult, errbuf: aError, errbuf_size: sizeof(aError));
  log_error("videorecorder", "Could not copy audio stream parameters: %s", aError);
  return false;
 }

 /* create resampling context */
 m_AudioStream.m_vpSwrContexts.clear();
 m_AudioStream.m_vpSwrContexts.resize(new_size: m_AudioThreads);
 for(size_t i = 0; i < m_AudioThreads; ++i)
 {
  m_AudioStream.m_vpSwrContexts[i] = swr_alloc();
  if(!m_AudioStream.m_vpSwrContexts[i])
  {
   log_error("videorecorder", "Could not allocate resampling context");
   return false;
  }

  /* set options */
  // Input side: engine mixer format (S16, mixing rate); output side:
  // whatever the codec expects. The channel-layout option API changed
  // with FFmpeg 5.1, hence the version gates.
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(59, 24, 100)
  dbg_assert(av_opt_set_chlayout(m_AudioStream.m_vpSwrContexts[i], "in_chlayout", &pContext->ch_layout, 0) == 0, "invalid option");
#else
  dbg_assert(av_opt_set_int(m_AudioStream.m_vpSwrContexts[i], "in_channel_count", pContext->channels, 0) == 0, "invalid option");
#endif
  if(av_opt_set_int(obj: m_AudioStream.m_vpSwrContexts[i], name: "in_sample_rate", val: m_pSound->MixingRate(), search_flags: 0) != 0)
  {
   log_error("videorecorder", "Could not set audio sample rate to %d", m_pSound->MixingRate());
   return false;
  }
  dbg_assert(av_opt_set_sample_fmt(m_AudioStream.m_vpSwrContexts[i], "in_sample_fmt", AV_SAMPLE_FMT_S16, 0) == 0, "invalid option");
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(59, 24, 100)
  dbg_assert(av_opt_set_chlayout(m_AudioStream.m_vpSwrContexts[i], "out_chlayout", &pContext->ch_layout, 0) == 0, "invalid option");
#else
  dbg_assert(av_opt_set_int(m_AudioStream.m_vpSwrContexts[i], "out_channel_count", pContext->channels, 0) == 0, "invalid option");
#endif
  dbg_assert(av_opt_set_int(m_AudioStream.m_vpSwrContexts[i], "out_sample_rate", pContext->sample_rate, 0) == 0, "invalid option");
  dbg_assert(av_opt_set_sample_fmt(m_AudioStream.m_vpSwrContexts[i], "out_sample_fmt", pContext->sample_fmt, 0) == 0, "invalid option");

  /* initialize the resampling context */
  const int ResamplingContextInitResult = swr_init(s: m_AudioStream.m_vpSwrContexts[i]);
  if(ResamplingContextInitResult < 0)
  {
   char aError[AV_ERROR_MAX_STRING_SIZE];
   av_strerror(errnum: ResamplingContextInitResult, errbuf: aError, errbuf_size: sizeof(aError));
   log_error("videorecorder", "Could not initialize resampling context: %s", aError);
   return false;
  }
 }

 m_AudioFrameIndex = 0;
 return true;
}
873
874/* Add an output stream. */
875bool CVideo::AddStream(COutputStream *pStream, AVFormatContext *pFormatContext, const AVCodec **ppCodec, enum AVCodecID CodecId) const
876{
877 /* find the encoder */
878 *ppCodec = avcodec_find_encoder(id: CodecId);
879 if(!(*ppCodec))
880 {
881 log_error("videorecorder", "Could not find encoder for codec '%s'", avcodec_get_name(CodecId));
882 return false;
883 }
884
885 pStream->m_pStream = avformat_new_stream(s: pFormatContext, c: nullptr);
886 if(!pStream->m_pStream)
887 {
888 log_error("videorecorder", "Could not allocate stream");
889 return false;
890 }
891 pStream->m_pStream->id = pFormatContext->nb_streams - 1;
892 AVCodecContext *pContext = avcodec_alloc_context3(codec: *ppCodec);
893 if(!pContext)
894 {
895 log_error("videorecorder", "Could not allocate encoding context");
896 return false;
897 }
898 pStream->m_pCodecContext = pContext;
899
900#if defined(CONF_ARCH_IA32) || defined(CONF_ARCH_ARM) || defined(CONF_ARCH_WASM)
901 // use only 1 ffmpeg thread on 32-bit to save memory
902 pContext->thread_count = 1;
903#endif
904
905 switch((*ppCodec)->type)
906 {
907 case AVMEDIA_TYPE_AUDIO:
908 {
909 const AVSampleFormat *pSampleFormats = nullptr;
910 const int *pSampleRates = nullptr;
911#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(61, 13, 100)
912 avcodec_get_supported_config(avctx: pContext, codec: *ppCodec, config: AV_CODEC_CONFIG_SAMPLE_FORMAT, flags: 0, out_configs: (const void **)&pSampleFormats, out_num_configs: nullptr);
913 avcodec_get_supported_config(avctx: pContext, codec: *ppCodec, config: AV_CODEC_CONFIG_SAMPLE_RATE, flags: 0, out_configs: (const void **)&pSampleRates, out_num_configs: nullptr);
914#else
915 pSampleFormats = (*ppCodec)->sample_fmts;
916 pSampleRates = (*ppCodec)->supported_samplerates;
917#endif
918 pContext->sample_fmt = pSampleFormats ? pSampleFormats[0] : AV_SAMPLE_FMT_FLTP;
919 if(pSampleRates)
920 {
921 pContext->sample_rate = pSampleRates[0];
922 for(int i = 0; pSampleRates[i]; i++)
923 {
924 if(pSampleRates[i] == m_pSound->MixingRate())
925 {
926 pContext->sample_rate = m_pSound->MixingRate();
927 break;
928 }
929 }
930 }
931 else
932 {
933 pContext->sample_rate = m_pSound->MixingRate();
934 }
935 pContext->bit_rate = pContext->sample_rate * 2 * 16;
936#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(59, 24, 100)
937 dbg_assert(av_channel_layout_from_mask(&pContext->ch_layout, AV_CH_LAYOUT_STEREO) == 0, "Failed to set channel layout");
938#else
939 pContext->channels = 2;
940 pContext->channel_layout = AV_CH_LAYOUT_STEREO;
941#endif
942
943 pStream->m_pStream->time_base.num = 1;
944 pStream->m_pStream->time_base.den = pContext->sample_rate;
945 break;
946 }
947
948 case AVMEDIA_TYPE_VIDEO:
949 pContext->codec_id = CodecId;
950
951 pContext->bit_rate = 400000;
952 /* Resolution must be a multiple of two. */
953 pContext->width = m_Width;
954 pContext->height = m_Height % 2 == 0 ? m_Height : m_Height - 1;
955 /* timebase: This is the fundamental unit of time (in seconds) in terms
956 * of which frame timestamps are represented. For fixed-fps content,
957 * timebase should be 1/framerate and timestamp increments should be
958 * identical to 1. */
959 pStream->m_pStream->time_base.num = 1;
960 pStream->m_pStream->time_base.den = m_FPS;
961 pContext->time_base = pStream->m_pStream->time_base;
962
963 pContext->gop_size = 12; /* emit one intra frame every twelve frames at most */
964 pContext->pix_fmt = AV_PIX_FMT_YUV420P;
965 pContext->colorspace = COLOR_SPACE;
966 if(pContext->codec_id == AV_CODEC_ID_MPEG2VIDEO)
967 {
968 /* just for testing, we also add B-frames */
969 pContext->max_b_frames = 2;
970 }
971 if(pContext->codec_id == AV_CODEC_ID_MPEG1VIDEO)
972 {
973 /* Needed to avoid using macroblocks in which some coeffs overflow.
974 * This does not happen with normal video, it just happens here as
975 * the motion of the chroma plane does not match the luma plane. */
976 pContext->mb_decision = 2;
977 }
978 if(CodecId == AV_CODEC_ID_H264)
979 {
980 static const char *s_apPresets[10] = {"ultrafast", "superfast", "veryfast", "faster", "fast", "medium", "slow", "slower", "veryslow", "placebo"};
981 dbg_assert(g_Config.m_ClVideoX264Preset < (int)std::size(s_apPresets), "preset index invalid: %d", g_Config.m_ClVideoX264Preset);
982 dbg_assert(av_opt_set(pContext->priv_data, "preset", s_apPresets[g_Config.m_ClVideoX264Preset], 0) == 0, "invalid option");
983 dbg_assert(av_opt_set_int(pContext->priv_data, "crf", g_Config.m_ClVideoX264Crf, 0) == 0, "invalid option");
984 }
985 break;
986
987 default:
988 break;
989 }
990
991 /* Some formats want stream headers to be separate. */
992 if(pFormatContext->oformat->flags & AVFMT_GLOBALHEADER)
993 pContext->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
994
995 return true;
996}
997
998void CVideo::WriteFrame(COutputStream *pStream, size_t ThreadIndex)
999{
1000 AVPacket *pPacket = av_packet_alloc();
1001 if(pPacket == nullptr)
1002 {
1003 log_error("videorecorder", "Could not allocate packet");
1004 return;
1005 }
1006
1007 pPacket->data = nullptr;
1008 pPacket->size = 0;
1009
1010 avcodec_send_frame(avctx: pStream->m_pCodecContext, frame: pStream->m_vpFrames[ThreadIndex]);
1011 int RecvResult = 0;
1012 do
1013 {
1014 RecvResult = avcodec_receive_packet(avctx: pStream->m_pCodecContext, avpkt: pPacket);
1015 if(!RecvResult)
1016 {
1017 /* rescale output packet timestamp values from codec to stream timebase */
1018 av_packet_rescale_ts(pkt: pPacket, tb_src: pStream->m_pCodecContext->time_base, tb_dst: pStream->m_pStream->time_base);
1019 pPacket->stream_index = pStream->m_pStream->index;
1020
1021 const int WriteFrameResult = av_interleaved_write_frame(s: m_pFormatContext, pkt: pPacket);
1022 if(WriteFrameResult < 0)
1023 {
1024 char aError[AV_ERROR_MAX_STRING_SIZE];
1025 av_strerror(errnum: WriteFrameResult, errbuf: aError, errbuf_size: sizeof(aError));
1026 log_error("videorecorder", "Could not write video frame: %s", aError);
1027 }
1028 }
1029 else
1030 break;
1031 } while(true);
1032
1033 if(RecvResult && RecvResult != AVERROR(EAGAIN))
1034 {
1035 char aError[AV_ERROR_MAX_STRING_SIZE];
1036 av_strerror(errnum: RecvResult, errbuf: aError, errbuf_size: sizeof(aError));
1037 log_error("videorecorder", "Could not encode video frame: %s", aError);
1038 }
1039
1040 av_packet_free(pkt: &pPacket);
1041}
1042
1043void CVideo::FinishFrames(COutputStream *pStream)
1044{
1045 if(!pStream->m_pCodecContext || !avcodec_is_open(s: pStream->m_pCodecContext))
1046 return;
1047
1048 AVPacket *pPacket = av_packet_alloc();
1049 if(pPacket == nullptr)
1050 {
1051 log_error("videorecorder", "Could not allocate packet");
1052 return;
1053 }
1054
1055 pPacket->data = nullptr;
1056 pPacket->size = 0;
1057
1058 avcodec_send_frame(avctx: pStream->m_pCodecContext, frame: nullptr);
1059 int RecvResult = 0;
1060 do
1061 {
1062 RecvResult = avcodec_receive_packet(avctx: pStream->m_pCodecContext, avpkt: pPacket);
1063 if(!RecvResult)
1064 {
1065 /* rescale output packet timestamp values from codec to stream timebase */
1066 av_packet_rescale_ts(pkt: pPacket, tb_src: pStream->m_pCodecContext->time_base, tb_dst: pStream->m_pStream->time_base);
1067 pPacket->stream_index = pStream->m_pStream->index;
1068
1069 const int WriteFrameResult = av_interleaved_write_frame(s: m_pFormatContext, pkt: pPacket);
1070 if(WriteFrameResult < 0)
1071 {
1072 char aError[AV_ERROR_MAX_STRING_SIZE];
1073 av_strerror(errnum: WriteFrameResult, errbuf: aError, errbuf_size: sizeof(aError));
1074 log_error("videorecorder", "Could not write video frame: %s", aError);
1075 }
1076 }
1077 else
1078 break;
1079 } while(true);
1080
1081 if(RecvResult && RecvResult != AVERROR_EOF)
1082 {
1083 char aError[AV_ERROR_MAX_STRING_SIZE];
1084 av_strerror(errnum: RecvResult, errbuf: aError, errbuf_size: sizeof(aError));
1085 log_error("videorecorder", "Could not finish recording: %s", aError);
1086 }
1087
1088 av_packet_free(pkt: &pPacket);
1089}
1090
1091void CVideo::CloseStream(COutputStream *pStream)
1092{
1093 avcodec_free_context(avctx: &pStream->m_pCodecContext);
1094
1095 for(auto *pFrame : pStream->m_vpFrames)
1096 av_frame_free(frame: &pFrame);
1097 pStream->m_vpFrames.clear();
1098
1099 for(auto *pFrame : pStream->m_vpTmpFrames)
1100 av_frame_free(frame: &pFrame);
1101 pStream->m_vpTmpFrames.clear();
1102
1103 for(auto *pSwsContext : pStream->m_vpSwsContexts)
1104 sws_freeContext(swsContext: pSwsContext);
1105 pStream->m_vpSwsContexts.clear();
1106
1107 for(auto *pSwrContext : pStream->m_vpSwrContexts)
1108 swr_free(s: &pSwrContext);
1109 pStream->m_vpSwrContexts.clear();
1110}
1111
1112#endif
1113