#if defined(CONF_VIDEORECORDER)

#include "video.h"

#include <base/log.h>

#include <engine/graphics.h>
#include <engine/shared/config.h>
#include <engine/sound.h>
#include <engine/storage.h>

extern "C" {
#include <libavutil/avutil.h>
#include <libavutil/opt.h>
#include <libswresample/swresample.h>
#include <libswscale/swscale.h>
}

#include <chrono>
#include <memory>
#include <mutex>
#include <thread>

using namespace std::chrono_literals;

// This code is mostly stolen from https://github.com/FFmpeg/FFmpeg/blob/master/doc/examples/muxing.c

#define STREAM_PIX_FMT AV_PIX_FMT_YUV420P /* default pix_fmt */

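// AVCodecContext::frame_number was renamed to frame_num in FFmpeg 6.0 (libavcodec 60)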
#if LIBAVCODEC_VERSION_MAJOR >= 60
#define FRAME_NUM frame_num
#else
#define FRAME_NUM frame_number
#endif

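// the graphics backend reads back RGBA pixels, i.e. 4 channels per pixel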
const size_t FORMAT_GL_NCHANNELS = 4;
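// serializes av_interleaved_write_frame() calls from the video and audio worker threads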
CLock g_WriteLock;

static LEVEL AvLevelToLogLevel(int Level)
{
	switch(Level)
	{
	case AV_LOG_PANIC:
	case AV_LOG_FATAL:
	case AV_LOG_ERROR:
		return LEVEL_ERROR;
	case AV_LOG_WARNING:
		return LEVEL_WARN;
	case AV_LOG_INFO:
		return LEVEL_INFO;
	case AV_LOG_VERBOSE:
	case AV_LOG_DEBUG:
		return LEVEL_DEBUG;
	case AV_LOG_TRACE:
		return LEVEL_TRACE;
	default:
		dbg_assert(false, "invalid log level");
		dbg_break();
	}
}

void AvLogCallback(void *pUser, int Level, const char *pFormat, va_list VarArgs)
	GNUC_ATTRIBUTE((format(printf, 3, 0)));

void AvLogCallback(void *pUser, int Level, const char *pFormat, va_list VarArgs)
{
	const LEVEL LogLevel = AvLevelToLogLevel(Level);
	if(LogLevel <= LEVEL_INFO)
	{
		log_log_v(LogLevel, "videorecorder/libav", pFormat, VarArgs);
	}
}

void CVideo::Init()
{
	av_log_set_callback(AvLogCallback);
}

CVideo::CVideo(IGraphics *pGraphics, ISound *pSound, IStorage *pStorage, int Width, int Height, const char *pName) :
	m_pGraphics(pGraphics),
	m_pStorage(pStorage),
	m_pSound(pSound)
{
	m_pFormatContext = nullptr;
	m_pFormat = nullptr;
	m_pOptDict = nullptr;

	m_pVideoCodec = nullptr;
	m_pAudioCodec = nullptr;

	m_Width = Width;
	m_Height = Height;
	str_copy(m_aName, pName);

	m_FPS = g_Config.m_ClVideoRecorderFPS;

	m_Recording = false;
	m_Started = false;
	m_Stopped = false;
	m_ProcessingVideoFrame = 0;
	m_ProcessingAudioFrame = 0;

	m_HasAudio = m_pSound->IsSoundEnabled() && g_Config.m_ClVideoSndEnable;

	dbg_assert(ms_pCurrentVideo == nullptr, "ms_pCurrentVideo is NOT set to nullptr while creating a new Video.");

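	// duration of one video frame in time_get() ticks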
	ms_TickTime = time_freq() / m_FPS;
	ms_pCurrentVideo = this;
}

CVideo::~CVideo()
{
	ms_pCurrentVideo = nullptr;
}

bool CVideo::Start()
{
	dbg_assert(!m_Started, "Already started");

	// wait for the graphic thread to idle
	m_pGraphics->WaitForIdle();

	m_AudioStream = {};
	m_VideoStream = {};

	char aWholePath[IO_MAX_PATH_LENGTH];
	IOHANDLE File = m_pStorage->OpenFile(m_aName, IOFLAG_WRITE, IStorage::TYPE_SAVE, aWholePath, sizeof(aWholePath));
	if(File)
	{
		io_close(File);
	}
	else
	{
		log_error("videorecorder", "Could not open file '%s'", aWholePath);
		return false;
	}

	const int FormatAllocResult = avformat_alloc_output_context2(&m_pFormatContext, nullptr, "mp4", aWholePath);
	if(FormatAllocResult < 0 || !m_pFormatContext)
	{
		char aError[AV_ERROR_MAX_STRING_SIZE];
		av_strerror(FormatAllocResult, aError, sizeof(aError));
		log_error("videorecorder", "Could not create format context: %s", aError);
		return false;
	}

	m_pFormat = m_pFormatContext->oformat;

#if defined(CONF_ARCH_IA32) || defined(CONF_ARCH_ARM)
	// use only the minimum of 2 threads on 32-bit to save memory
	m_VideoThreads = 2;
	m_AudioThreads = 2;
#else
	m_VideoThreads = std::thread::hardware_concurrency() + 2;
	// audio gets a bit less
	m_AudioThreads = (std::thread::hardware_concurrency() / 2) + 2;
#endif

	m_CurVideoThreadIndex = 0;
	m_CurAudioThreadIndex = 0;

	size_t GLNVals = FORMAT_GL_NCHANNELS * m_Width * m_Height;
	m_vVideoBuffers.resize(m_VideoThreads);
	for(size_t i = 0; i < m_VideoThreads; ++i)
	{
		m_vVideoBuffers[i].m_vBuffer.resize(GLNVals * sizeof(uint8_t));
	}

	m_vAudioBuffers.resize(m_AudioThreads);

	/* Add the audio and video streams using the default format codecs
	 * and initialize the codecs. */
	if(m_pFormat->video_codec != AV_CODEC_ID_NONE)
	{
		if(!AddStream(&m_VideoStream, m_pFormatContext, &m_pVideoCodec, m_pFormat->video_codec))
			return false;
	}
	else
	{
		log_error("videorecorder", "Could not determine default video stream codec");
		return false;
	}

	if(m_HasAudio)
	{
		if(m_pFormat->audio_codec != AV_CODEC_ID_NONE)
		{
			if(!AddStream(&m_AudioStream, m_pFormatContext, &m_pAudioCodec, m_pFormat->audio_codec))
				return false;
		}
		else
		{
			log_error("videorecorder", "Could not determine default audio stream codec");
			return false;
		}
	}

	m_vpVideoThreads.resize(m_VideoThreads);
	for(size_t i = 0; i < m_VideoThreads; ++i)
	{
		m_vpVideoThreads[i] = std::make_unique<CVideoRecorderThread>();
	}
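	// each worker is handed the index of its predecessor in the ring, so frames can be muxed in submission order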
	for(size_t i = 0; i < m_VideoThreads; ++i)
	{
		std::unique_lock<std::mutex> Lock(m_vpVideoThreads[i]->m_Mutex);
		m_vpVideoThreads[i]->m_Thread = std::thread([this, i]() REQUIRES(!g_WriteLock) { RunVideoThread(i == 0 ? (m_VideoThreads - 1) : (i - 1), i); });
		m_vpVideoThreads[i]->m_Cond.wait(Lock, [this, i]() -> bool { return m_vpVideoThreads[i]->m_Started; });
	}

	m_vpAudioThreads.resize(m_AudioThreads);
	for(size_t i = 0; i < m_AudioThreads; ++i)
	{
		m_vpAudioThreads[i] = std::make_unique<CAudioRecorderThread>();
	}
	for(size_t i = 0; i < m_AudioThreads; ++i)
	{
		std::unique_lock<std::mutex> Lock(m_vpAudioThreads[i]->m_Mutex);
		m_vpAudioThreads[i]->m_Thread = std::thread([this, i]() REQUIRES(!g_WriteLock) { RunAudioThread(i == 0 ? (m_AudioThreads - 1) : (i - 1), i); });
		m_vpAudioThreads[i]->m_Cond.wait(Lock, [this, i]() -> bool { return m_vpAudioThreads[i]->m_Started; });
	}

	/* Now that all the parameters are set, we can open the audio and
	 * video codecs and allocate the necessary encode buffers. */
	if(!OpenVideo())
		return false;

	if(m_HasAudio && !OpenAudio())
		return false;

	/* open the output file, if needed */
	if(!(m_pFormat->flags & AVFMT_NOFILE))
	{
		const int OpenResult = avio_open(&m_pFormatContext->pb, aWholePath, AVIO_FLAG_WRITE);
		if(OpenResult < 0)
		{
			char aError[AV_ERROR_MAX_STRING_SIZE];
			av_strerror(OpenResult, aError, sizeof(aError));
			log_error("videorecorder", "Could not open file '%s': %s", aWholePath, aError);
			return false;
		}
	}

	m_VideoStream.m_vpSwsContexts.reserve(m_VideoThreads);

	for(size_t i = 0; i < m_VideoThreads; ++i)
	{
		if(m_VideoStream.m_vpSwsContexts.size() <= i)
			m_VideoStream.m_vpSwsContexts.emplace_back(nullptr);

		if(!m_VideoStream.m_vpSwsContexts[i])
		{
			m_VideoStream.m_vpSwsContexts[i] = sws_getCachedContext(
				m_VideoStream.m_vpSwsContexts[i],
				m_VideoStream.m_pCodecContext->width, m_VideoStream.m_pCodecContext->height, AV_PIX_FMT_RGBA,
				m_VideoStream.m_pCodecContext->width, m_VideoStream.m_pCodecContext->height, AV_PIX_FMT_YUV420P,
				0, nullptr, nullptr, nullptr);
		}
	}

	/* Write the stream header, if any. */
	const int WriteHeaderResult = avformat_write_header(m_pFormatContext, &m_pOptDict);
	if(WriteHeaderResult < 0)
	{
		char aError[AV_ERROR_MAX_STRING_SIZE];
		av_strerror(WriteHeaderResult, aError, sizeof(aError));
		log_error("videorecorder", "Could not write header: %s", aError);
		return false;
	}

	m_Recording = true;
	m_Started = true;
	m_Stopped = false;
	ms_Time = time_get();
	return true;
}

void CVideo::Pause(bool Pause)
{
	if(ms_pCurrentVideo)
		m_Recording = !Pause;
}

void CVideo::Stop()
{
	dbg_assert(!m_Stopped, "Already stopped");

	m_pGraphics->WaitForIdle();

	for(auto &pVideoThread : m_vpVideoThreads)
	{
		{
			std::unique_lock<std::mutex> Lock(pVideoThread->m_Mutex);
			pVideoThread->m_Finished = true;
			pVideoThread->m_Cond.notify_all();
		}

		pVideoThread->m_Thread.join();
	}
	m_vpVideoThreads.clear();

	for(auto &pAudioThread : m_vpAudioThreads)
	{
		{
			std::unique_lock<std::mutex> Lock(pAudioThread->m_Mutex);
			pAudioThread->m_Finished = true;
			pAudioThread->m_Cond.notify_all();
		}

		pAudioThread->m_Thread.join();
	}
	m_vpAudioThreads.clear();

	while(m_ProcessingVideoFrame > 0 || m_ProcessingAudioFrame > 0)
		std::this_thread::sleep_for(10us);

	m_Recording = false;

	FinishFrames(&m_VideoStream);

	if(m_HasAudio)
		FinishFrames(&m_AudioStream);

	if(m_pFormatContext && m_Started)
		av_write_trailer(m_pFormatContext);

	CloseStream(&m_VideoStream);

	if(m_HasAudio)
		CloseStream(&m_AudioStream);

	if(m_pFormatContext)
	{
		if(!(m_pFormat->flags & AVFMT_NOFILE))
			avio_closep(&m_pFormatContext->pb);

		avformat_free_context(m_pFormatContext);
	}

	ISound *volatile pSound = m_pSound;

	pSound->PauseAudioDevice();
	delete ms_pCurrentVideo;
	pSound->UnpauseAudioDevice();

	m_Stopped = true;
}

void CVideo::NextVideoFrameThread()
{
	if(m_Recording)
	{
		m_VideoFrameIndex += 1;
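		// the first frame is skipped, presumably because no fully rendered image has been presented yet at that point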
		if(m_VideoFrameIndex >= 2)
		{
			m_ProcessingVideoFrame.fetch_add(1);

			size_t NextVideoThreadIndex = m_CurVideoThreadIndex + 1;
			if(NextVideoThreadIndex == m_VideoThreads)
				NextVideoThreadIndex = 0;

			// always wait for the next video thread too, to prevent a deadlock
			{
				auto *pVideoThread = m_vpVideoThreads[NextVideoThreadIndex].get();
				std::unique_lock<std::mutex> Lock(pVideoThread->m_Mutex);

				if(pVideoThread->m_HasVideoFrame)
				{
					pVideoThread->m_Cond.wait(Lock, [&pVideoThread]() -> bool { return !pVideoThread->m_HasVideoFrame; });
				}
			}

			// after reading the graphics library's frame buffer, go threaded
			{
				auto *pVideoThread = m_vpVideoThreads[m_CurVideoThreadIndex].get();
				std::unique_lock<std::mutex> Lock(pVideoThread->m_Mutex);

				if(pVideoThread->m_HasVideoFrame)
				{
					pVideoThread->m_Cond.wait(Lock, [&pVideoThread]() -> bool { return !pVideoThread->m_HasVideoFrame; });
				}

				UpdateVideoBufferFromGraphics(m_CurVideoThreadIndex);

				pVideoThread->m_HasVideoFrame = true;
				{
					std::unique_lock<std::mutex> LockParent(pVideoThread->m_VideoFillMutex);
					pVideoThread->m_VideoFrameToFill = m_VideoFrameIndex;
				}
				pVideoThread->m_Cond.notify_all();
			}

			++m_CurVideoThreadIndex;
			if(m_CurVideoThreadIndex == m_VideoThreads)
				m_CurVideoThreadIndex = 0;
		}
	}
}

void CVideo::NextVideoFrame()
{
	if(m_Recording)
	{
		ms_Time += ms_TickTime;
		ms_LocalTime = (ms_Time - ms_LocalStartTime) / (float)time_freq();
	}
}

void CVideo::NextAudioFrameTimeline(ISoundMixFunc Mix)
{
	if(m_Recording && m_HasAudio)
	{
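		// each video frame covers sample_rate/FPS audio samples; emit audio frames until enough samples are queued for the accumulated frame time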
		double SamplesPerFrame = (double)m_AudioStream.m_pCodecContext->sample_rate / m_FPS;
		while(m_AudioStream.m_SamplesFrameCount >= m_AudioStream.m_SamplesCount)
		{
			NextAudioFrame(Mix);
		}
		m_AudioStream.m_SamplesFrameCount += SamplesPerFrame;
	}
}

void CVideo::NextAudioFrame(ISoundMixFunc Mix)
{
	if(m_Recording && m_HasAudio)
	{
		m_AudioFrameIndex += 1;

		m_ProcessingAudioFrame.fetch_add(1);

		size_t NextAudioThreadIndex = m_CurAudioThreadIndex + 1;
		if(NextAudioThreadIndex == m_AudioThreads)
			NextAudioThreadIndex = 0;

		// always wait for the next audio thread too, to prevent a deadlock
		{
			auto *pAudioThread = m_vpAudioThreads[NextAudioThreadIndex].get();
			std::unique_lock<std::mutex> Lock(pAudioThread->m_Mutex);

			if(pAudioThread->m_HasAudioFrame)
			{
				pAudioThread->m_Cond.wait(Lock, [&pAudioThread]() -> bool { return !pAudioThread->m_HasAudioFrame; });
			}
		}

		// after mixing the audio buffer, go threaded
		{
			auto *pAudioThread = m_vpAudioThreads[m_CurAudioThreadIndex].get();

			std::unique_lock<std::mutex> Lock(pAudioThread->m_Mutex);

			if(pAudioThread->m_HasAudioFrame)
			{
				pAudioThread->m_Cond.wait(Lock, [&pAudioThread]() -> bool { return !pAudioThread->m_HasAudioFrame; });
			}

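			// the mixed buffer is 16-bit stereo; the two divisions by 2 account for the sample size and the channel count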
			Mix(m_vAudioBuffers[m_CurAudioThreadIndex].m_aBuffer, std::size(m_vAudioBuffers[m_CurAudioThreadIndex].m_aBuffer) / 2 / 2); // two channels

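			// output sample count for this frame: samples still buffered in the resampler plus the frame's own samples (source and destination rate are both the codec rate here, so no rate conversion factor applies)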
			int64_t DstNbSamples = av_rescale_rnd(
				swr_get_delay(m_AudioStream.m_vpSwrContexts[m_CurAudioThreadIndex], m_AudioStream.m_pCodecContext->sample_rate) +
					m_AudioStream.m_vpFrames[m_CurAudioThreadIndex]->nb_samples,
				m_AudioStream.m_pCodecContext->sample_rate,
				m_AudioStream.m_pCodecContext->sample_rate, AV_ROUND_UP);

			pAudioThread->m_SampleCountStart = m_AudioStream.m_SamplesCount;
			m_AudioStream.m_SamplesCount += DstNbSamples;

			pAudioThread->m_HasAudioFrame = true;
			{
				std::unique_lock<std::mutex> LockParent(pAudioThread->m_AudioFillMutex);
				pAudioThread->m_AudioFrameToFill = m_AudioFrameIndex;
			}
			pAudioThread->m_Cond.notify_all();
		}

		++m_CurAudioThreadIndex;
		if(m_CurAudioThreadIndex == m_AudioThreads)
			m_CurAudioThreadIndex = 0;
	}
}

void CVideo::RunAudioThread(size_t ParentThreadIndex, size_t ThreadIndex)
{
	auto *pThreadData = m_vpAudioThreads[ThreadIndex].get();
	auto *pParentThreadData = m_vpAudioThreads[ParentThreadIndex].get();
	std::unique_lock<std::mutex> Lock(pThreadData->m_Mutex);
	pThreadData->m_Started = true;
	pThreadData->m_Cond.notify_all();

	while(!pThreadData->m_Finished)
	{
		pThreadData->m_Cond.wait(Lock, [&pThreadData]() -> bool { return pThreadData->m_HasAudioFrame || pThreadData->m_Finished; });
		pThreadData->m_Cond.notify_all();

		if(pThreadData->m_HasAudioFrame)
		{
			FillAudioFrame(ThreadIndex);
			// check if we need to wait for the parent to finish
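			// m_AudioFrameToFill == 0 signals that the parent thread has already written its frame to the muxer, which keeps packets in frame order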
			{
				std::unique_lock<std::mutex> LockParent(pParentThreadData->m_AudioFillMutex);
				if(pParentThreadData->m_AudioFrameToFill != 0 && pThreadData->m_AudioFrameToFill >= pParentThreadData->m_AudioFrameToFill)
				{
					// wait for the parent to finish its frame
					pParentThreadData->m_AudioFillCond.wait(LockParent, [&pParentThreadData]() -> bool { return pParentThreadData->m_AudioFrameToFill == 0; });
				}
			}
			{
				std::unique_lock<std::mutex> LockAudio(pThreadData->m_AudioFillMutex);

				{
					CLockScope ls(g_WriteLock);
					m_AudioStream.m_vpFrames[ThreadIndex]->pts = av_rescale_q(pThreadData->m_SampleCountStart, AVRational{1, m_AudioStream.m_pCodecContext->sample_rate}, m_AudioStream.m_pCodecContext->time_base);
					WriteFrame(&m_AudioStream, ThreadIndex);
				}

				pThreadData->m_AudioFrameToFill = 0;
				pThreadData->m_AudioFillCond.notify_all();
				pThreadData->m_Cond.notify_all();
			}
			m_ProcessingAudioFrame.fetch_sub(1);

			pThreadData->m_HasAudioFrame = false;
		}
	}
}

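// Wraps the freshly mixed s16 stereo buffer in the temporary frame, then converts it into the codec's sample format via the per-thread resampling context.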
void CVideo::FillAudioFrame(size_t ThreadIndex)
{
	const int FillArrayResult = av_samples_fill_arrays(
		(uint8_t **)m_AudioStream.m_vpTmpFrames[ThreadIndex]->data,
		nullptr, // pointer to linesize (int*)
		(const uint8_t *)m_vAudioBuffers[ThreadIndex].m_aBuffer,
		2, // channels
		m_AudioStream.m_vpTmpFrames[ThreadIndex]->nb_samples,
		AV_SAMPLE_FMT_S16,
		0 // align
	);
	if(FillArrayResult < 0)
	{
		char aError[AV_ERROR_MAX_STRING_SIZE];
		av_strerror(FillArrayResult, aError, sizeof(aError));
		log_error("videorecorder", "Could not fill audio frame: %s", aError);
		return;
	}

	const int MakeWriteableResult = av_frame_make_writable(m_AudioStream.m_vpFrames[ThreadIndex]);
	if(MakeWriteableResult < 0)
	{
		char aError[AV_ERROR_MAX_STRING_SIZE];
		av_strerror(MakeWriteableResult, aError, sizeof(aError));
		log_error("videorecorder", "Could not make audio frame writeable: %s", aError);
		return;
	}

	/* convert to destination format */
	const int ConvertResult = swr_convert(
		m_AudioStream.m_vpSwrContexts[ThreadIndex],
		m_AudioStream.m_vpFrames[ThreadIndex]->data,
		m_AudioStream.m_vpFrames[ThreadIndex]->nb_samples,
		(const uint8_t **)m_AudioStream.m_vpTmpFrames[ThreadIndex]->data,
		m_AudioStream.m_vpTmpFrames[ThreadIndex]->nb_samples);
	if(ConvertResult < 0)
	{
		char aError[AV_ERROR_MAX_STRING_SIZE];
		av_strerror(ConvertResult, aError, sizeof(aError));
		log_error("videorecorder", "Could not convert audio frame: %s", aError);
		return;
	}
}

void CVideo::RunVideoThread(size_t ParentThreadIndex, size_t ThreadIndex)
{
	auto *pThreadData = m_vpVideoThreads[ThreadIndex].get();
	auto *pParentThreadData = m_vpVideoThreads[ParentThreadIndex].get();
	std::unique_lock<std::mutex> Lock(pThreadData->m_Mutex);
	pThreadData->m_Started = true;
	pThreadData->m_Cond.notify_all();

	while(!pThreadData->m_Finished)
	{
		pThreadData->m_Cond.wait(Lock, [&pThreadData]() -> bool { return pThreadData->m_HasVideoFrame || pThreadData->m_Finished; });
		pThreadData->m_Cond.notify_all();

		if(pThreadData->m_HasVideoFrame)
		{
			FillVideoFrame(ThreadIndex);
			// check if we need to wait for the parent to finish
			{
				std::unique_lock<std::mutex> LockParent(pParentThreadData->m_VideoFillMutex);
				if(pParentThreadData->m_VideoFrameToFill != 0 && pThreadData->m_VideoFrameToFill >= pParentThreadData->m_VideoFrameToFill)
				{
					// wait for the parent to finish its frame
					pParentThreadData->m_VideoFillCond.wait(LockParent, [&pParentThreadData]() -> bool { return pParentThreadData->m_VideoFrameToFill == 0; });
				}
			}
			{
				std::unique_lock<std::mutex> LockVideo(pThreadData->m_VideoFillMutex);
				{
					CLockScope ls(g_WriteLock);
					m_VideoStream.m_vpFrames[ThreadIndex]->pts = (int64_t)m_VideoStream.m_pCodecContext->FRAME_NUM;
					WriteFrame(&m_VideoStream, ThreadIndex);
				}

				pThreadData->m_VideoFrameToFill = 0;
				pThreadData->m_VideoFillCond.notify_all();
				pThreadData->m_Cond.notify_all();
			}
			m_ProcessingVideoFrame.fetch_sub(1);

			pThreadData->m_HasVideoFrame = false;
		}
	}
}

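// Converts the captured RGBA buffer into the encoder's YUV420P frame; the source stride is 4 bytes per pixel times the frame width.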
void CVideo::FillVideoFrame(size_t ThreadIndex)
{
	const int InLineSize = 4 * m_VideoStream.m_pCodecContext->width;
	auto *pRGBAData = m_vVideoBuffers[ThreadIndex].m_vBuffer.data();
	sws_scale(m_VideoStream.m_vpSwsContexts[ThreadIndex], (const uint8_t *const *)&pRGBAData, &InLineSize, 0,
		m_VideoStream.m_pCodecContext->height, m_VideoStream.m_vpFrames[ThreadIndex]->data, m_VideoStream.m_vpFrames[ThreadIndex]->linesize);
}

void CVideo::UpdateVideoBufferFromGraphics(size_t ThreadIndex)
{
	uint32_t Width;
	uint32_t Height;
	CImageInfo::EImageFormat Format;
	m_pGraphics->GetReadPresentedImageDataFuncUnsafe()(Width, Height, Format, m_vVideoBuffers[ThreadIndex].m_vBuffer);
	dbg_assert((int)Width == m_Width && (int)Height == m_Height, "Size mismatch between video and graphics");
	dbg_assert(Format == CImageInfo::FORMAT_RGBA, "Unexpected image format");
}

AVFrame *CVideo::AllocPicture(enum AVPixelFormat PixFmt, int Width, int Height)
{
	AVFrame *pPicture = av_frame_alloc();
	if(!pPicture)
	{
		log_error("videorecorder", "Could not allocate video frame");
		return nullptr;
	}

	pPicture->format = PixFmt;
	pPicture->width = Width;
	pPicture->height = Height;

	/* allocate the buffers for the frame data */
	const int FrameBufferAllocResult = av_frame_get_buffer(pPicture, 32);
	if(FrameBufferAllocResult < 0)
	{
		char aError[AV_ERROR_MAX_STRING_SIZE];
		av_strerror(FrameBufferAllocResult, aError, sizeof(aError));
		log_error("videorecorder", "Could not allocate video frame buffer: %s", aError);
		return nullptr;
	}

	return pPicture;
}

AVFrame *CVideo::AllocAudioFrame(enum AVSampleFormat SampleFmt, uint64_t ChannelLayout, int SampleRate, int NbSamples)
{
	AVFrame *pFrame = av_frame_alloc();
	if(!pFrame)
	{
		log_error("videorecorder", "Could not allocate audio frame");
		return nullptr;
	}

	pFrame->format = SampleFmt;
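	// FFmpeg 5.1 introduced the AVChannelLayout API, replacing the integer channel_layout field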
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(59, 24, 100)
	dbg_assert(av_channel_layout_from_mask(&pFrame->ch_layout, ChannelLayout) == 0, "Failed to set channel layout");
#else
	pFrame->channel_layout = ChannelLayout;
#endif
	pFrame->sample_rate = SampleRate;
	pFrame->nb_samples = NbSamples;

	if(NbSamples)
	{
		const int FrameBufferAllocResult = av_frame_get_buffer(pFrame, 0);
		if(FrameBufferAllocResult < 0)
		{
			char aError[AV_ERROR_MAX_STRING_SIZE];
			av_strerror(FrameBufferAllocResult, aError, sizeof(aError));
			log_error("videorecorder", "Could not allocate audio frame buffer: %s", aError);
			return nullptr;
		}
	}

	return pFrame;
}

bool CVideo::OpenVideo()
{
	AVCodecContext *pContext = m_VideoStream.m_pCodecContext;
	AVDictionary *pOptions = nullptr;
	av_dict_copy(&pOptions, m_pOptDict, 0);

	/* open the codec */
	const int VideoOpenResult = avcodec_open2(pContext, m_pVideoCodec, &pOptions);
	av_dict_free(&pOptions);
	if(VideoOpenResult < 0)
	{
		char aError[AV_ERROR_MAX_STRING_SIZE];
		av_strerror(VideoOpenResult, aError, sizeof(aError));
		log_error("videorecorder", "Could not open video codec: %s", aError);
		return false;
	}

	m_VideoStream.m_vpFrames.clear();
	m_VideoStream.m_vpFrames.reserve(m_VideoThreads);

	/* allocate and init a re-usable frame */
	for(size_t i = 0; i < m_VideoThreads; ++i)
	{
		m_VideoStream.m_vpFrames.emplace_back(nullptr);
		m_VideoStream.m_vpFrames[i] = AllocPicture(pContext->pix_fmt, pContext->width, pContext->height);
		if(!m_VideoStream.m_vpFrames[i])
		{
			return false;
		}
	}

	/* If the output format is not YUV420P, then a temporary YUV420P
	 * picture is needed too. It is then converted to the required
	 * output format. */
	m_VideoStream.m_vpTmpFrames.clear();
	m_VideoStream.m_vpTmpFrames.reserve(m_VideoThreads);

	if(pContext->pix_fmt != AV_PIX_FMT_YUV420P)
	{
		/* allocate and init a re-usable frame */
		for(size_t i = 0; i < m_VideoThreads; ++i)
		{
			m_VideoStream.m_vpTmpFrames.emplace_back(nullptr);
			m_VideoStream.m_vpTmpFrames[i] = AllocPicture(AV_PIX_FMT_YUV420P, pContext->width, pContext->height);
			if(!m_VideoStream.m_vpTmpFrames[i])
			{
				return false;
			}
		}
	}

	/* copy the stream parameters to the muxer */
	const int VideoStreamCopyResult = avcodec_parameters_from_context(m_VideoStream.m_pStream->codecpar, pContext);
	if(VideoStreamCopyResult < 0)
	{
		char aError[AV_ERROR_MAX_STRING_SIZE];
		av_strerror(VideoStreamCopyResult, aError, sizeof(aError));
		log_error("videorecorder", "Could not copy video stream parameters: %s", aError);
		return false;
	}
	m_VideoFrameIndex = 0;
	return true;
}

bool CVideo::OpenAudio()
{
	AVCodecContext *pContext = m_AudioStream.m_pCodecContext;
	AVDictionary *pOptions = nullptr;
	av_dict_copy(&pOptions, m_pOptDict, 0);

	/* open it */
	const int AudioOpenResult = avcodec_open2(pContext, m_pAudioCodec, &pOptions);
	av_dict_free(&pOptions);
	if(AudioOpenResult < 0)
	{
		char aError[AV_ERROR_MAX_STRING_SIZE];
		av_strerror(AudioOpenResult, aError, sizeof(aError));
		log_error("videorecorder", "Could not open audio codec: %s", aError);
		return false;
	}

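	// codecs with AV_CODEC_CAP_VARIABLE_FRAME_SIZE accept arbitrary frame sizes, so use a generous default; otherwise the codec dictates nb_samples per frame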
	int NbSamples;
	if(pContext->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE)
		NbSamples = 10000;
	else
		NbSamples = pContext->frame_size;

	m_AudioStream.m_vpFrames.clear();
	m_AudioStream.m_vpFrames.reserve(m_AudioThreads);

	m_AudioStream.m_vpTmpFrames.clear();
	m_AudioStream.m_vpTmpFrames.reserve(m_AudioThreads);

	/* allocate and init a re-usable frame */
	for(size_t i = 0; i < m_AudioThreads; ++i)
	{
		m_AudioStream.m_vpFrames.emplace_back(nullptr);
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(59, 24, 100)
		m_AudioStream.m_vpFrames[i] = AllocAudioFrame(pContext->sample_fmt, pContext->ch_layout.u.mask, pContext->sample_rate, NbSamples);
#else
		m_AudioStream.m_vpFrames[i] = AllocAudioFrame(pContext->sample_fmt, pContext->channel_layout, pContext->sample_rate, NbSamples);
#endif
		if(!m_AudioStream.m_vpFrames[i])
		{
			return false;
		}

		m_AudioStream.m_vpTmpFrames.emplace_back(nullptr);
		m_AudioStream.m_vpTmpFrames[i] = AllocAudioFrame(AV_SAMPLE_FMT_S16, AV_CH_LAYOUT_STEREO, m_pSound->MixingRate(), NbSamples);
		if(!m_AudioStream.m_vpTmpFrames[i])
		{
			return false;
		}
	}

	/* copy the stream parameters to the muxer */
	const int AudioStreamCopyResult = avcodec_parameters_from_context(m_AudioStream.m_pStream->codecpar, pContext);
	if(AudioStreamCopyResult < 0)
	{
		char aError[AV_ERROR_MAX_STRING_SIZE];
		av_strerror(AudioStreamCopyResult, aError, sizeof(aError));
		log_error("videorecorder", "Could not copy audio stream parameters: %s", aError);
		return false;
	}

	/* create resampling context */
	m_AudioStream.m_vpSwrContexts.clear();
	m_AudioStream.m_vpSwrContexts.resize(m_AudioThreads);
	for(size_t i = 0; i < m_AudioThreads; ++i)
	{
		m_AudioStream.m_vpSwrContexts[i] = swr_alloc();
		if(!m_AudioStream.m_vpSwrContexts[i])
		{
			log_error("videorecorder", "Could not allocate resampling context");
			return false;
		}

		/* set options */
		dbg_assert(av_opt_set_int(m_AudioStream.m_vpSwrContexts[i], "in_channel_count", 2, 0) == 0, "invalid option");
		if(av_opt_set_int(m_AudioStream.m_vpSwrContexts[i], "in_sample_rate", m_pSound->MixingRate(), 0) != 0)
		{
			log_error("videorecorder", "Could not set audio sample rate to %d", m_pSound->MixingRate());
			return false;
		}
		dbg_assert(av_opt_set_sample_fmt(m_AudioStream.m_vpSwrContexts[i], "in_sample_fmt", AV_SAMPLE_FMT_S16, 0) == 0, "invalid option");
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(59, 24, 100)
		dbg_assert(av_opt_set_int(m_AudioStream.m_vpSwrContexts[i], "out_channel_count", pContext->ch_layout.nb_channels, 0) == 0, "invalid option");
#else
		dbg_assert(av_opt_set_int(m_AudioStream.m_vpSwrContexts[i], "out_channel_count", pContext->channels, 0) == 0, "invalid option");
#endif
		dbg_assert(av_opt_set_int(m_AudioStream.m_vpSwrContexts[i], "out_sample_rate", pContext->sample_rate, 0) == 0, "invalid option");
		dbg_assert(av_opt_set_sample_fmt(m_AudioStream.m_vpSwrContexts[i], "out_sample_fmt", pContext->sample_fmt, 0) == 0, "invalid option");

		/* initialize the resampling context */
		const int ResamplingContextInitResult = swr_init(m_AudioStream.m_vpSwrContexts[i]);
		if(ResamplingContextInitResult < 0)
		{
			char aError[AV_ERROR_MAX_STRING_SIZE];
			av_strerror(ResamplingContextInitResult, aError, sizeof(aError));
			log_error("videorecorder", "Could not initialize resampling context: %s", aError);
			return false;
		}
	}

	m_AudioFrameIndex = 0;
	return true;
}

/* Add an output stream. */
bool CVideo::AddStream(COutputStream *pStream, AVFormatContext *pFormatContext, const AVCodec **ppCodec, enum AVCodecID CodecId) const
{
	/* find the encoder */
	*ppCodec = avcodec_find_encoder(CodecId);
	if(!(*ppCodec))
	{
		log_error("videorecorder", "Could not find encoder for codec '%s'", avcodec_get_name(CodecId));
		return false;
	}

	pStream->m_pStream = avformat_new_stream(pFormatContext, nullptr);
	if(!pStream->m_pStream)
	{
		log_error("videorecorder", "Could not allocate stream");
		return false;
	}
	pStream->m_pStream->id = pFormatContext->nb_streams - 1;
	AVCodecContext *pContext = avcodec_alloc_context3(*ppCodec);
	if(!pContext)
	{
		log_error("videorecorder", "Could not allocate encoding context");
		return false;
	}
	pStream->m_pCodecContext = pContext;

#if defined(CONF_ARCH_IA32) || defined(CONF_ARCH_ARM)
	// use only 1 ffmpeg thread on 32-bit to save memory
	pContext->thread_count = 1;
#endif

	switch((*ppCodec)->type)
	{
	case AVMEDIA_TYPE_AUDIO:
		pContext->sample_fmt = (*ppCodec)->sample_fmts ? (*ppCodec)->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;
		if((*ppCodec)->supported_samplerates)
		{
			pContext->sample_rate = (*ppCodec)->supported_samplerates[0];
			for(int i = 0; (*ppCodec)->supported_samplerates[i]; i++)
			{
				if((*ppCodec)->supported_samplerates[i] == m_pSound->MixingRate())
				{
					pContext->sample_rate = m_pSound->MixingRate();
					break;
				}
			}
		}
		else
		{
			pContext->sample_rate = m_pSound->MixingRate();
		}
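		// 2 channels * 16 bits per sample: matches the uncompressed stereo s16 PCM bitrate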
		pContext->bit_rate = pContext->sample_rate * 2 * 16;
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(59, 24, 100)
		dbg_assert(av_channel_layout_from_mask(&pContext->ch_layout, AV_CH_LAYOUT_STEREO) == 0, "Failed to set channel layout");
#else
		pContext->channels = 2;
		pContext->channel_layout = AV_CH_LAYOUT_STEREO;
#endif

		pStream->m_pStream->time_base.num = 1;
		pStream->m_pStream->time_base.den = pContext->sample_rate;
		break;

	case AVMEDIA_TYPE_VIDEO:
		pContext->codec_id = CodecId;

		pContext->bit_rate = 400000;
		/* Resolution must be a multiple of two. */
		pContext->width = m_Width;
		pContext->height = m_Height % 2 == 0 ? m_Height : m_Height - 1;
		/* timebase: This is the fundamental unit of time (in seconds) in terms
		 * of which frame timestamps are represented. For fixed-fps content,
		 * timebase should be 1/framerate and timestamp increments should be
		 * identical to 1. */
		pStream->m_pStream->time_base.num = 1;
		pStream->m_pStream->time_base.den = m_FPS;
		pContext->time_base = pStream->m_pStream->time_base;

		pContext->gop_size = 12; /* emit one intra frame every twelve frames at most */
		pContext->pix_fmt = STREAM_PIX_FMT;
		if(pContext->codec_id == AV_CODEC_ID_MPEG2VIDEO)
		{
			/* just for testing, we also add B-frames */
			pContext->max_b_frames = 2;
		}
		if(pContext->codec_id == AV_CODEC_ID_MPEG1VIDEO)
		{
			/* Needed to avoid using macroblocks in which some coeffs overflow.
			 * This does not happen with normal video, it just happens here as
			 * the motion of the chroma plane does not match the luma plane. */
			pContext->mb_decision = 2;
		}
		if(CodecId == AV_CODEC_ID_H264)
		{
			static const char *s_apPresets[10] = {"ultrafast", "superfast", "veryfast", "faster", "fast", "medium", "slow", "slower", "veryslow", "placebo"};
			dbg_assert(g_Config.m_ClVideoX264Preset < (int)std::size(s_apPresets), "preset index invalid");
			dbg_assert(av_opt_set(pContext->priv_data, "preset", s_apPresets[g_Config.m_ClVideoX264Preset], 0) == 0, "invalid option");
			dbg_assert(av_opt_set_int(pContext->priv_data, "crf", g_Config.m_ClVideoX264Crf, 0) == 0, "invalid option");
		}
		break;

	default:
		break;
	}

	/* Some formats want stream headers to be separate. */
	if(pFormatContext->oformat->flags & AVFMT_GLOBALHEADER)
		pContext->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

	return true;
}

void CVideo::WriteFrame(COutputStream *pStream, size_t ThreadIndex)
{
	AVPacket *pPacket = av_packet_alloc();
	if(pPacket == nullptr)
	{
		log_error("videorecorder", "Could not allocate packet");
		return;
	}

	pPacket->data = nullptr;
	pPacket->size = 0;

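	// standard FFmpeg encode loop: submit one frame, then drain every packet the encoder has ready; avcodec_receive_packet() returns AVERROR(EAGAIN) once it needs more input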
	avcodec_send_frame(pStream->m_pCodecContext, pStream->m_vpFrames[ThreadIndex]);
	int RecvResult = 0;
	do
	{
		RecvResult = avcodec_receive_packet(pStream->m_pCodecContext, pPacket);
		if(!RecvResult)
		{
			/* rescale output packet timestamp values from codec to stream timebase */
			av_packet_rescale_ts(pPacket, pStream->m_pCodecContext->time_base, pStream->m_pStream->time_base);
			pPacket->stream_index = pStream->m_pStream->index;

			const int WriteFrameResult = av_interleaved_write_frame(m_pFormatContext, pPacket);
			if(WriteFrameResult < 0)
			{
				char aError[AV_ERROR_MAX_STRING_SIZE];
				av_strerror(WriteFrameResult, aError, sizeof(aError));
				log_error("videorecorder", "Could not write frame: %s", aError);
			}
		}
		else
			break;
	} while(true);

	if(RecvResult && RecvResult != AVERROR(EAGAIN))
	{
		char aError[AV_ERROR_MAX_STRING_SIZE];
		av_strerror(RecvResult, aError, sizeof(aError));
		log_error("videorecorder", "Could not encode frame: %s", aError);
	}

	av_packet_free(&pPacket);
}

void CVideo::FinishFrames(COutputStream *pStream)
{
	if(!pStream->m_pCodecContext || !avcodec_is_open(pStream->m_pCodecContext))
		return;

	AVPacket *pPacket = av_packet_alloc();
	if(pPacket == nullptr)
	{
		log_error("videorecorder", "Could not allocate packet");
		return;
	}

	pPacket->data = nullptr;
	pPacket->size = 0;

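	// a null frame puts the encoder into draining mode; receive packets until it returns AVERROR_EOF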
	avcodec_send_frame(pStream->m_pCodecContext, nullptr);
	int RecvResult = 0;
	do
	{
		RecvResult = avcodec_receive_packet(pStream->m_pCodecContext, pPacket);
		if(!RecvResult)
		{
			/* rescale output packet timestamp values from codec to stream timebase */
			av_packet_rescale_ts(pPacket, pStream->m_pCodecContext->time_base, pStream->m_pStream->time_base);
			pPacket->stream_index = pStream->m_pStream->index;

			const int WriteFrameResult = av_interleaved_write_frame(m_pFormatContext, pPacket);
			if(WriteFrameResult < 0)
			{
				char aError[AV_ERROR_MAX_STRING_SIZE];
				av_strerror(WriteFrameResult, aError, sizeof(aError));
				log_error("videorecorder", "Could not write frame: %s", aError);
			}
		}
		else
			break;
	} while(true);

	if(RecvResult && RecvResult != AVERROR_EOF)
	{
		char aError[AV_ERROR_MAX_STRING_SIZE];
		av_strerror(RecvResult, aError, sizeof(aError));
		log_error("videorecorder", "Could not finish recording: %s", aError);
	}

	av_packet_free(&pPacket);
}

void CVideo::CloseStream(COutputStream *pStream)
{
	avcodec_free_context(&pStream->m_pCodecContext);

	for(auto *pFrame : pStream->m_vpFrames)
		av_frame_free(&pFrame);
	pStream->m_vpFrames.clear();

	for(auto *pFrame : pStream->m_vpTmpFrames)
		av_frame_free(&pFrame);
	pStream->m_vpTmpFrames.clear();

	for(auto *pSwsContext : pStream->m_vpSwsContexts)
		sws_freeContext(pSwsContext);
	pStream->m_vpSwsContexts.clear();

	for(auto *pSwrContext : pStream->m_vpSwrContexts)
		swr_free(&pSwrContext);
	pStream->m_vpSwrContexts.clear();
}

#endif