Move video encoding to a separate thread; remove pixel_format, which didn't do anything and was bugged

This commit is contained in:
dec05eba 2023-03-21 12:10:16 +01:00
parent 716cb5b448
commit 5d80bd886c
2 changed files with 85 additions and 49 deletions

View File

@ -372,12 +372,6 @@ static void gsr_capture_xcomposite_cuda_tick(gsr_capture *cap, AVCodecContext *v
cap_xcomp->texture_size.x = min_int(video_codec_context->width, max_int(2, cap_xcomp->texture_size.x & ~1)); cap_xcomp->texture_size.x = min_int(video_codec_context->width, max_int(2, cap_xcomp->texture_size.x & ~1));
cap_xcomp->texture_size.y = min_int(video_codec_context->height, max_int(2, cap_xcomp->texture_size.y & ~1)); cap_xcomp->texture_size.y = min_int(video_codec_context->height, max_int(2, cap_xcomp->texture_size.y & ~1));
if(!cap_xcomp->params.follow_focused) {
cap_xcomp->egl.glBindTexture(GL_TEXTURE_2D, cap_xcomp->target_texture_id);
cap_xcomp->egl.glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, cap_xcomp->texture_size.x, cap_xcomp->texture_size.y, 0, GL_RGB, GL_UNSIGNED_BYTE, NULL);
cap_xcomp->egl.glBindTexture(GL_TEXTURE_2D, 0);
}
av_frame_free(frame); av_frame_free(frame);
*frame = av_frame_alloc(); *frame = av_frame_alloc();
if(!frame) { if(!frame) {

View File

@ -162,7 +162,7 @@ static int x11_io_error_handler(Display *dpy) {
} }
// |stream| is only required for non-replay mode // |stream| is only required for non-replay mode
static void receive_frames(AVCodecContext *av_codec_context, int stream_index, AVStream *stream, AVFrame *frame, static void receive_frames(AVCodecContext *av_codec_context, int stream_index, AVStream *stream, int64_t pts,
AVFormatContext *av_format_context, AVFormatContext *av_format_context,
double replay_start_time, double replay_start_time,
std::deque<AVPacket> &frame_data_queue, std::deque<AVPacket> &frame_data_queue,
@ -178,10 +178,8 @@ static void receive_frames(AVCodecContext *av_codec_context, int stream_index, A
int res = avcodec_receive_packet(av_codec_context, &av_packet); int res = avcodec_receive_packet(av_codec_context, &av_packet);
if (res == 0) { // we have a packet, send the packet to the muxer if (res == 0) { // we have a packet, send the packet to the muxer
av_packet.stream_index = stream_index; av_packet.stream_index = stream_index;
av_packet.pts = av_packet.dts = frame->pts; av_packet.pts = pts;
av_packet.dts = pts;
if(frame->flags & AV_FRAME_FLAG_DISCARD)
av_packet.flags |= AV_PKT_FLAG_DISCARD;
std::lock_guard<std::mutex> lock(write_output_mutex); std::lock_guard<std::mutex> lock(write_output_mutex);
if(replay_buffer_size_secs != -1) { if(replay_buffer_size_secs != -1) {
@ -609,15 +607,6 @@ static void open_video(AVCodecContext *codec_context, VideoQuality video_quality
break; break;
} }
} }
switch(pixel_format) {
case PixelFormat::YUV420:
av_opt_set(&options, "pixel_format", "yuv420p", 0);
break;
case PixelFormat::YUV444:
av_opt_set(&options, "pixel_format", "yuv444p", 0);
break;
}
} else { } else {
switch(video_quality) { switch(video_quality) {
case VideoQuality::MEDIUM: case VideoQuality::MEDIUM:
@ -1753,6 +1742,10 @@ int main(int argc, char **argv) {
// Jesus is there a better way to do this? I JUST WANT TO KEEP VIDEO AND AUDIO SYNCED HOLY FUCK I WANT TO KILL MYSELF NOW. // Jesus is there a better way to do this? I JUST WANT TO KEEP VIDEO AND AUDIO SYNCED HOLY FUCK I WANT TO KILL MYSELF NOW.
// THIS PIECE OF SHIT WANTS EMPTY FRAMES OTHERWISE VIDEO PLAYS TOO FAST TO KEEP UP WITH AUDIO OR THE AUDIO PLAYS TOO EARLY. // THIS PIECE OF SHIT WANTS EMPTY FRAMES OTHERWISE VIDEO PLAYS TOO FAST TO KEEP UP WITH AUDIO OR THE AUDIO PLAYS TOO EARLY.
// BUT WE CANT USE DELAYS TO GIVE DUMMY DATA BECAUSE PULSEAUDIO MIGHT GIVE AUDIO A BIG DELAYED!!! // BUT WE CANT USE DELAYS TO GIVE DUMMY DATA BECAUSE PULSEAUDIO MIGHT GIVE AUDIO A BIG DELAYED!!!
// This garbage is needed because we want to produce constant frame rate videos instead of variable frame rate
// videos because bad software such as video editing software and VLC do not support variable frame rate videos,
// despite nvidia shadowplay and xbox game bar producing variable frame rate videos.
// So we have to make sure we produce frames at the same relative rate as the video.
if(num_missing_frames >= 5 || !audio_device.sound_device.handle) { if(num_missing_frames >= 5 || !audio_device.sound_device.handle) {
// TODO: // TODO:
//audio_track.frame->data[0] = empty_audio; //audio_track.frame->data[0] = empty_audio;
@ -1774,8 +1767,9 @@ int main(int argc, char **argv) {
audio_track.frame->pts = audio_track.pts; audio_track.frame->pts = audio_track.pts;
audio_track.pts += audio_track.frame->nb_samples; audio_track.pts += audio_track.frame->nb_samples;
ret = avcodec_send_frame(audio_track.codec_context, audio_track.frame); ret = avcodec_send_frame(audio_track.codec_context, audio_track.frame);
if(ret >= 0){ if(ret >= 0) {
receive_frames(audio_track.codec_context, audio_track.stream_index, audio_track.stream, audio_track.frame, av_format_context, record_start_time, frame_data_queue, replay_buffer_size_secs, frames_erased, write_output_mutex); // TODO: Move to separate thread because this could write to network (for example when livestreaming)
receive_frames(audio_track.codec_context, audio_track.stream_index, audio_track.stream, audio_track.frame->pts, av_format_context, record_start_time, frame_data_queue, replay_buffer_size_secs, frames_erased, write_output_mutex);
} else { } else {
fprintf(stderr, "Failed to encode audio!\n"); fprintf(stderr, "Failed to encode audio!\n");
} }
@ -1803,8 +1797,9 @@ int main(int argc, char **argv) {
audio_track.frame->pts = audio_track.pts; audio_track.frame->pts = audio_track.pts;
audio_track.pts += audio_track.frame->nb_samples; audio_track.pts += audio_track.frame->nb_samples;
ret = avcodec_send_frame(audio_track.codec_context, audio_track.frame); ret = avcodec_send_frame(audio_track.codec_context, audio_track.frame);
if(ret >= 0){ if(ret >= 0) {
receive_frames(audio_track.codec_context, audio_track.stream_index, audio_track.stream, audio_track.frame, av_format_context, record_start_time, frame_data_queue, replay_buffer_size_secs, frames_erased, write_output_mutex); // TODO: Move to separate thread because this could write to network (for example when livestreaming)
receive_frames(audio_track.codec_context, audio_track.stream_index, audio_track.stream, audio_track.frame->pts, av_format_context, record_start_time, frame_data_queue, replay_buffer_size_secs, frames_erased, write_output_mutex);
} else { } else {
fprintf(stderr, "Failed to encode audio!\n"); fprintf(stderr, "Failed to encode audio!\n");
} }
@ -1820,12 +1815,62 @@ int main(int argc, char **argv) {
// Set update_fps to 24 to test if duplicate/delayed frames cause video/audio desync or too fast/slow video. // Set update_fps to 24 to test if duplicate/delayed frames cause video/audio desync or too fast/slow video.
const double update_fps = fps + 190; const double update_fps = fps + 190;
int64_t video_pts_counter = 0;
bool should_stop_error = false; bool should_stop_error = false;
AVFrame *aframe = av_frame_alloc(); AVFrame *aframe = av_frame_alloc();
while (running) { // Separate video encoding from frame capture because on amd/intel the frame capture can be very very slow
// if we are hitting the graphical processing limit, in which case all applications will run at the same framerate
// as the game framerate. This performance seems to be artificially limited.
// This garbage is needed because we want to produce constant frame rate videos instead of variable frame rate
// videos because bad software such as video editing software and VLC do not support variable frame rate videos,
// despite nvidia shadowplay and xbox game bar producing variable frame rate videos.
// So we have to encode a frame multiple times (duplicate) if we dont produce exactly 1000/fps frames a second.
AVFrame *latest_video_frame = nullptr;
std::condition_variable video_frame_cv;
std::mutex video_frame_mutex;
std::thread video_send_encode_thread([&]() {
int64_t video_pts_counter = 0;
AVFrame *video_frame = nullptr;
while(running) {
{
std::unique_lock<std::mutex> lock(video_frame_mutex);
video_frame_cv.wait(lock, [&]{ return latest_video_frame || !running; });
if(!running)
break;
if(!latest_video_frame)
continue;
video_frame = latest_video_frame;
latest_video_frame = nullptr;
}
const double this_video_frame_time = clock_get_monotonic_seconds();
const int64_t expected_frames = std::round((this_video_frame_time - start_time_pts) / target_fps);
const int num_frames = std::max(0L, expected_frames - video_pts_counter);
// TODO: Check if duplicate frame can be saved just by writing it with a different pts instead of sending it again
for(int i = 0; i < num_frames; ++i) {
video_frame->pts = video_pts_counter + i;
int ret = avcodec_send_frame(video_codec_context, video_frame);
if(ret == 0) {
// TODO: Move to separate thread because this could write to network (for example when livestreaming)
receive_frames(video_codec_context, VIDEO_STREAM_INDEX, video_stream, video_frame->pts, av_format_context,
record_start_time, frame_data_queue, replay_buffer_size_secs, frames_erased, write_output_mutex);
} else {
fprintf(stderr, "Error: avcodec_send_frame failed, error: %s\n", av_error_to_string(ret));
}
}
video_pts_counter += num_frames;
av_frame_free(&video_frame);
video_frame = nullptr;
}
});
while(running) {
double frame_start = clock_get_monotonic_seconds(); double frame_start = clock_get_monotonic_seconds();
gsr_capture_tick(capture, video_codec_context, &frame); gsr_capture_tick(capture, video_codec_context, &frame);
@ -1848,7 +1893,8 @@ int main(int argc, char **argv) {
audio_track.pts += audio_track.codec_context->frame_size; audio_track.pts += audio_track.codec_context->frame_size;
err = avcodec_send_frame(audio_track.codec_context, aframe); err = avcodec_send_frame(audio_track.codec_context, aframe);
if(err >= 0){ if(err >= 0){
receive_frames(audio_track.codec_context, audio_track.stream_index, audio_track.stream, aframe, av_format_context, record_start_time, frame_data_queue, replay_buffer_size_secs, frames_erased, write_output_mutex); // TODO: Move to separate thread because this could write to network (for example when livestreaming)
receive_frames(audio_track.codec_context, audio_track.stream_index, audio_track.stream, aframe->pts, av_format_context, record_start_time, frame_data_queue, replay_buffer_size_secs, frames_erased, write_output_mutex);
} else { } else {
fprintf(stderr, "Failed to encode audio!\n"); fprintf(stderr, "Failed to encode audio!\n");
} }
@ -1870,28 +1916,13 @@ int main(int argc, char **argv) {
if (frame_time_overflow >= 0.0) { if (frame_time_overflow >= 0.0) {
frame_timer_start = time_now - frame_time_overflow; frame_timer_start = time_now - frame_time_overflow;
gsr_capture_capture(capture, frame); gsr_capture_capture(capture, frame);
std::lock_guard<std::mutex> lock(video_frame_mutex);
const double this_video_frame_time = clock_get_monotonic_seconds(); if(latest_video_frame) {
const int64_t expected_frames = std::round((this_video_frame_time - start_time_pts) / target_fps); av_frame_free(&latest_video_frame);
latest_video_frame = nullptr;
const int num_frames = std::max(0L, expected_frames - video_pts_counter);
frame->flags &= ~AV_FRAME_FLAG_DISCARD;
// TODO: Check if duplicate frame can be saved just by writing it with a different pts instead of sending it again
for(int i = 0; i < num_frames; ++i) {
if(i > 0)
frame->flags |= AV_FRAME_FLAG_DISCARD;
frame->pts = video_pts_counter + i;
int ret = avcodec_send_frame(video_codec_context, frame);
if (ret >= 0) {
receive_frames(video_codec_context, VIDEO_STREAM_INDEX, video_stream, frame, av_format_context,
record_start_time, frame_data_queue, replay_buffer_size_secs, frames_erased, write_output_mutex);
} else {
fprintf(stderr, "Error: avcodec_send_frame failed, error: %s\n", av_error_to_string(ret));
}
} }
video_pts_counter += num_frames; latest_video_frame = av_frame_clone(frame);
video_frame_cv.notify_one();
} }
if(save_replay_thread.valid() && save_replay_thread.wait_for(std::chrono::seconds(0)) == std::future_status::ready) { if(save_replay_thread.valid() && save_replay_thread.wait_for(std::chrono::seconds(0)) == std::future_status::ready) {
@ -1905,7 +1936,6 @@ int main(int argc, char **argv) {
save_replay_async(video_codec_context, VIDEO_STREAM_INDEX, audio_tracks, frame_data_queue, frames_erased, filename, container_format, file_extension, write_output_mutex); save_replay_async(video_codec_context, VIDEO_STREAM_INDEX, audio_tracks, frame_data_queue, frames_erased, filename, container_format, file_extension, write_output_mutex);
} }
// av_frame_free(&frame);
double frame_end = clock_get_monotonic_seconds(); double frame_end = clock_get_monotonic_seconds();
double frame_sleep_fps = 1.0 / update_fps; double frame_sleep_fps = 1.0 / update_fps;
double sleep_time = frame_sleep_fps - (frame_end - frame_start); double sleep_time = frame_sleep_fps - (frame_end - frame_start);
@ -1928,6 +1958,18 @@ int main(int argc, char **argv) {
} }
} }
{
std::lock_guard<std::mutex> lock(video_frame_mutex);
video_frame_cv.notify_one();
}
video_send_encode_thread.join();
//video_packet_save_thread.join();
if(latest_video_frame) {
av_frame_free(&latest_video_frame);
latest_video_frame = nullptr;
}
if (replay_buffer_size_secs == -1 && av_write_trailer(av_format_context) != 0) { if (replay_buffer_size_secs == -1 && av_write_trailer(av_format_context) != 0) {
fprintf(stderr, "Failed to write trailer\n"); fprintf(stderr, "Failed to write trailer\n");
} }