wip intel

parent a7e0dbd833, commit 6a6bb703bc

.gitignore (vendored): 1 change
@@ -6,6 +6,7 @@ tests/compile_commands.json
.clangd/
.cache/
.vscode/

*.o
gpu-screen-recorder
README.md:
@@ -7,7 +7,6 @@ where only the last few seconds are saved.

## Note
This software works only on X11.\
Recording a window doesn't work when using picom in glx mode. However, it works in xrender mode or when recording a monitor/screen (which uses NvFBC).\
If you are using a variable refresh rate monitor, choose to record "screen-direct". This allows variable refresh rate to work when recording fullscreen applications. Note that some applications, such as mpv, will not work in fullscreen mode; a fix is being developed for this.\
For screen capture to work with PRIME (laptops with an nvidia gpu), you must set the primary GPU to your dedicated nvidia graphics card. You can do this by selecting "NVIDIA (Performance Mode)" in nvidia settings:\
![](https://dec05eba.com/images/nvidia-settings-prime.png)\
@@ -23,12 +22,12 @@ Using NvFBC (recording the monitor/screen) is not faster than not using NvFBC (r

# Installation
If you are running an Arch Linux based distro, you can find gpu screen recorder on the AUR under the name gpu-screen-recorder-git (`yay -S gpu-screen-recorder-git`).\
If you are running an Ubuntu based distro, run `install_ubuntu.sh` as root: `sudo ./install_ubuntu.sh`. You also need to install the `libnvidia-compute` package that matches your nvidia driver, which provides the libcuda.so needed to run gpu-screen-recorder. However, it's recommended that you use the flatpak version of gpu-screen-recorder if you use an older version of Ubuntu, as the ffmpeg version will be old and won't support the best quality options.\
If you are running an Ubuntu based distro, run `install_ubuntu.sh` as root: `sudo ./install_ubuntu.sh`. You also need to install the `libnvidia-compute` package that matches your nvidia driver, which provides the libcuda.so needed to run gpu-screen-recorder; you also need `libnvidia-fbc.so.1` when using nvfbc. However, it's recommended that you use the flatpak version of gpu-screen-recorder if you use an older version of Ubuntu, as the ffmpeg version will be old and won't support the best quality options.\
If you are running another distro, you can run `install.sh` as root: `sudo ./install.sh`, but you need to manually install the dependencies, as described below.\
You can also install gpu screen recorder ([the gtk gui version](https://git.dec05eba.com/gpu-screen-recorder-gtk/)) from [flathub](https://flathub.org/apps/details/com.dec05eba.gpu_screen_recorder).

# Dependencies
`libgl (libglvnd), ffmpeg, libx11, libxcomposite, libxrandr, libpulse`. You also need `libcuda.so` installed when you run `gpu-screen-recorder`.\
`libgl (and libegl) (libglvnd), ffmpeg, libx11, libxcomposite, libpulse`. You also need `libcuda.so` installed when you run `gpu-screen-recorder`, and `libnvidia-fbc.so.1` when using nvfbc.\
Recording monitors requires a gpu with NvFBC support (note: this is not required when recording a single window!). Normally only Tesla and Quadro gpus support this, but by using [nvidia-patch](https://github.com/keylase/nvidia-patch) or [nvlax](https://github.com/illnyang/nvlax) you can enable it on all gpus that support nvenc as well (gpus as old as the nvidia 600 series), provided you are not using outdated gpu drivers.

# How to use
@@ -55,8 +54,6 @@ The plugin does everything on the GPU and gives the texture to OBS, but OBS does
FFMPEG only uses the GPU with CUDA when transcoding from an input video to an output video, not when recording the screen with x11grab. So FFMPEG has the same fps drop issues that OBS has.

# TODO
* Support AMD and Intel, using VAAPI.
libraries at compile-time.
* Dynamically change bitrate/resolution to match the desired fps. This would be helpful when streaming, for example, where the encode output speed also depends on the upload speed to the streaming service.
* Show cursor when recording. Currently the cursor is not visible when recording a window.
* Implement opengl injection to capture the texture. This fixes composition issues and VRR issues without having to use NvFBC direct capture.
TODO: 4 changes
@@ -1,7 +1,6 @@
Check for reparent.
Only add a window to the list if it is a topmost window.
Track window damage and only update then. That is better for output file size.
Getting the texture of a window when using a compositor is an nvidia specific limitation. When gpu-screen-recorder supports other gpus, this can be ignored.
Quickly changing workspace and back while recording under i3 breaks the screen recorder. i3 probably unmaps windows in other workspaces.
See https://trac.ffmpeg.org/wiki/EncodingForStreamingSites for optimizing streaming.
Add an option to merge audio tracks into one (muxing?) by adding multiple audio streams in one -a arg, separated by commas.
@@ -13,5 +12,6 @@ Allow recording a region by recording the compositor proxy window / nvfbc window
Resizing the target window to be smaller than the initial size is buggy. The window texture ends up duplicated in the video.
Handle frames (especially for applications with rounded client-side decorations, such as gnome applications; they are huge).
Use nvenc directly, which allows removing the use of cuda.
Fall back to nvfbc and window tracking if window capture fails.
Handle xrandr monitor changes in nvfbc.
Add an option to track the focused window. In that case the video size should dynamically change (change frame resolution) to match the window size, and it should update when the window resizes.
Add an option for 4:4:4 chroma sampling for the output video.
build.sh: 8 changes
@@ -1,16 +1,18 @@
#!/bin/sh -e

#libdrm
dependencies="libavcodec libavformat libavutil x11 xcomposite xrandr libpulse libswresample"
includes="$(pkg-config --cflags $dependencies)"
libs="$(pkg-config --libs $dependencies) -ldl -pthread -lm"
gcc -c src/capture/capture.c -O2 -g0 -DNDEBUG $includes
gcc -c src/capture/nvfbc.c -O2 -g0 -DNDEBUG $includes
gcc -c src/capture/xcomposite.c -O2 -g0 -DNDEBUG $includes
gcc -c src/gl.c -O2 -g0 -DNDEBUG $includes
gcc -c src/capture/xcomposite_cuda.c -O2 -g0 -DNDEBUG $includes
gcc -c src/capture/xcomposite_drm.c -O2 -g0 -DNDEBUG $includes
gcc -c src/egl.c -O2 -g0 -DNDEBUG $includes
gcc -c src/cuda.c -O2 -g0 -DNDEBUG $includes
gcc -c src/window_texture.c -O2 -g0 -DNDEBUG $includes
gcc -c src/time.c -O2 -g0 -DNDEBUG $includes
g++ -c src/sound.cpp -O2 -g0 -DNDEBUG $includes
g++ -c src/main.cpp -O2 -g0 -DNDEBUG $includes
g++ -o gpu-screen-recorder -O2 capture.o nvfbc.o gl.o cuda.o window_texture.o time.o xcomposite.o sound.o main.o -s $libs
g++ -o gpu-screen-recorder -O2 capture.o nvfbc.o egl.o cuda.o window_texture.o time.o xcomposite_cuda.o xcomposite_drm.o sound.o main.o -s $libs
echo "Successfully built gpu-screen-recorder"
include/capture/xcomposite.h (deleted):
@@ -1,16 +0,0 @@
#ifndef GSR_CAPTURE_XCOMPOSITE_H
#define GSR_CAPTURE_XCOMPOSITE_H

#include "capture.h"
#include "../vec2.h"
#include <X11/X.h>

typedef struct _XDisplay Display;

typedef struct {
    Window window;
} gsr_capture_xcomposite_params;

gsr_capture* gsr_capture_xcomposite_create(const gsr_capture_xcomposite_params *params);

#endif /* GSR_CAPTURE_XCOMPOSITE_H */
include/capture/xcomposite_cuda.h (new file): 16 lines
@@ -0,0 +1,16 @@
#ifndef GSR_CAPTURE_XCOMPOSITE_CUDA_H
#define GSR_CAPTURE_XCOMPOSITE_CUDA_H

#include "capture.h"
#include "../vec2.h"
#include <X11/X.h>

typedef struct _XDisplay Display;

typedef struct {
    Window window;
} gsr_capture_xcomposite_cuda_params;

gsr_capture* gsr_capture_xcomposite_cuda_create(const gsr_capture_xcomposite_cuda_params *params);

#endif /* GSR_CAPTURE_XCOMPOSITE_CUDA_H */
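
/* Illustrative usage sketch (not part of this commit), based only on the
 * declarations above: create the CUDA window-capture backend for an X11
 * window. "target_window" is an assumed window id obtained elsewhere. */
gsr_capture_xcomposite_cuda_params params = { .window = target_window };
gsr_capture *capture = gsr_capture_xcomposite_cuda_create(&params);
if(!capture) {
    /* creation failed (invalid params or out of memory) */
}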
include/capture/xcomposite_drm.h (new file): 16 lines
@@ -0,0 +1,16 @@
#ifndef GSR_CAPTURE_XCOMPOSITE_DRM_H
#define GSR_CAPTURE_XCOMPOSITE_DRM_H

#include "capture.h"
#include "../vec2.h"
#include <X11/X.h>

typedef struct _XDisplay Display;

typedef struct {
    Window window;
} gsr_capture_xcomposite_drm_params;

gsr_capture* gsr_capture_xcomposite_drm_create(const gsr_capture_xcomposite_drm_params *params);

#endif /* GSR_CAPTURE_XCOMPOSITE_DRM_H */
include/egl.h (new file): 170 lines
@ -0,0 +1,170 @@
|
||||
#ifndef GSR_EGL_H
|
||||
#define GSR_EGL_H
|
||||
|
||||
/* OpenGL EGL library with a hidden window context (to allow using the opengl functions) */
|
||||
|
||||
#include <X11/X.h>
|
||||
#include <X11/Xutil.h>
|
||||
#include <stdbool.h>
|
||||
#include <stdint.h>
|
||||
|
||||
#ifdef _WIN64
|
||||
typedef signed long long int khronos_intptr_t;
|
||||
typedef unsigned long long int khronos_uintptr_t;
|
||||
typedef signed long long int khronos_ssize_t;
|
||||
typedef unsigned long long int khronos_usize_t;
|
||||
#else
|
||||
typedef signed long int khronos_intptr_t;
|
||||
typedef unsigned long int khronos_uintptr_t;
|
||||
typedef signed long int khronos_ssize_t;
|
||||
typedef unsigned long int khronos_usize_t;
|
||||
#endif
|
||||
|
||||
typedef void* EGLDisplay;
|
||||
typedef void* EGLNativeDisplayType;
|
||||
typedef uintptr_t EGLNativeWindowType;
|
||||
typedef uintptr_t EGLNativePixmapType;
|
||||
typedef void* EGLConfig;
|
||||
typedef void* EGLSurface;
|
||||
typedef void* EGLContext;
|
||||
typedef void* EGLClientBuffer;
|
||||
typedef void* EGLImage;
|
||||
typedef void* EGLImageKHR;
|
||||
typedef void *GLeglImageOES;
|
||||
typedef void (*__eglMustCastToProperFunctionPointerType)(void);
|
||||
|
||||
#define EGL_BUFFER_SIZE 0x3020
|
||||
#define EGL_RENDERABLE_TYPE 0x3040
|
||||
#define EGL_OPENGL_ES2_BIT 0x0004
|
||||
#define EGL_NONE 0x3038
|
||||
#define EGL_CONTEXT_CLIENT_VERSION 0x3098
|
||||
#define EGL_BACK_BUFFER 0x3084
|
||||
|
||||
#define GL_TEXTURE_2D 0x0DE1
|
||||
#define GL_RGB 0x1907
|
||||
#define GL_UNSIGNED_BYTE 0x1401
|
||||
#define GL_COLOR_BUFFER_BIT 0x00004000
|
||||
#define GL_TEXTURE_WRAP_S 0x2802
|
||||
#define GL_TEXTURE_WRAP_T 0x2803
|
||||
#define GL_TEXTURE_MAG_FILTER 0x2800
|
||||
#define GL_TEXTURE_MIN_FILTER 0x2801
|
||||
#define GL_TEXTURE_WIDTH 0x1000
|
||||
#define GL_TEXTURE_HEIGHT 0x1001
|
||||
#define GL_NEAREST 0x2600
|
||||
#define GL_CLAMP_TO_EDGE 0x812F
|
||||
#define GL_LINEAR 0x2601
|
||||
#define GL_FRAMEBUFFER 0x8D40
|
||||
#define GL_COLOR_ATTACHMENT0 0x8CE0
|
||||
#define GL_FRAMEBUFFER_COMPLETE 0x8CD5
|
||||
#define GL_STATIC_DRAW 0x88E4
|
||||
#define GL_ARRAY_BUFFER 0x8892
|
||||
|
||||
#define GL_VENDOR 0x1F00
|
||||
#define GL_RENDERER 0x1F01
|
||||
|
||||
#define GLX_BUFFER_SIZE 2
|
||||
#define GLX_DOUBLEBUFFER 5
|
||||
#define GLX_RED_SIZE 8
|
||||
#define GLX_GREEN_SIZE 9
|
||||
#define GLX_BLUE_SIZE 10
|
||||
#define GLX_ALPHA_SIZE 11
|
||||
#define GLX_DEPTH_SIZE 12
|
||||
|
||||
#define GLX_RGBA_BIT 0x00000001
|
||||
#define GLX_RENDER_TYPE 0x8011
|
||||
#define GLX_FRONT_EXT 0x20DE
|
||||
#define GLX_BIND_TO_TEXTURE_RGB_EXT 0x20D0
|
||||
#define GLX_DRAWABLE_TYPE 0x8010
|
||||
#define GLX_WINDOW_BIT 0x00000001
|
||||
#define GLX_PIXMAP_BIT 0x00000002
|
||||
#define GLX_BIND_TO_TEXTURE_TARGETS_EXT 0x20D3
|
||||
#define GLX_TEXTURE_2D_BIT_EXT 0x00000002
|
||||
#define GLX_TEXTURE_TARGET_EXT 0x20D6
|
||||
#define GLX_TEXTURE_2D_EXT 0x20DC
|
||||
#define GLX_TEXTURE_FORMAT_EXT 0x20D5
|
||||
#define GLX_TEXTURE_FORMAT_RGB_EXT 0x20D9
|
||||
#define GLX_CONTEXT_FORWARD_COMPATIBLE_BIT_ARB 0x00000002
|
||||
#define GLX_CONTEXT_MAJOR_VERSION_ARB 0x2091
|
||||
#define GLX_CONTEXT_MINOR_VERSION_ARB 0x2092
|
||||
#define GLX_CONTEXT_FLAGS_ARB 0x2094
|
||||
|
||||
typedef struct {
|
||||
void *egl_library;
|
||||
void *gl_library;
|
||||
Display *dpy;
|
||||
EGLDisplay egl_display;
|
||||
EGLSurface egl_surface;
|
||||
EGLContext egl_context;
|
||||
Window window;
|
||||
|
||||
EGLDisplay (*eglGetDisplay)(EGLNativeDisplayType display_id);
|
||||
unsigned int (*eglInitialize)(EGLDisplay dpy, int32_t *major, int32_t *minor);
|
||||
unsigned int (*eglChooseConfig)(EGLDisplay dpy, const int32_t *attrib_list, EGLConfig *configs, int32_t config_size, int32_t *num_config);
|
||||
EGLSurface (*eglCreateWindowSurface)(EGLDisplay dpy, EGLConfig config, EGLNativeWindowType win, const int32_t *attrib_list);
|
||||
EGLContext (*eglCreateContext)(EGLDisplay dpy, EGLConfig config, EGLContext share_context, const int32_t *attrib_list);
|
||||
unsigned int (*eglMakeCurrent)(EGLDisplay dpy, EGLSurface draw, EGLSurface read, EGLContext ctx);
|
||||
EGLSurface (*eglCreatePixmapSurface)(EGLDisplay dpy, EGLConfig config, EGLNativePixmapType pixmap, const int32_t *attrib_list);
|
||||
EGLImage (*eglCreateImage)(EGLDisplay dpy, EGLContext ctx, unsigned int target, EGLClientBuffer buffer, const intptr_t *attrib_list);
|
||||
unsigned int (*eglBindTexImage)(EGLDisplay dpy, EGLSurface surface, int32_t buffer);
|
||||
unsigned int (*eglSwapInterval)(EGLDisplay dpy, int32_t interval);
|
||||
unsigned int (*eglSwapBuffers)(EGLDisplay dpy, EGLSurface surface);
|
||||
__eglMustCastToProperFunctionPointerType (*eglGetProcAddress)(const char *procname);
|
||||
|
||||
unsigned int (*eglExportDMABUFImageQueryMESA)(EGLDisplay dpy, EGLImageKHR image, int *fourcc, int *num_planes, uint64_t *modifiers);
|
||||
unsigned int (*eglExportDMABUFImageMESA)(EGLDisplay dpy, EGLImageKHR image, int *fds, int32_t *strides, int32_t *offsets);
|
||||
void (*glEGLImageTargetTexture2DOES)(unsigned int target, GLeglImageOES image);
|
||||
|
||||
unsigned int (*glGetError)(void);
|
||||
const unsigned char* (*glGetString)(unsigned int name);
|
||||
void (*glClear)(unsigned int mask);
|
||||
void (*glClearColor)(float red, float green, float blue, float alpha);
|
||||
void (*glGenTextures)(int n, unsigned int *textures);
|
||||
void (*glDeleteTextures)(int n, const unsigned int *texture);
|
||||
void (*glBindTexture)(unsigned int target, unsigned int texture);
|
||||
void (*glTexParameteri)(unsigned int target, unsigned int pname, int param);
|
||||
void (*glGetTexLevelParameteriv)(unsigned int target, int level, unsigned int pname, int *params);
|
||||
void (*glTexImage2D)(unsigned int target, int level, int internalFormat, int width, int height, int border, unsigned int format, unsigned int type, const void *pixels);
|
||||
void (*glCopyImageSubData)(unsigned int srcName, unsigned int srcTarget, int srcLevel, int srcX, int srcY, int srcZ, unsigned int dstName, unsigned int dstTarget, int dstLevel, int dstX, int dstY, int dstZ, int srcWidth, int srcHeight, int srcDepth);
|
||||
void (*glGenFramebuffers)(int n, unsigned int *framebuffers);
|
||||
void (*glBindFramebuffer)(unsigned int target, unsigned int framebuffer);
|
||||
void (*glViewport)(int x, int y, int width, int height);
|
||||
void (*glFramebufferTexture2D)(unsigned int target, unsigned int attachment, unsigned int textarget, unsigned int texture, int level);
|
||||
void (*glDrawBuffers)(int n, const unsigned int *bufs);
|
||||
unsigned int (*glCheckFramebufferStatus)(unsigned int target);
|
||||
void (*glBindBuffer)(unsigned int target, unsigned int buffer);
|
||||
void (*glGenBuffers)(int n, unsigned int *buffers);
|
||||
void (*glBufferData)(unsigned int target, khronos_ssize_t size, const void *data, unsigned int usage);
|
||||
int (*glGetUniformLocation)(unsigned int program, const char *name);
|
||||
void (*glGenVertexArrays)(int n, unsigned int *arrays);
|
||||
void (*glBindVertexArray)(unsigned int array);
|
||||
|
||||
unsigned int (*glCreateProgram)(void);
|
||||
unsigned int (*glCreateShader)(unsigned int type);
|
||||
void (*glAttachShader)(unsigned int program, unsigned int shader);
|
||||
void (*glBindAttribLocation)(unsigned int program, unsigned int index, const char *name);
|
||||
void (*glCompileShader)(unsigned int shader);
|
||||
void (*glLinkProgram)(unsigned int program);
|
||||
void (*glShaderSource)(unsigned int shader, int count, const char *const*string, const int *length);
|
||||
void (*glUseProgram)(unsigned int program);
|
||||
void (*glGetProgramInfoLog)(unsigned int program, int bufSize, int *length, char *infoLog);
|
||||
void (*glGetShaderiv)(unsigned int shader, unsigned int pname, int *params);
|
||||
void (*glGetShaderInfoLog)(unsigned int shader, int bufSize, int *length, char *infoLog);
|
||||
void (*glGetShaderSource)(unsigned int shader, int bufSize, int *length, char *source);
|
||||
void (*glDeleteProgram)(unsigned int program);
|
||||
void (*glDeleteShader)(unsigned int shader);
|
||||
void (*glGetProgramiv)(unsigned int program, unsigned int pname, int *params);
|
||||
void (*glVertexAttribPointer)(unsigned int index, int size, unsigned int type, unsigned char normalized, int stride, const void *pointer);
|
||||
void (*glEnableVertexAttribArray)(unsigned int index);
|
||||
void (*glDrawArrays)(unsigned int mode, int first, int count );
|
||||
void (*glReadBuffer)( unsigned int mode );
|
||||
void (*glReadPixels)(int x, int y,
|
||||
int width, int height,
|
||||
unsigned int format, unsigned int type,
|
||||
void *pixels );
|
||||
} gsr_egl;
|
||||
|
||||
bool gsr_egl_load(gsr_egl *self, Display *dpy);
|
||||
bool gsr_egl_make_context_current(gsr_egl *self);
|
||||
void gsr_egl_unload(gsr_egl *self);
|
||||
|
||||
#endif /* GSR_EGL_H */
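
/* Illustrative usage sketch (not part of this commit), based only on the
 * declarations in this header: load the EGL/GL function pointers, make the
 * hidden window context current and query the GL renderer string.
 * "dpy" is an assumed, already-open X11 Display*. */
gsr_egl egl;
if(gsr_egl_load(&egl, dpy)) {
    gsr_egl_make_context_current(&egl);
    const unsigned char *renderer = egl.glGetString(GL_RENDERER);
    fprintf(stderr, "gl renderer: %s\n", renderer); /* requires <stdio.h> */
    gsr_egl_unload(&egl);
}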
include/gl.h (deleted): 102 lines
@ -1,102 +0,0 @@
|
||||
#ifndef GSR_GL_H
|
||||
#define GSR_GL_H
|
||||
|
||||
/* OpenGL library with a hidden window context (to allow using the opengl functions) */
|
||||
|
||||
#include <X11/X.h>
|
||||
#include <X11/Xutil.h>
|
||||
#include <stdbool.h>
|
||||
|
||||
typedef XID GLXPixmap;
|
||||
typedef XID GLXDrawable;
|
||||
typedef XID GLXWindow;
|
||||
|
||||
typedef struct __GLXcontextRec *GLXContext;
|
||||
typedef struct __GLXFBConfigRec *GLXFBConfig;
|
||||
|
||||
#define GL_TEXTURE_2D 0x0DE1
|
||||
#define GL_RGB 0x1907
|
||||
#define GL_UNSIGNED_BYTE 0x1401
|
||||
#define GL_COLOR_BUFFER_BIT 0x00004000
|
||||
#define GL_TEXTURE_WRAP_S 0x2802
|
||||
#define GL_TEXTURE_WRAP_T 0x2803
|
||||
#define GL_TEXTURE_MAG_FILTER 0x2800
|
||||
#define GL_TEXTURE_MIN_FILTER 0x2801
|
||||
#define GL_TEXTURE_WIDTH 0x1000
|
||||
#define GL_TEXTURE_HEIGHT 0x1001
|
||||
#define GL_NEAREST 0x2600
|
||||
#define GL_CLAMP_TO_EDGE 0x812F
|
||||
#define GL_LINEAR 0x2601
|
||||
|
||||
#define GL_RENDERER 0x1F01
|
||||
|
||||
#define GLX_BUFFER_SIZE 2
|
||||
#define GLX_DOUBLEBUFFER 5
|
||||
#define GLX_RED_SIZE 8
|
||||
#define GLX_GREEN_SIZE 9
|
||||
#define GLX_BLUE_SIZE 10
|
||||
#define GLX_ALPHA_SIZE 11
|
||||
#define GLX_DEPTH_SIZE 12
|
||||
|
||||
#define GLX_RGBA_BIT 0x00000001
|
||||
#define GLX_RENDER_TYPE 0x8011
|
||||
#define GLX_FRONT_EXT 0x20DE
|
||||
#define GLX_BIND_TO_TEXTURE_RGB_EXT 0x20D0
|
||||
#define GLX_DRAWABLE_TYPE 0x8010
|
||||
#define GLX_WINDOW_BIT 0x00000001
|
||||
#define GLX_PIXMAP_BIT 0x00000002
|
||||
#define GLX_BIND_TO_TEXTURE_TARGETS_EXT 0x20D3
|
||||
#define GLX_TEXTURE_2D_BIT_EXT 0x00000002
|
||||
#define GLX_TEXTURE_TARGET_EXT 0x20D6
|
||||
#define GLX_TEXTURE_2D_EXT 0x20DC
|
||||
#define GLX_TEXTURE_FORMAT_EXT 0x20D5
|
||||
#define GLX_TEXTURE_FORMAT_RGB_EXT 0x20D9
|
||||
#define GLX_CONTEXT_FORWARD_COMPATIBLE_BIT_ARB 0x00000002
|
||||
#define GLX_CONTEXT_MAJOR_VERSION_ARB 0x2091
|
||||
#define GLX_CONTEXT_MINOR_VERSION_ARB 0x2092
|
||||
#define GLX_CONTEXT_FLAGS_ARB 0x2094
|
||||
|
||||
typedef struct {
|
||||
void *library;
|
||||
Display *dpy;
|
||||
GLXFBConfig *fbconfigs;
|
||||
XVisualInfo *visual_info;
|
||||
GLXFBConfig fbconfig;
|
||||
Colormap colormap;
|
||||
GLXContext gl_context;
|
||||
Window window;
|
||||
|
||||
GLXPixmap (*glXCreatePixmap)(Display *dpy, GLXFBConfig config, Pixmap pixmap, const int *attribList);
|
||||
void (*glXDestroyPixmap)(Display *dpy, GLXPixmap pixmap);
|
||||
void (*glXBindTexImageEXT)(Display *dpy, GLXDrawable drawable, int buffer, const int *attrib_list);
|
||||
void (*glXReleaseTexImageEXT)(Display *dpy, GLXDrawable drawable, int buffer);
|
||||
GLXFBConfig* (*glXChooseFBConfig)(Display *dpy, int screen, const int *attribList, int *nitems);
|
||||
XVisualInfo* (*glXGetVisualFromFBConfig)(Display *dpy, GLXFBConfig config);
|
||||
GLXContext (*glXCreateContextAttribsARB)(Display *dpy, GLXFBConfig config, GLXContext share_context, Bool direct, const int *attrib_list);
|
||||
Bool (*glXMakeContextCurrent)(Display *dpy, GLXDrawable draw, GLXDrawable read, GLXContext ctx);
|
||||
void (*glXDestroyContext)(Display *dpy, GLXContext ctx);
|
||||
void (*glXSwapBuffers)(Display *dpy, GLXDrawable drawable);
|
||||
|
||||
void (*glXSwapIntervalEXT)(Display *dpy, GLXDrawable drawable, int interval);
|
||||
int (*glXSwapIntervalMESA)(unsigned int interval);
|
||||
int (*glXSwapIntervalSGI)(int interval);
|
||||
|
||||
void (*glClearTexImage)(unsigned int texture, unsigned int level, unsigned int format, unsigned int type, const void *data);
|
||||
|
||||
unsigned int (*glGetError)(void);
|
||||
const unsigned char* (*glGetString)(unsigned int name);
|
||||
void (*glClear)(unsigned int mask);
|
||||
void (*glGenTextures)(int n, unsigned int *textures);
|
||||
void (*glDeleteTextures)(int n, const unsigned int *texture);
|
||||
void (*glBindTexture)(unsigned int target, unsigned int texture);
|
||||
void (*glTexParameteri)(unsigned int target, unsigned int pname, int param);
|
||||
void (*glGetTexLevelParameteriv)(unsigned int target, int level, unsigned int pname, int *params);
|
||||
void (*glTexImage2D)(unsigned int target, int level, int internalFormat, int width, int height, int border, unsigned int format, unsigned int type, const void *pixels);
|
||||
void (*glCopyImageSubData)(unsigned int srcName, unsigned int srcTarget, int srcLevel, int srcX, int srcY, int srcZ, unsigned int dstName, unsigned int dstTarget, int dstLevel, int dstX, int dstY, int dstZ, int srcWidth, int srcHeight, int srcDepth);
|
||||
} gsr_gl;
|
||||
|
||||
bool gsr_gl_load(gsr_gl *self, Display *dpy);
|
||||
bool gsr_gl_make_context_current(gsr_gl *self);
|
||||
void gsr_gl_unload(gsr_gl *self);
|
||||
|
||||
#endif /* GSR_GL_H */
|
include/window_texture.h:
@@ -1,20 +1,22 @@
#ifndef WINDOW_TEXTURE_H
#define WINDOW_TEXTURE_H

#include "gl.h"
#include "egl.h"

typedef struct {
    Display *display;
    Window window;
    Pixmap pixmap;
    GLXPixmap glx_pixmap;
    unsigned int texture_id;
    unsigned int target_texture_id;
    int texture_width;
    int texture_height;
    int redirected;
    gsr_gl *gl;
    gsr_egl *egl;
} WindowTexture;

/* Returns 0 on success */
int window_texture_init(WindowTexture *window_texture, Display *display, Window window, gsr_gl *gl);
int window_texture_init(WindowTexture *window_texture, Display *display, Window window, gsr_egl *egl);
void window_texture_deinit(WindowTexture *self);

/*
@@ -13,3 +13,4 @@ xcomposite = ">=0.2"
xrandr = ">=1"
libpulse = ">=13"
libswresample = ">=3"
#libdrm = ">=2"

@ -22,6 +22,7 @@ typedef struct {
|
||||
bool fbc_handle_created;
|
||||
|
||||
gsr_cuda cuda;
|
||||
bool frame_initialized;
|
||||
} gsr_capture_nvfbc;
|
||||
|
||||
#if defined(_WIN64) || defined(__LP64__)
|
||||
@ -52,28 +53,45 @@ static uint32_t get_output_id_from_display_name(NVFBC_RANDR_OUTPUT_INFO *outputs
|
||||
}
|
||||
|
||||
/* TODO: Test with optimus and open kernel modules */
|
||||
static bool driver_supports_direct_capture_cursor() {
|
||||
static bool get_driver_version(int *major, int *minor) {
|
||||
*major = 0;
|
||||
*minor = 0;
|
||||
|
||||
FILE *f = fopen("/proc/driver/nvidia/version", "rb");
|
||||
if(!f)
|
||||
if(!f) {
|
||||
fprintf(stderr, "gsr warning: failed to get nvidia driver version (failed to read /proc/driver/nvidia/version)\n");
|
||||
return false;
|
||||
}
|
||||
|
||||
char buffer[2048];
|
||||
size_t bytes_read = fread(buffer, 1, sizeof(buffer) - 1, f);
|
||||
buffer[bytes_read] = '\0';
|
||||
|
||||
bool supports_cursor = false;
|
||||
bool success = false;
|
||||
const char *p = strstr(buffer, "Kernel Module");
|
||||
if(p) {
|
||||
p += 13;
|
||||
int driver_major_version = 0, driver_minor_version = 0;
|
||||
if(sscanf(p, "%d.%d", &driver_major_version, &driver_minor_version) == 2) {
|
||||
if(driver_major_version > 515 || (driver_major_version == 515 && driver_minor_version >= 57))
|
||||
supports_cursor = true;
|
||||
*major = driver_major_version;
|
||||
*minor = driver_minor_version;
|
||||
success = true;
|
||||
}
|
||||
}
|
||||
|
||||
if(!success)
|
||||
fprintf(stderr, "gsr warning: failed to get nvidia driver version\n");
|
||||
|
||||
fclose(f);
|
||||
return supports_cursor;
|
||||
return success;
|
||||
}
|
||||
|
||||
static bool version_at_least(int major, int minor, int expected_major, int expected_minor) {
|
||||
return major > expected_major || (major == expected_major && minor >= expected_minor);
|
||||
}
|
||||
|
||||
static bool version_less_than(int major, int minor, int expected_major, int expected_minor) {
|
||||
return major < expected_major || (major == expected_major && minor < expected_minor);
|
||||
}
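
/* Illustrative sketch (not part of this commit) of how the helpers above fit
 * together: get_driver_version() parses /proc/driver/nvidia/version, and
 * version_at_least()/version_less_than() compare the result against a
 * threshold, as gsr_capture_nvfbc_start() does further down. */
int major = 0, minor = 0;
if(get_driver_version(&major, &minor) && version_at_least(major, minor, 515, 57)) {
    /* driver is at least 515.57 */
}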
|
||||
|
||||
static bool gsr_capture_nvfbc_load_library(gsr_capture *cap) {
|
||||
@ -180,6 +198,30 @@ static int gsr_capture_nvfbc_start(gsr_capture *cap, AVCodecContext *video_codec
|
||||
|
||||
const bool capture_region = (x > 0 || y > 0 || width > 0 || height > 0);
|
||||
|
||||
bool supports_direct_cursor = false;
|
||||
bool direct_capture = cap_nvfbc->params.direct_capture;
|
||||
int driver_major_version = 0;
|
||||
int driver_minor_version = 0;
|
||||
if(direct_capture && get_driver_version(&driver_major_version, &driver_minor_version)) {
|
||||
fprintf(stderr, "Info: detected nvidia version: %d.%d\n", driver_major_version, driver_minor_version);
|
||||
|
||||
if(version_at_least(driver_major_version, driver_minor_version, 515, 57) && version_less_than(driver_major_version, driver_minor_version, 520, 56)) {
|
||||
direct_capture = false;
|
||||
fprintf(stderr, "Warning: \"screen-direct\" has temporary been disabled as it causes stuttering with driver versions >= 515.57 and < 520.56. Please update your driver if possible. Capturing \"screen\" instead.\n");
|
||||
}
|
||||
|
||||
// TODO:
|
||||
// Cursor capture disabled because moving the cursor doesn't update capture rate to monitor hz and instead captures at 10-30 hz
|
||||
/*
|
||||
if(direct_capture) {
|
||||
if(version_at_least(driver_major_version, driver_minor_version, 515, 57))
|
||||
supports_direct_cursor = true;
|
||||
else
|
||||
fprintf(stderr, "Info: capturing \"screen-direct\" but driver version appears to be less than 515.57. Disabling capture of cursor. Please update your driver if you want to capture your cursor or record \"screen\" instead.\n");
|
||||
}
|
||||
*/
|
||||
}
|
||||
|
||||
NVFBCSTATUS status;
|
||||
NVFBC_TRACKING_TYPE tracking_type;
|
||||
bool capture_session_created = false;
|
||||
@ -245,11 +287,11 @@ static int gsr_capture_nvfbc_start(gsr_capture *cap, AVCodecContext *video_codec
|
||||
memset(&create_capture_params, 0, sizeof(create_capture_params));
|
||||
create_capture_params.dwVersion = NVFBC_CREATE_CAPTURE_SESSION_PARAMS_VER;
|
||||
create_capture_params.eCaptureType = NVFBC_CAPTURE_SHARED_CUDA;
|
||||
create_capture_params.bWithCursor = (!cap_nvfbc->params.direct_capture || driver_supports_direct_capture_cursor()) ? NVFBC_TRUE : NVFBC_FALSE;
|
||||
create_capture_params.bWithCursor = (!direct_capture || supports_direct_cursor) ? NVFBC_TRUE : NVFBC_FALSE;
|
||||
if(capture_region)
|
||||
create_capture_params.captureBox = (NVFBC_BOX){ x, y, width, height };
|
||||
create_capture_params.eTrackingType = tracking_type;
|
||||
create_capture_params.dwSamplingRateMs = 1000u / (uint32_t)cap_nvfbc->params.fps;
|
||||
create_capture_params.dwSamplingRateMs = 1000u / ((uint32_t)cap_nvfbc->params.fps + 1);
|
||||
create_capture_params.bAllowDirectCapture = cap_nvfbc->params.direct_capture ? NVFBC_TRUE : NVFBC_FALSE;
|
||||
create_capture_params.bPushModel = cap_nvfbc->params.direct_capture ? NVFBC_TRUE : NVFBC_FALSE;
|
||||
if(tracking_type == NVFBC_TRACKING_OUTPUT)
|
||||
@ -324,6 +366,16 @@ static void gsr_capture_nvfbc_destroy_session(gsr_capture *cap) {
|
||||
cap_nvfbc->nv_fbc_handle = 0;
|
||||
}
|
||||
|
||||
static void gsr_capture_nvfbc_tick(gsr_capture *cap, AVCodecContext *video_codec_context, AVFrame **frame) {
|
||||
gsr_capture_nvfbc *cap_nvfbc = cap->priv;
|
||||
if(!cap_nvfbc->frame_initialized && video_codec_context->hw_frames_ctx) {
|
||||
cap_nvfbc->frame_initialized = true;
|
||||
(*frame)->hw_frames_ctx = video_codec_context->hw_frames_ctx;
|
||||
(*frame)->buf[0] = av_buffer_pool_get(((AVHWFramesContext*)video_codec_context->hw_frames_ctx->data)->pool);
|
||||
(*frame)->extended_data = (*frame)->data;
|
||||
}
|
||||
}
|
||||
|
||||
static int gsr_capture_nvfbc_capture(gsr_capture *cap, AVFrame *frame) {
|
||||
gsr_capture_nvfbc *cap_nvfbc = cap->priv;
|
||||
|
||||
@ -338,6 +390,7 @@ static int gsr_capture_nvfbc_capture(gsr_capture *cap, AVFrame *frame) {
|
||||
grab_params.dwFlags = NVFBC_TOCUDA_GRAB_FLAGS_NOWAIT;/* | NVFBC_TOCUDA_GRAB_FLAGS_FORCE_REFRESH;*/
|
||||
grab_params.pFrameGrabInfo = &frame_info;
|
||||
grab_params.pCUDADeviceBuffer = &cu_device_ptr;
|
||||
grab_params.dwTimeoutMs = 0;
|
||||
|
||||
NVFBCSTATUS status = cap_nvfbc->nv_fbc_function_list.nvFBCToCudaGrabFrame(cap_nvfbc->nv_fbc_handle, &grab_params);
|
||||
if(status != NVFBC_SUCCESS) {
|
||||
@ -406,7 +459,7 @@ gsr_capture* gsr_capture_nvfbc_create(const gsr_capture_nvfbc_params *params) {
|
||||
|
||||
*cap = (gsr_capture) {
|
||||
.start = gsr_capture_nvfbc_start,
|
||||
.tick = NULL,
|
||||
.tick = gsr_capture_nvfbc_tick,
|
||||
.should_stop = NULL,
|
||||
.capture = gsr_capture_nvfbc_capture,
|
||||
.destroy = gsr_capture_nvfbc_destroy,
|
||||
|
@ -1,5 +1,5 @@
|
||||
#include "../../include/capture/xcomposite.h"
|
||||
#include "../../include/gl.h"
|
||||
#include "../../include/capture/xcomposite_cuda.h"
|
||||
#include "../../include/egl.h"
|
||||
#include "../../include/cuda.h"
|
||||
#include "../../include/window_texture.h"
|
||||
#include "../../include/time.h"
|
||||
@ -9,10 +9,8 @@
|
||||
#include <libavutil/frame.h>
|
||||
#include <libavcodec/avcodec.h>
|
||||
|
||||
/* TODO: Proper error checks and cleanups */
|
||||
|
||||
typedef struct {
|
||||
gsr_capture_xcomposite_params params;
|
||||
gsr_capture_xcomposite_cuda_params params;
|
||||
Display *dpy;
|
||||
XEvent xev;
|
||||
bool should_stop;
|
||||
@ -32,9 +30,9 @@ typedef struct {
|
||||
CUgraphicsResource cuda_graphics_resource;
|
||||
CUarray mapped_array;
|
||||
|
||||
gsr_gl gl;
|
||||
gsr_egl egl;
|
||||
gsr_cuda cuda;
|
||||
} gsr_capture_xcomposite;
|
||||
} gsr_capture_xcomposite_cuda;
|
||||
|
||||
static int max_int(int a, int b) {
|
||||
return a > b ? a : b;
|
||||
@ -44,58 +42,9 @@ static int min_int(int a, int b) {
|
||||
return a < b ? a : b;
|
||||
}
|
||||
|
||||
static void gsr_capture_xcomposite_stop(gsr_capture *cap, AVCodecContext *video_codec_context);
|
||||
static void gsr_capture_xcomposite_cuda_stop(gsr_capture *cap, AVCodecContext *video_codec_context);
|
||||
|
||||
static Window get_compositor_window(Display *display) {
|
||||
Window overlay_window = XCompositeGetOverlayWindow(display, DefaultRootWindow(display));
|
||||
XCompositeReleaseOverlayWindow(display, DefaultRootWindow(display));
|
||||
|
||||
Window root_window, parent_window;
|
||||
Window *children = NULL;
|
||||
unsigned int num_children = 0;
|
||||
if(XQueryTree(display, overlay_window, &root_window, &parent_window, &children, &num_children) == 0)
|
||||
return None;
|
||||
|
||||
Window compositor_window = None;
|
||||
if(num_children == 1) {
|
||||
compositor_window = children[0];
|
||||
const int screen_width = XWidthOfScreen(DefaultScreenOfDisplay(display));
|
||||
const int screen_height = XHeightOfScreen(DefaultScreenOfDisplay(display));
|
||||
|
||||
XWindowAttributes attr;
|
||||
if(!XGetWindowAttributes(display, compositor_window, &attr) || attr.width != screen_width || attr.height != screen_height)
|
||||
compositor_window = None;
|
||||
}
|
||||
|
||||
if(children)
|
||||
XFree(children);
|
||||
|
||||
return compositor_window;
|
||||
}
|
||||
|
||||
/* TODO: check for glx swap control extension string (GLX_EXT_swap_control, etc) */
|
||||
static void set_vertical_sync_enabled(Display *display, Window window, gsr_gl *gl, bool enabled) {
|
||||
int result = 0;
|
||||
|
||||
if(gl->glXSwapIntervalEXT) {
|
||||
gl->glXSwapIntervalEXT(display, window, enabled ? 1 : 0);
|
||||
} else if(gl->glXSwapIntervalMESA) {
|
||||
result = gl->glXSwapIntervalMESA(enabled ? 1 : 0);
|
||||
} else if(gl->glXSwapIntervalSGI) {
|
||||
result = gl->glXSwapIntervalSGI(enabled ? 1 : 0);
|
||||
} else {
|
||||
static int warned = 0;
|
||||
if (!warned) {
|
||||
warned = 1;
|
||||
fprintf(stderr, "Warning: setting vertical sync not supported\n");
|
||||
}
|
||||
}
|
||||
|
||||
if(result != 0)
|
||||
fprintf(stderr, "Warning: setting vertical sync failed\n");
|
||||
}
|
||||
|
||||
static bool cuda_register_opengl_texture(gsr_capture_xcomposite *cap_xcomp) {
|
||||
static bool cuda_register_opengl_texture(gsr_capture_xcomposite_cuda *cap_xcomp) {
|
||||
CUresult res;
|
||||
CUcontext old_ctx;
|
||||
res = cap_xcomp->cuda.cuCtxPushCurrent_v2(cap_xcomp->cuda.cu_ctx);
|
||||
@ -112,23 +61,22 @@ static bool cuda_register_opengl_texture(gsr_capture_xcomposite *cap_xcomp) {
|
||||
return false;
|
||||
}
|
||||
|
||||
/* Get texture */
|
||||
res = cap_xcomp->cuda.cuGraphicsResourceSetMapFlags(cap_xcomp->cuda_graphics_resource, CU_GRAPHICS_MAP_RESOURCE_FLAGS_READ_ONLY);
|
||||
res = cap_xcomp->cuda.cuGraphicsMapResources(1, &cap_xcomp->cuda_graphics_resource, 0);
|
||||
|
||||
/* Map texture to cuda array */
|
||||
res = cap_xcomp->cuda.cuGraphicsSubResourceGetMappedArray(&cap_xcomp->mapped_array, cap_xcomp->cuda_graphics_resource, 0, 0);
|
||||
res = cap_xcomp->cuda.cuCtxPopCurrent_v2(&old_ctx);
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool cuda_create_codec_context(gsr_capture_xcomposite *cap_xcomp, AVCodecContext *video_codec_context) {
|
||||
static bool cuda_create_codec_context(gsr_capture_xcomposite_cuda *cap_xcomp, AVCodecContext *video_codec_context) {
|
||||
CUcontext old_ctx;
|
||||
cap_xcomp->cuda.cuCtxPushCurrent_v2(cap_xcomp->cuda.cu_ctx);
|
||||
|
||||
AVBufferRef *device_ctx = av_hwdevice_ctx_alloc(AV_HWDEVICE_TYPE_CUDA);
|
||||
if(!device_ctx) {
|
||||
fprintf(stderr, "Error: Failed to create hardware device context\n");
|
||||
cap_xcomp->cuda.cuCtxPopCurrent_v2(&old_ctx);
|
||||
return false;
|
||||
}
|
||||
|
||||
@ -173,7 +121,7 @@ static bool cuda_create_codec_context(gsr_capture_xcomposite *cap_xcomp, AVCodec
|
||||
return true;
|
||||
}
|
||||
|
||||
static unsigned int gl_create_texture(gsr_capture_xcomposite *cap_xcomp, int width, int height) {
|
||||
static unsigned int gl_create_texture(gsr_capture_xcomposite_cuda *cap_xcomp, int width, int height) {
|
||||
// Generating this second texture is needed because
|
||||
// cuGraphicsGLRegisterImage cant be used with the texture that is mapped
|
||||
// directly to the pixmap.
|
||||
@ -182,25 +130,25 @@ static unsigned int gl_create_texture(gsr_capture_xcomposite *cap_xcomp, int wid
|
||||
// then needed every frame.
|
||||
// Ignoring failure for now.. TODO: Show proper error
|
||||
unsigned int texture_id = 0;
|
||||
cap_xcomp->gl.glGenTextures(1, &texture_id);
|
||||
cap_xcomp->gl.glBindTexture(GL_TEXTURE_2D, texture_id);
|
||||
cap_xcomp->gl.glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, NULL);
|
||||
cap_xcomp->egl.glGenTextures(1, &texture_id);
|
||||
cap_xcomp->egl.glBindTexture(GL_TEXTURE_2D, texture_id);
|
||||
cap_xcomp->egl.glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, NULL);
|
||||
|
||||
cap_xcomp->gl.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
|
||||
cap_xcomp->gl.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
|
||||
cap_xcomp->gl.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
|
||||
cap_xcomp->gl.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
|
||||
cap_xcomp->egl.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
|
||||
cap_xcomp->egl.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
|
||||
cap_xcomp->egl.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
|
||||
cap_xcomp->egl.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
|
||||
|
||||
cap_xcomp->gl.glBindTexture(GL_TEXTURE_2D, 0);
|
||||
cap_xcomp->egl.glBindTexture(GL_TEXTURE_2D, 0);
|
||||
return texture_id;
|
||||
}
|
||||
|
||||
static int gsr_capture_xcomposite_start(gsr_capture *cap, AVCodecContext *video_codec_context) {
|
||||
gsr_capture_xcomposite *cap_xcomp = cap->priv;
|
||||
static int gsr_capture_xcomposite_cuda_start(gsr_capture *cap, AVCodecContext *video_codec_context) {
|
||||
gsr_capture_xcomposite_cuda *cap_xcomp = cap->priv;
|
||||
|
||||
XWindowAttributes attr;
|
||||
if(!XGetWindowAttributes(cap_xcomp->dpy, cap_xcomp->params.window, &attr)) {
|
||||
fprintf(stderr, "gsr error: gsr_capture_xcomposite_start failed: invalid window id: %lu\n", cap_xcomp->params.window);
|
||||
fprintf(stderr, "gsr error: gsr_capture_xcomposite_cuda_start failed: invalid window id: %lu\n", cap_xcomp->params.window);
|
||||
return -1;
|
||||
}
|
||||
|
||||
@ -211,32 +159,33 @@ static int gsr_capture_xcomposite_start(gsr_capture *cap, AVCodecContext *video_
|
||||
|
||||
XSelectInput(cap_xcomp->dpy, cap_xcomp->params.window, StructureNotifyMask | ExposureMask);
|
||||
|
||||
if(!gsr_gl_load(&cap_xcomp->gl, cap_xcomp->dpy)) {
|
||||
fprintf(stderr, "gsr error: gsr_capture_xcomposite_start: failed to load opengl\n");
|
||||
if(!gsr_egl_load(&cap_xcomp->egl, cap_xcomp->dpy)) {
|
||||
fprintf(stderr, "gsr error: gsr_capture_xcomposite_cuda_start: failed to load opengl\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
set_vertical_sync_enabled(cap_xcomp->dpy, cap_xcomp->gl.window, &cap_xcomp->gl, false);
|
||||
if(window_texture_init(&cap_xcomp->window_texture, cap_xcomp->dpy, cap_xcomp->params.window, &cap_xcomp->gl) != 0) {
|
||||
fprintf(stderr, "gsr error: gsr_capture_xcomposite_start: failed get window texture for window %ld\n", cap_xcomp->params.window);
|
||||
gsr_gl_unload(&cap_xcomp->gl);
|
||||
cap_xcomp->egl.eglSwapInterval(cap_xcomp->egl.egl_display, 0);
|
||||
// TODO: Fallback to composite window
|
||||
if(window_texture_init(&cap_xcomp->window_texture, cap_xcomp->dpy, cap_xcomp->params.window, &cap_xcomp->egl) != 0) {
|
||||
fprintf(stderr, "gsr error: gsr_capture_xcomposite_cuda_start: failed get window texture for window %ld\n", cap_xcomp->params.window);
|
||||
gsr_egl_unload(&cap_xcomp->egl);
|
||||
return -1;
|
||||
}
|
||||
|
||||
cap_xcomp->gl.glBindTexture(GL_TEXTURE_2D, window_texture_get_opengl_texture_id(&cap_xcomp->window_texture));
|
||||
cap_xcomp->egl.glBindTexture(GL_TEXTURE_2D, window_texture_get_opengl_texture_id(&cap_xcomp->window_texture));
|
||||
cap_xcomp->texture_size.x = 0;
|
||||
cap_xcomp->texture_size.y = 0;
|
||||
cap_xcomp->gl.glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_WIDTH, &cap_xcomp->texture_size.x);
|
||||
cap_xcomp->gl.glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_HEIGHT, &cap_xcomp->texture_size.y);
|
||||
cap_xcomp->gl.glBindTexture(GL_TEXTURE_2D, 0);
|
||||
cap_xcomp->egl.glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_WIDTH, &cap_xcomp->texture_size.x);
|
||||
cap_xcomp->egl.glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_HEIGHT, &cap_xcomp->texture_size.y);
|
||||
cap_xcomp->egl.glBindTexture(GL_TEXTURE_2D, 0);
|
||||
|
||||
cap_xcomp->texture_size.x = max_int(2, cap_xcomp->texture_size.x & ~1);
|
||||
cap_xcomp->texture_size.y = max_int(2, cap_xcomp->texture_size.y & ~1);
|
||||
|
||||
cap_xcomp->target_texture_id = gl_create_texture(cap_xcomp, cap_xcomp->texture_size.x, cap_xcomp->texture_size.y);
|
||||
if(cap_xcomp->target_texture_id == 0) {
|
||||
fprintf(stderr, "gsr error: gsr_capture_xcomposite_start: failed to create opengl texture\n");
|
||||
gsr_capture_xcomposite_stop(cap, video_codec_context);
|
||||
fprintf(stderr, "gsr error: gsr_capture_xcomposite_cuda_start: failed to create opengl texture\n");
|
||||
gsr_capture_xcomposite_cuda_stop(cap, video_codec_context);
|
||||
return -1;
|
||||
}
|
||||
|
||||
@ -244,17 +193,17 @@ static int gsr_capture_xcomposite_start(gsr_capture *cap, AVCodecContext *video_
|
||||
video_codec_context->height = cap_xcomp->texture_size.y;
|
||||
|
||||
if(!gsr_cuda_load(&cap_xcomp->cuda)) {
|
||||
gsr_capture_xcomposite_stop(cap, video_codec_context);
|
||||
gsr_capture_xcomposite_cuda_stop(cap, video_codec_context);
|
||||
return -1;
|
||||
}
|
||||
|
||||
if(!cuda_create_codec_context(cap_xcomp, video_codec_context)) {
|
||||
gsr_capture_xcomposite_stop(cap, video_codec_context);
|
||||
gsr_capture_xcomposite_cuda_stop(cap, video_codec_context);
|
||||
return -1;
|
||||
}
|
||||
|
||||
if(!cuda_register_opengl_texture(cap_xcomp)) {
|
||||
gsr_capture_xcomposite_stop(cap, video_codec_context);
|
||||
gsr_capture_xcomposite_cuda_stop(cap, video_codec_context);
|
||||
return -1;
|
||||
}
|
||||
|
||||
@ -262,13 +211,13 @@ static int gsr_capture_xcomposite_start(gsr_capture *cap, AVCodecContext *video_
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void gsr_capture_xcomposite_stop(gsr_capture *cap, AVCodecContext *video_codec_context) {
|
||||
gsr_capture_xcomposite *cap_xcomp = cap->priv;
|
||||
static void gsr_capture_xcomposite_cuda_stop(gsr_capture *cap, AVCodecContext *video_codec_context) {
|
||||
gsr_capture_xcomposite_cuda *cap_xcomp = cap->priv;
|
||||
|
||||
window_texture_deinit(&cap_xcomp->window_texture);
|
||||
|
||||
if(cap_xcomp->target_texture_id) {
|
||||
cap_xcomp->gl.glDeleteTextures(1, &cap_xcomp->target_texture_id);
|
||||
cap_xcomp->egl.glDeleteTextures(1, &cap_xcomp->target_texture_id);
|
||||
cap_xcomp->target_texture_id = 0;
|
||||
}
|
||||
|
||||
@ -280,35 +229,42 @@ static void gsr_capture_xcomposite_stop(gsr_capture *cap, AVCodecContext *video_
|
||||
av_buffer_unref(&video_codec_context->hw_device_ctx);
|
||||
av_buffer_unref(&video_codec_context->hw_frames_ctx);
|
||||
|
||||
if(cap_xcomp->cuda.cu_ctx) {
|
||||
CUcontext old_ctx;
|
||||
cap_xcomp->cuda.cuCtxPushCurrent_v2(cap_xcomp->cuda.cu_ctx);
|
||||
|
||||
cap_xcomp->cuda.cuGraphicsUnmapResources(1, &cap_xcomp->cuda_graphics_resource, 0);
|
||||
cap_xcomp->cuda.cuGraphicsUnregisterResource(cap_xcomp->cuda_graphics_resource);
|
||||
cap_xcomp->cuda.cuCtxPopCurrent_v2(&old_ctx);
|
||||
}
|
||||
gsr_cuda_unload(&cap_xcomp->cuda);
|
||||
|
||||
gsr_gl_unload(&cap_xcomp->gl);
|
||||
gsr_egl_unload(&cap_xcomp->egl);
|
||||
if(cap_xcomp->dpy) {
|
||||
// TODO: Why is this crashing?
|
||||
XCloseDisplay(cap_xcomp->dpy);
|
||||
cap_xcomp->dpy = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
static void gsr_capture_xcomposite_tick(gsr_capture *cap, AVCodecContext *video_codec_context, AVFrame **frame) {
|
||||
gsr_capture_xcomposite *cap_xcomp = cap->priv;
|
||||
static void gsr_capture_xcomposite_cuda_tick(gsr_capture *cap, AVCodecContext *video_codec_context, AVFrame **frame) {
|
||||
gsr_capture_xcomposite_cuda *cap_xcomp = cap->priv;
|
||||
|
||||
cap_xcomp->gl.glClear(GL_COLOR_BUFFER_BIT);
|
||||
cap_xcomp->egl.glClear(GL_COLOR_BUFFER_BIT);
|
||||
|
||||
if(!cap_xcomp->created_hw_frame) {
|
||||
cap_xcomp->created_hw_frame = true;
|
||||
CUcontext old_ctx;
|
||||
cap_xcomp->cuda.cuCtxPushCurrent_v2(cap_xcomp->cuda.cu_ctx);
|
||||
|
||||
if(av_hwframe_get_buffer(video_codec_context->hw_frames_ctx, *frame, 0) < 0) {
|
||||
fprintf(stderr, "gsr error: gsr_capture_xcomposite_tick: av_hwframe_get_buffer failed\n");
|
||||
fprintf(stderr, "gsr error: gsr_capture_xcomposite_cuda_tick: av_hwframe_get_buffer failed\n");
|
||||
cap_xcomp->should_stop = true;
|
||||
cap_xcomp->stop_is_error = true;
|
||||
cap_xcomp->cuda.cuCtxPopCurrent_v2(&old_ctx);
|
||||
return;
|
||||
}
|
||||
|
||||
cap_xcomp->created_hw_frame = true;
|
||||
cap_xcomp->cuda.cuCtxPopCurrent_v2(&old_ctx);
|
||||
}
|
||||
|
||||
@ -343,25 +299,25 @@ static void gsr_capture_xcomposite_tick(gsr_capture *cap, AVCodecContext *video_
|
||||
cap_xcomp->window_resized = false;
|
||||
fprintf(stderr, "Resize window!\n");
|
||||
if(window_texture_on_resize(&cap_xcomp->window_texture) != 0) {
|
||||
fprintf(stderr, "gsr error: gsr_capture_xcomposite_tick: window_texture_on_resize failed\n");
|
||||
fprintf(stderr, "gsr error: gsr_capture_xcomposite_cuda_tick: window_texture_on_resize failed\n");
|
||||
cap_xcomp->should_stop = true;
|
||||
cap_xcomp->stop_is_error = true;
|
||||
return;
|
||||
}
|
||||
|
||||
cap_xcomp->gl.glBindTexture(GL_TEXTURE_2D, window_texture_get_opengl_texture_id(&cap_xcomp->window_texture));
|
||||
cap_xcomp->egl.glBindTexture(GL_TEXTURE_2D, window_texture_get_opengl_texture_id(&cap_xcomp->window_texture));
|
||||
cap_xcomp->texture_size.x = 0;
|
||||
cap_xcomp->texture_size.y = 0;
|
||||
cap_xcomp->gl.glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_WIDTH, &cap_xcomp->texture_size.x);
|
||||
cap_xcomp->gl.glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_HEIGHT, &cap_xcomp->texture_size.y);
|
||||
cap_xcomp->gl.glBindTexture(GL_TEXTURE_2D, 0);
|
||||
cap_xcomp->egl.glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_WIDTH, &cap_xcomp->texture_size.x);
|
||||
cap_xcomp->egl.glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_HEIGHT, &cap_xcomp->texture_size.y);
|
||||
cap_xcomp->egl.glBindTexture(GL_TEXTURE_2D, 0);
|
||||
|
||||
cap_xcomp->texture_size.x = min_int(video_codec_context->width, max_int(2, cap_xcomp->texture_size.x & ~1));
|
||||
cap_xcomp->texture_size.y = min_int(video_codec_context->height, max_int(2, cap_xcomp->texture_size.y & ~1));
|
||||
|
||||
cap_xcomp->gl.glBindTexture(GL_TEXTURE_2D, cap_xcomp->target_texture_id);
|
||||
cap_xcomp->gl.glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, cap_xcomp->texture_size.x, cap_xcomp->texture_size.y, 0, GL_RGB, GL_UNSIGNED_BYTE, NULL);
|
||||
cap_xcomp->gl.glBindTexture(GL_TEXTURE_2D, 0);
|
||||
cap_xcomp->egl.glBindTexture(GL_TEXTURE_2D, cap_xcomp->target_texture_id);
|
||||
cap_xcomp->egl.glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, cap_xcomp->texture_size.x, cap_xcomp->texture_size.y, 0, GL_RGB, GL_UNSIGNED_BYTE, NULL);
|
||||
cap_xcomp->egl.glBindTexture(GL_TEXTURE_2D, 0);
|
||||
|
||||
CUcontext old_ctx;
|
||||
CUresult res = cap_xcomp->cuda.cuCtxPushCurrent_v2(cap_xcomp->cuda.cu_ctx);
|
||||
@ -372,7 +328,7 @@ static void gsr_capture_xcomposite_tick(gsr_capture *cap, AVCodecContext *video_
|
||||
if (res != CUDA_SUCCESS) {
|
||||
const char *err_str = "unknown";
|
||||
cap_xcomp->cuda.cuGetErrorString(res, &err_str);
|
||||
fprintf(stderr, "gsr error: gsr_capture_xcomposite_tick: cuGraphicsGLRegisterImage failed, error %s, texture id: %u\n", err_str, cap_xcomp->target_texture_id);
|
||||
fprintf(stderr, "gsr error: gsr_capture_xcomposite_cuda_tick: cuGraphicsGLRegisterImage failed, error %s, texture id: %u\n", err_str, cap_xcomp->target_texture_id);
|
||||
cap_xcomp->should_stop = true;
|
||||
cap_xcomp->stop_is_error = true;
|
||||
res = cap_xcomp->cuda.cuCtxPopCurrent_v2(&old_ctx);
|
||||
@ -386,7 +342,7 @@ static void gsr_capture_xcomposite_tick(gsr_capture *cap, AVCodecContext *video_
|
||||
av_frame_free(frame);
|
||||
*frame = av_frame_alloc();
|
||||
if(!frame) {
|
||||
fprintf(stderr, "gsr error: gsr_capture_xcomposite_tick: failed to allocate frame\n");
|
||||
fprintf(stderr, "gsr error: gsr_capture_xcomposite_cuda_tick: failed to allocate frame\n");
|
||||
cap_xcomp->should_stop = true;
|
||||
cap_xcomp->stop_is_error = true;
|
||||
res = cap_xcomp->cuda.cuCtxPopCurrent_v2(&old_ctx);
|
||||
@ -395,9 +351,10 @@ static void gsr_capture_xcomposite_tick(gsr_capture *cap, AVCodecContext *video_
|
||||
(*frame)->format = video_codec_context->pix_fmt;
|
||||
(*frame)->width = video_codec_context->width;
|
||||
(*frame)->height = video_codec_context->height;
|
||||
(*frame)->color_range = AVCOL_RANGE_JPEG;
|
||||
|
||||
if(av_hwframe_get_buffer(video_codec_context->hw_frames_ctx, *frame, 0) < 0) {
|
||||
fprintf(stderr, "gsr error: gsr_capture_xcomposite_tick: av_hwframe_get_buffer failed\n");
|
||||
fprintf(stderr, "gsr error: gsr_capture_xcomposite_cuda_tick: av_hwframe_get_buffer failed\n");
|
||||
cap_xcomp->should_stop = true;
|
||||
cap_xcomp->stop_is_error = true;
|
||||
res = cap_xcomp->cuda.cuCtxPopCurrent_v2(&old_ctx);
|
||||
@ -411,8 +368,8 @@ static void gsr_capture_xcomposite_tick(gsr_capture *cap, AVCodecContext *video_
|
||||
}
|
||||
}
|
||||
|
||||
static bool gsr_capture_xcomposite_should_stop(gsr_capture *cap, bool *err) {
|
||||
gsr_capture_xcomposite *cap_xcomp = cap->priv;
|
||||
static bool gsr_capture_xcomposite_cuda_should_stop(gsr_capture *cap, bool *err) {
|
||||
gsr_capture_xcomposite_cuda *cap_xcomp = cap->priv;
|
||||
if(cap_xcomp->should_stop) {
|
||||
if(err)
|
||||
*err = cap_xcomp->stop_is_error;
|
||||
@ -424,19 +381,19 @@ static bool gsr_capture_xcomposite_should_stop(gsr_capture *cap, bool *err) {
|
||||
return false;
|
||||
}
|
||||
|
||||
static int gsr_capture_xcomposite_capture(gsr_capture *cap, AVFrame *frame) {
|
||||
gsr_capture_xcomposite *cap_xcomp = cap->priv;
|
||||
static int gsr_capture_xcomposite_cuda_capture(gsr_capture *cap, AVFrame *frame) {
|
||||
gsr_capture_xcomposite_cuda *cap_xcomp = cap->priv;
|
||||
|
||||
// TODO: Use a framebuffer instead. glCopyImageSubData requires opengl 4.2
|
||||
vec2i source_pos = { 0, 0 };
|
||||
vec2i source_size = cap_xcomp->texture_size;
|
||||
|
||||
// Requires opengl 4.2... TODO: Replace with earlier opengl if opengl < 4.2.
|
||||
cap_xcomp->gl.glCopyImageSubData(
|
||||
cap_xcomp->egl.glCopyImageSubData(
|
||||
window_texture_get_opengl_texture_id(&cap_xcomp->window_texture), GL_TEXTURE_2D, 0, source_pos.x, source_pos.y, 0,
|
||||
cap_xcomp->target_texture_id, GL_TEXTURE_2D, 0, 0, 0, 0,
|
||||
source_size.x, source_size.y, 1);
|
||||
unsigned int err = cap_xcomp->gl.glGetError();
|
||||
unsigned int err = cap_xcomp->egl.glGetError();
|
||||
if(err != 0) {
|
||||
static bool error_shown = false;
|
||||
if(!error_shown) {
|
||||
@ -444,7 +401,7 @@ static int gsr_capture_xcomposite_capture(gsr_capture *cap, AVFrame *frame) {
|
||||
fprintf(stderr, "Error: glCopyImageSubData failed, gl error: %d\n", err);
|
||||
}
|
||||
}
|
||||
cap_xcomp->gl.glXSwapBuffers(cap_xcomp->dpy, cap_xcomp->gl.window);
|
||||
cap_xcomp->egl.eglSwapBuffers(cap_xcomp->egl.egl_display, cap_xcomp->egl.egl_surface);
|
||||
// TODO: Remove this copy, which is only possible by using nvenc directly and encoding window_pixmap.target_texture_id
|
||||
|
||||
frame->linesize[0] = frame->width * 4;
|
||||
@ -468,8 +425,8 @@ static int gsr_capture_xcomposite_capture(gsr_capture *cap, AVFrame *frame) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void gsr_capture_xcomposite_destroy(gsr_capture *cap, AVCodecContext *video_codec_context) {
|
||||
gsr_capture_xcomposite_stop(cap, video_codec_context);
|
||||
static void gsr_capture_xcomposite_cuda_destroy(gsr_capture *cap, AVCodecContext *video_codec_context) {
|
||||
gsr_capture_xcomposite_cuda_stop(cap, video_codec_context);
|
||||
if(cap->priv) {
|
||||
free(cap->priv);
|
||||
cap->priv = NULL;
|
||||
@ -477,9 +434,9 @@ static void gsr_capture_xcomposite_destroy(gsr_capture *cap, AVCodecContext *vid
|
||||
free(cap);
|
||||
}
|
||||
|
||||
gsr_capture* gsr_capture_xcomposite_create(const gsr_capture_xcomposite_params *params) {
|
||||
gsr_capture* gsr_capture_xcomposite_cuda_create(const gsr_capture_xcomposite_cuda_params *params) {
|
||||
if(!params) {
|
||||
fprintf(stderr, "gsr error: gsr_capture_xcomposite_create params is NULL\n");
|
||||
fprintf(stderr, "gsr error: gsr_capture_xcomposite_cuda_create params is NULL\n");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
@ -487,7 +444,7 @@ gsr_capture* gsr_capture_xcomposite_create(const gsr_capture_xcomposite_params *
|
||||
if(!cap)
|
||||
return NULL;
|
||||
|
||||
gsr_capture_xcomposite *cap_xcomp = calloc(1, sizeof(gsr_capture_xcomposite));
|
||||
gsr_capture_xcomposite_cuda *cap_xcomp = calloc(1, sizeof(gsr_capture_xcomposite_cuda));
|
||||
if(!cap_xcomp) {
|
||||
free(cap);
|
||||
return NULL;
|
||||
@ -495,7 +452,7 @@ gsr_capture* gsr_capture_xcomposite_create(const gsr_capture_xcomposite_params *
|
||||
|
||||
Display *display = XOpenDisplay(NULL);
|
||||
if(!display) {
|
||||
fprintf(stderr, "gsr error: gsr_capture_xcomposite_create failed: XOpenDisplay failed\n");
|
||||
fprintf(stderr, "gsr error: gsr_capture_xcomposite_cuda_create failed: XOpenDisplay failed\n");
|
||||
free(cap);
|
||||
free(cap_xcomp);
|
||||
return NULL;
|
||||
@ -505,11 +462,11 @@ gsr_capture* gsr_capture_xcomposite_create(const gsr_capture_xcomposite_params *
|
||||
cap_xcomp->params = *params;
|
||||
|
||||
*cap = (gsr_capture) {
|
||||
.start = gsr_capture_xcomposite_start,
|
||||
.tick = gsr_capture_xcomposite_tick,
|
||||
.should_stop = gsr_capture_xcomposite_should_stop,
|
||||
.capture = gsr_capture_xcomposite_capture,
|
||||
.destroy = gsr_capture_xcomposite_destroy,
|
||||
.start = gsr_capture_xcomposite_cuda_start,
|
||||
.tick = gsr_capture_xcomposite_cuda_tick,
|
||||
.should_stop = gsr_capture_xcomposite_cuda_should_stop,
|
||||
.capture = gsr_capture_xcomposite_cuda_capture,
|
||||
.destroy = gsr_capture_xcomposite_cuda_destroy,
|
||||
.priv = cap_xcomp
|
||||
};
|
||||
|
src/capture/xcomposite_drm.c (new file): 855 lines
@ -0,0 +1,855 @@
|
||||
#include "../../include/capture/xcomposite_drm.h"
|
||||
#include "../../include/egl.h"
|
||||
#include "../../include/window_texture.h"
|
||||
#include "../../include/time.h"
|
||||
#include <stdlib.h>
|
||||
#include <stdio.h>
|
||||
#include <X11/Xlib.h>
|
||||
#include <X11/extensions/Xcomposite.h>
|
||||
#include <libavutil/hwcontext.h>
|
||||
#include <libavutil/hwcontext_drm.h>
|
||||
#include <libavutil/frame.h>
|
||||
#include <libavcodec/avcodec.h>
|
||||
//#include <drm_fourcc.h>
|
||||
#include <assert.h>
|
||||
/* TODO: Proper error checks and cleanups */
|
||||
|
||||
typedef struct {
|
||||
gsr_capture_xcomposite_drm_params params;
|
||||
Display *dpy;
|
||||
XEvent xev;
|
||||
bool created_hw_frame;
|
||||
|
||||
vec2i window_pos;
|
||||
vec2i window_size;
|
||||
vec2i texture_size;
|
||||
double window_resize_timer;
|
||||
|
||||
WindowTexture window_texture;
|
||||
|
||||
gsr_egl egl;
|
||||
|
||||
int fourcc;
|
||||
int num_planes;
|
||||
uint64_t modifiers;
|
||||
int dmabuf_fd;
|
||||
int32_t stride;
|
||||
int32_t offset;
|
||||
|
||||
unsigned int target_texture_id;
|
||||
|
||||
unsigned int FramebufferName;
|
||||
unsigned int quad_VertexArrayID;
|
||||
unsigned int quad_vertexbuffer;
|
||||
unsigned int quadVAO;
|
||||
} gsr_capture_xcomposite_drm;
|
||||
|
||||
static int max_int(int a, int b) {
|
||||
return a > b ? a : b;
|
||||
}
|
||||
|
||||
static int min_int(int a, int b) {
|
||||
return a < b ? a : b;
|
||||
}
|
||||
|
||||
static bool drm_create_codec_context(gsr_capture_xcomposite_drm *cap_xcomp, AVCodecContext *video_codec_context) {
|
||||
// TODO: "/dev/dri/card0"
|
||||
AVBufferRef *device_ctx;
|
||||
if(av_hwdevice_ctx_create(&device_ctx, AV_HWDEVICE_TYPE_VAAPI, "/dev/dri/card0", NULL, 0) < 0) {
|
||||
fprintf(stderr, "Error: Failed to create hardware device context\n");
|
||||
return false;
|
||||
}
|
||||
|
||||
AVBufferRef *frame_context = av_hwframe_ctx_alloc(device_ctx);
|
||||
if(!frame_context) {
|
||||
fprintf(stderr, "Error: Failed to create hwframe context\n");
|
||||
av_buffer_unref(&device_ctx);
|
||||
return false;
|
||||
}
|
||||
|
||||
AVHWFramesContext *hw_frame_context =
|
||||
(AVHWFramesContext *)frame_context->data;
|
||||
hw_frame_context->width = video_codec_context->width;
|
||||
hw_frame_context->height = video_codec_context->height;
|
||||
hw_frame_context->sw_format = AV_PIX_FMT_YUV420P; // TODO: also try AV_PIX_FMT_0RGB32 or AV_PIX_FMT_NV12
|
||||
hw_frame_context->format = video_codec_context->pix_fmt;
|
||||
hw_frame_context->device_ref = device_ctx;
|
||||
hw_frame_context->device_ctx = (AVHWDeviceContext*)device_ctx->data;
|
||||
|
||||
if (av_hwframe_ctx_init(frame_context) < 0) {
|
||||
fprintf(stderr, "Error: Failed to initialize hardware frame context "
|
||||
"(note: ffmpeg version needs to be > 4.0)\n");
|
||||
av_buffer_unref(&device_ctx);
|
||||
av_buffer_unref(&frame_context);
|
||||
return false;
|
||||
}
|
||||
|
||||
video_codec_context->hw_device_ctx = device_ctx; // TODO: av_buffer_ref? and in more places
|
||||
video_codec_context->hw_frames_ctx = frame_context;
|
||||
return true;
|
||||
}
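/* The "/dev/dri/card0" path above is hard-coded (see the TODO). A rough sketch of
 * picking the first available DRM render node instead, assuming the usual
 * /dev/dri/renderD12x naming; illustrative only, not used by this file: */
#if 0
#include <unistd.h>
static const char* drm_pick_render_node(char *buf, size_t buf_size) {
    /* render nodes conventionally start at 128 */
    for(int i = 128; i < 128 + 16; ++i) {
        snprintf(buf, buf_size, "/dev/dri/renderD%d", i);
        if(access(buf, F_OK) == 0)
            return buf; /* first render node that exists */
    }
    return "/dev/dri/card0"; /* fall back to the primary node */
}
#endif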
|
||||
|
||||
#define EGL_SURFACE_TYPE 0x3033
|
||||
#define EGL_WINDOW_BIT 0x0004
|
||||
#define EGL_PIXMAP_BIT 0x0002
|
||||
#define EGL_BIND_TO_TEXTURE_RGB 0x3039
|
||||
#define EGL_TRUE 1
|
||||
#define EGL_RED_SIZE 0x3024
|
||||
#define EGL_GREEN_SIZE 0x3023
|
||||
#define EGL_BLUE_SIZE 0x3022
|
||||
#define EGL_ALPHA_SIZE 0x3021
|
||||
#define EGL_TEXTURE_FORMAT 0x3080
|
||||
#define EGL_TEXTURE_RGB 0x305D
|
||||
#define EGL_TEXTURE_TARGET 0x3081
|
||||
#define EGL_TEXTURE_2D 0x305F
|
||||
#define EGL_GL_TEXTURE_2D 0x30B1
|
||||
|
||||
#define GL_RGBA 0x1908
|
||||
|
||||
static unsigned int gl_create_texture(gsr_capture_xcomposite_drm *cap_xcomp, int width, int height) {
|
||||
// Generating this second texture is needed because
|
||||
// cuGraphicsGLRegisterImage can't be used with the texture that is mapped
|
||||
// directly to the pixmap.
|
||||
// TODO: Investigate if it's somehow possible to use the pixmap texture
|
||||
// directly, this should improve performance since one less image copy is
|
||||
// then needed every frame.
|
||||
// Ignoring failure for now.. TODO: Show proper error
|
||||
unsigned int texture_id = 0;
|
||||
cap_xcomp->egl.glGenTextures(1, &texture_id);
|
||||
cap_xcomp->egl.glBindTexture(GL_TEXTURE_2D, texture_id);
|
||||
cap_xcomp->egl.glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
|
||||
|
||||
cap_xcomp->egl.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
|
||||
cap_xcomp->egl.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
|
||||
cap_xcomp->egl.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
|
||||
cap_xcomp->egl.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
|
||||
|
||||
cap_xcomp->egl.glBindTexture(GL_TEXTURE_2D, 0);
|
||||
return texture_id;
|
||||
}
|
||||
|
||||
#define GL_COMPILE_STATUS 0x8B81
|
||||
#define GL_INFO_LOG_LENGTH 0x8B84
|
||||
|
||||
unsigned int esLoadShader ( gsr_capture_xcomposite_drm *cap_xcomp, unsigned int type, const char *shaderSrc ) {
|
||||
unsigned int shader;
|
||||
int compiled;
|
||||
|
||||
// Create the shader object
|
||||
shader = cap_xcomp->egl.glCreateShader ( type );
|
||||
|
||||
if ( shader == 0 )
|
||||
return 0;
|
||||
|
||||
// Load the shader source
|
||||
cap_xcomp->egl.glShaderSource ( shader, 1, &shaderSrc, NULL );
|
||||
|
||||
// Compile the shader
|
||||
cap_xcomp->egl.glCompileShader ( shader );
|
||||
|
||||
// Check the compile status
|
||||
cap_xcomp->egl.glGetShaderiv ( shader, GL_COMPILE_STATUS, &compiled );
|
||||
|
||||
if ( !compiled )
|
||||
{
|
||||
int infoLen = 0;
|
||||
|
||||
cap_xcomp->egl.glGetShaderiv ( shader, GL_INFO_LOG_LENGTH, &infoLen );
|
||||
|
||||
if ( infoLen > 1 )
|
||||
{
|
||||
char* infoLog = malloc (sizeof(char) * infoLen );
|
||||
|
||||
cap_xcomp->egl.glGetShaderInfoLog ( shader, infoLen, NULL, infoLog );
|
||||
fprintf (stderr, "Error compiling shader:\n%s\n", infoLog );
|
||||
|
||||
free ( infoLog );
|
||||
}
|
||||
|
||||
cap_xcomp->egl.glDeleteShader ( shader );
|
||||
return 0;
|
||||
}
|
||||
|
||||
return shader;
|
||||
|
||||
}
|
||||
|
||||
#define GL_FRAGMENT_SHADER 0x8B30
|
||||
#define GL_VERTEX_SHADER 0x8B31
|
||||
#define GL_COMPILE_STATUS 0x8B81
|
||||
#define GL_LINK_STATUS 0x8B82
|
||||
|
||||
|
||||
//
|
||||
///
|
||||
/// \brief Load a vertex and fragment shader, create a program object, link program.
|
||||
// Errors output to log.
|
||||
/// \param vertShaderSrc Vertex shader source code
|
||||
/// \param fragShaderSrc Fragment shader source code
|
||||
/// \return A new program object linked with the vertex/fragment shader pair, 0 on failure
|
||||
//
|
||||
unsigned int esLoadProgram ( gsr_capture_xcomposite_drm *cap_xcomp, const char *vertShaderSrc, const char *fragShaderSrc )
|
||||
{
|
||||
unsigned int vertexShader;
|
||||
unsigned int fragmentShader;
|
||||
unsigned int programObject;
|
||||
int linked;
|
||||
|
||||
// Load the vertex/fragment shaders
|
||||
vertexShader = esLoadShader ( cap_xcomp, GL_VERTEX_SHADER, vertShaderSrc );
|
||||
if ( vertexShader == 0 )
|
||||
return 0;
|
||||
|
||||
fragmentShader = esLoadShader ( cap_xcomp, GL_FRAGMENT_SHADER, fragShaderSrc );
|
||||
if ( fragmentShader == 0 )
|
||||
{
|
||||
cap_xcomp->egl.glDeleteShader( vertexShader );
|
||||
return 0;
|
||||
}
|
||||
|
||||
// Create the program object
|
||||
programObject = cap_xcomp->egl.glCreateProgram ( );
|
||||
|
||||
if ( programObject == 0 )
|
||||
return 0;
|
||||
|
||||
cap_xcomp->egl.glAttachShader ( programObject, vertexShader );
|
||||
cap_xcomp->egl.glAttachShader ( programObject, fragmentShader );
|
||||
|
||||
// Link the program
|
||||
cap_xcomp->egl.glLinkProgram ( programObject );
|
||||
|
||||
// Check the link status
|
||||
cap_xcomp->egl.glGetProgramiv ( programObject, GL_LINK_STATUS, &linked );
|
||||
|
||||
if ( !linked )
|
||||
{
|
||||
int infoLen = 0;
|
||||
|
||||
cap_xcomp->egl.glGetProgramiv ( programObject, GL_INFO_LOG_LENGTH, &infoLen );
|
||||
|
||||
if ( infoLen > 1 )
|
||||
{
|
||||
char* infoLog = malloc (sizeof(char) * infoLen );
|
||||
|
||||
cap_xcomp->egl.glGetProgramInfoLog ( programObject, infoLen, NULL, infoLog );
|
||||
fprintf (stderr, "Error linking program:\n%s\n", infoLog );
|
||||
|
||||
free ( infoLog );
|
||||
}
|
||||
|
||||
cap_xcomp->egl.glDeleteProgram ( programObject );
|
||||
return 0;
|
||||
}
|
||||
|
||||
// Free up no longer needed shader resources
|
||||
cap_xcomp->egl.glDeleteShader ( vertexShader );
|
||||
cap_xcomp->egl.glDeleteShader ( fragmentShader );
|
||||
|
||||
return programObject;
|
||||
}
|
||||
|
||||
static unsigned int shader_program = 0;
|
||||
static unsigned int texID = 0;
|
||||
|
||||
static void LoadShaders(gsr_capture_xcomposite_drm *cap_xcomp) {
|
||||
char vShaderStr[] =
|
||||
"#version 300 es \n"
|
||||
"in vec2 pos; \n"
|
||||
"in vec2 texcoords; \n"
|
||||
"out vec2 texcoords_out; \n"
|
||||
"void main() \n"
|
||||
"{ \n"
|
||||
" texcoords_out = texcoords; \n"
|
||||
" gl_Position = vec4(pos.x, pos.y, 0.0, 1.0); \n"
|
||||
"} \n";
|
||||
|
||||
#if 0
|
||||
char fShaderStr[] =
|
||||
"#version 300 es \n"
|
||||
"precision mediump float; \n"
|
||||
"in vec2 texcoords_out; \n"
|
||||
"uniform sampler2D tex; \n"
|
||||
"out vec4 FragColor; \n"
|
||||
|
||||
|
||||
"float imageWidth = 1920.0;\n"
|
||||
"float imageHeight = 1080.0;\n"
|
||||
|
||||
"float getYPixel(vec2 position) {\n"
|
||||
" position.y = (position.y * 2.0 / 3.0) + (1.0 / 3.0);\n"
|
||||
" return texture2D(tex, position).x;\n"
|
||||
"}\n"
|
||||
"\n"
|
||||
"vec2 mapCommon(vec2 position, float planarOffset) {\n"
|
||||
" planarOffset += (imageWidth * floor(position.y / 2.0)) / 2.0 +\n"
|
||||
" floor((imageWidth - 1.0 - position.x) / 2.0);\n"
|
||||
" float x = floor(imageWidth - 1.0 - floor(mod(planarOffset, imageWidth)));\n"
|
||||
" float y = floor(floor(planarOffset / imageWidth));\n"
|
||||
" return vec2((x + 0.5) / imageWidth, (y + 0.5) / (1.5 * imageHeight));\n"
|
||||
"}\n"
|
||||
"\n"
|
||||
"vec2 mapU(vec2 position) {\n"
|
||||
" float planarOffset = (imageWidth * imageHeight) / 4.0;\n"
|
||||
" return mapCommon(position, planarOffset);\n"
|
||||
"}\n"
|
||||
"\n"
|
||||
"vec2 mapV(vec2 position) {\n"
|
||||
" return mapCommon(position, 0.0);\n"
|
||||
"}\n"
|
||||
|
||||
"void main() \n"
|
||||
"{ \n"
|
||||
|
||||
"vec2 pixelPosition = vec2(floor(imageWidth * texcoords_out.x),\n"
|
||||
" floor(imageHeight * texcoords_out.y));\n"
|
||||
"pixelPosition -= vec2(0.5, 0.5);\n"
|
||||
"\n"
|
||||
"float yChannel = getYPixel(texcoords_out);\n"
|
||||
"float uChannel = texture2D(tex, mapU(pixelPosition)).x;\n"
|
||||
"float vChannel = texture2D(tex, mapV(pixelPosition)).x;\n"
|
||||
"vec4 channels = vec4(yChannel, uChannel, vChannel, 1.0);\n"
|
||||
"mat4 conversion = mat4(1.0, 0.0, 1.402, -0.701,\n"
|
||||
" 1.0, -0.344, -0.714, 0.529,\n"
|
||||
" 1.0, 1.772, 0.0, -0.886,\n"
|
||||
" 0, 0, 0, 0);\n"
|
||||
"vec3 rgb = (channels * conversion).xyz;\n"
|
||||
|
||||
" FragColor = vec4(rgb, 1.0); \n"
|
||||
"} \n";
|
||||
#elif 1
|
||||
char fShaderStr[] =
|
||||
"#version 300 es \n"
|
||||
"precision mediump float; \n"
|
||||
"in vec2 texcoords_out; \n"
|
||||
"uniform sampler2D tex; \n"
|
||||
"out vec4 FragColor; \n"
|
||||
"void main() \n"
|
||||
"{ \n"
|
||||
" vec3 rgb = texture(tex, texcoords_out).rgb; \n"
|
||||
" FragColor = vec4(rgb, 1.0); \n"
|
||||
"} \n";
|
||||
#else
|
||||
char fShaderStr[] =
|
||||
"#version 300 es \n"
|
||||
"precision mediump float; \n"
|
||||
"in vec2 texcoords_out; \n"
|
||||
"uniform sampler2D tex; \n"
|
||||
"out vec4 FragColor; \n"
|
||||
|
||||
"vec3 rgb2yuv(vec3 rgb){\n"
|
||||
" float y = 0.299*rgb.r + 0.587*rgb.g + 0.114*rgb.b;\n"
|
||||
" return vec3(y, 0.493*(rgb.b-y), 0.877*(rgb.r-y));\n"
|
||||
"}\n"
|
||||
|
||||
"vec3 yuv2rgb(vec3 yuv){\n"
|
||||
" float y = yuv.x;\n"
|
||||
" float u = yuv.y;\n"
|
||||
" float v = yuv.z;\n"
|
||||
" \n"
|
||||
" return vec3(\n"
|
||||
" y + 1.0/0.877*v,\n"
|
||||
" y - 0.39393*u - 0.58081*v,\n"
|
||||
" y + 1.0/0.493*u\n"
|
||||
" );\n"
|
||||
"}\n"
|
||||
|
||||
"void main() \n"
|
||||
"{ \n"
|
||||
" float s = 0.5;\n"
|
||||
" vec3 lum = texture(tex, texcoords_out).rgb;\n"
|
||||
" vec3 chr = texture(tex, floor(texcoords_out*s-.5)/s).rgb;\n"
|
||||
" vec3 rgb = vec3(rgb2yuv(lum).x, rgb2yuv(chr).yz);\n"
|
||||
" FragColor = vec4(rgb, 1.0); \n"
|
||||
"} \n";
|
||||
#endif
|
||||
|
||||
shader_program = esLoadProgram(cap_xcomp, vShaderStr, fShaderStr);
|
||||
if (shader_program == 0) {
|
||||
fprintf(stderr, "failed to create shader!\n");
|
||||
return;
|
||||
}
|
||||
|
||||
cap_xcomp->egl.glBindAttribLocation(shader_program, 0, "pos");
|
||||
cap_xcomp->egl.glBindAttribLocation(shader_program, 1, "texcoords");
|
||||
return;
|
||||
}
|
||||
|
||||
#define GL_FLOAT 0x1406
|
||||
#define GL_FALSE 0
|
||||
#define GL_TRUE 1
|
||||
#define GL_TRIANGLES 0x0004
|
||||
#define DRM_FORMAT_MOD_INVALID 72057594037927935
|
||||
|
||||
static int gsr_capture_xcomposite_drm_start(gsr_capture *cap, AVCodecContext *video_codec_context) {
|
||||
gsr_capture_xcomposite_drm *cap_xcomp = cap->priv;
|
||||
|
||||
XWindowAttributes attr;
|
||||
if(!XGetWindowAttributes(cap_xcomp->dpy, cap_xcomp->params.window, &attr)) {
|
||||
fprintf(stderr, "gsr error: gsr_capture_xcomposite_start failed: invalid window id: %lu\n", cap_xcomp->params.window);
|
||||
return -1;
|
||||
}
|
||||
|
||||
cap_xcomp->window_size.x = max_int(attr.width, 0);
|
||||
cap_xcomp->window_size.y = max_int(attr.height, 0);
|
||||
Window c;
|
||||
XTranslateCoordinates(cap_xcomp->dpy, cap_xcomp->params.window, DefaultRootWindow(cap_xcomp->dpy), 0, 0, &cap_xcomp->window_pos.x, &cap_xcomp->window_pos.y, &c);
|
||||
|
||||
// TODO: Get the currently selected input events, add these on top of them and then restore them at the end. Also do the same in the other xcomposite capture.
|
||||
XSelectInput(cap_xcomp->dpy, cap_xcomp->params.window, StructureNotifyMask | ExposureMask);
|
||||
|
||||
if(!gsr_egl_load(&cap_xcomp->egl, cap_xcomp->dpy)) {
|
||||
fprintf(stderr, "gsr error: gsr_capture_xcomposite_start: failed to load opengl\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* Disable vsync */
|
||||
cap_xcomp->egl.eglSwapInterval(cap_xcomp->egl.egl_display, 0);
|
||||
#if 0
|
||||
// TODO: Fallback to composite window
|
||||
if(window_texture_init(&cap_xcomp->window_texture, cap_xcomp->dpy, cap_xcomp->params.window, &cap_xcomp->gl) != 0) {
|
||||
fprintf(stderr, "gsr error: gsr_capture_xcomposite_start: failed get window texture for window %ld\n", cap_xcomp->params.window);
|
||||
gsr_egl_unload(&cap_xcomp->egl);
|
||||
return -1;
|
||||
}
|
||||
|
||||
cap_xcomp->egl.glBindTexture(GL_TEXTURE_2D, window_texture_get_opengl_texture_id(&cap_xcomp->window_texture));
|
||||
cap_xcomp->texture_size.x = 0;
|
||||
cap_xcomp->texture_size.y = 0;
|
||||
cap_xcomp->egl.glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_WIDTH, &cap_xcomp->texture_size.x);
|
||||
cap_xcomp->egl.glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_HEIGHT, &cap_xcomp->texture_size.y);
|
||||
cap_xcomp->egl.glBindTexture(GL_TEXTURE_2D, 0);
|
||||
|
||||
cap_xcomp->texture_size.x = max_int(2, cap_xcomp->texture_size.x & ~1);
|
||||
cap_xcomp->texture_size.y = max_int(2, cap_xcomp->texture_size.y & ~1);
|
||||
|
||||
cap_xcomp->target_texture_id = gl_create_texture(cap_xcomp, cap_xcomp->texture_size.x, cap_xcomp->texture_size.y);
|
||||
if(cap_xcomp->target_texture_id == 0) {
|
||||
fprintf(stderr, "gsr error: gsr_capture_xcomposite_start: failed to create opengl texture\n");
|
||||
gsr_capture_xcomposite_stop(cap, video_codec_context);
|
||||
return -1;
|
||||
}
|
||||
|
||||
video_codec_context->width = cap_xcomp->texture_size.x;
|
||||
video_codec_context->height = cap_xcomp->texture_size.y;
|
||||
|
||||
cap_xcomp->window_resize_timer = clock_get_monotonic_seconds();
|
||||
return 0;
|
||||
#else
|
||||
// TODO: Fallback to composite window
|
||||
if(window_texture_init(&cap_xcomp->window_texture, cap_xcomp->dpy, cap_xcomp->params.window, &cap_xcomp->egl) != 0) {
|
||||
fprintf(stderr, "gsr error: gsr_capture_xcomposite_drm_start: failed get window texture for window %ld\n", cap_xcomp->params.window);
|
||||
gsr_egl_unload(&cap_xcomp->egl);
|
||||
return -1;
|
||||
}
|
||||
|
||||
cap_xcomp->egl.glBindTexture(GL_TEXTURE_2D, window_texture_get_opengl_texture_id(&cap_xcomp->window_texture));
|
||||
cap_xcomp->texture_size.x = 0;
|
||||
cap_xcomp->texture_size.y = 0;
|
||||
cap_xcomp->egl.glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_WIDTH, &cap_xcomp->texture_size.x);
|
||||
cap_xcomp->egl.glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_HEIGHT, &cap_xcomp->texture_size.y);
|
||||
cap_xcomp->egl.glBindTexture(GL_TEXTURE_2D, 0);
|
||||
|
||||
#if 1
|
||||
cap_xcomp->target_texture_id = gl_create_texture(cap_xcomp, cap_xcomp->texture_size.x, cap_xcomp->texture_size.y);
|
||||
if(cap_xcomp->target_texture_id == 0) {
|
||||
fprintf(stderr, "gsr error: gsr_capture_xcomposite_drm_start: failed to create opengl texture\n");
|
||||
return -1;
|
||||
}
|
||||
#else
|
||||
// TODO:
|
||||
cap_xcomp->target_texture_id = window_texture_get_opengl_texture_id(&cap_xcomp->window_texture);
|
||||
#endif
|
||||
|
||||
cap_xcomp->texture_size.x = max_int(2, cap_xcomp->texture_size.x & ~1);
|
||||
cap_xcomp->texture_size.y = max_int(2, cap_xcomp->texture_size.y & ~1);
|
||||
|
||||
video_codec_context->width = cap_xcomp->texture_size.x;
|
||||
video_codec_context->height = cap_xcomp->texture_size.y;
|
||||
|
||||
{
|
||||
EGLImage img = cap_xcomp->egl.eglCreateImage(cap_xcomp->egl.egl_display, cap_xcomp->egl.egl_context, EGL_GL_TEXTURE_2D, (EGLClientBuffer)(uint64_t)cap_xcomp->target_texture_id, NULL);
|
||||
if(!img) {
|
||||
fprintf(stderr, "eglCreateImage failed\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
if(!cap_xcomp->egl.eglExportDMABUFImageQueryMESA(cap_xcomp->egl.egl_display, img, &cap_xcomp->fourcc, &cap_xcomp->num_planes, &cap_xcomp->modifiers) || cap_xcomp->modifiers == DRM_FORMAT_MOD_INVALID) {
|
||||
fprintf(stderr, "eglExportDMABUFImageQueryMESA failed\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
if(cap_xcomp->num_planes != 1) {
|
||||
// TODO: FAIL!
|
||||
fprintf(stderr, "Blablalba\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
if(!cap_xcomp->egl.eglExportDMABUFImageMESA(cap_xcomp->egl.egl_display, img, &cap_xcomp->dmabuf_fd, &cap_xcomp->stride, &cap_xcomp->offset)) {
|
||||
fprintf(stderr, "eglExportDMABUFImageMESA failed\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
fprintf(stderr, "texture: %u, dmabuf: %d, stride: %d, offset: %d\n", cap_xcomp->target_texture_id, cap_xcomp->dmabuf_fd, cap_xcomp->stride, cap_xcomp->offset);
|
||||
fprintf(stderr, "fourcc: %d, num planes: %d, modifiers: %zu\n", cap_xcomp->fourcc, cap_xcomp->num_planes, cap_xcomp->modifiers);
|
||||
}
|
||||
|
||||
cap_xcomp->egl.glGenFramebuffers(1, &cap_xcomp->FramebufferName);
|
||||
cap_xcomp->egl.glBindFramebuffer(GL_FRAMEBUFFER, cap_xcomp->FramebufferName);
|
||||
|
||||
cap_xcomp->egl.glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, cap_xcomp->target_texture_id, 0);
|
||||
|
||||
// Set the list of draw buffers.
|
||||
unsigned int DrawBuffers[1] = {GL_COLOR_ATTACHMENT0};
|
||||
cap_xcomp->egl.glDrawBuffers(1, DrawBuffers); // "1" is the size of DrawBuffers
|
||||
|
||||
if(cap_xcomp->egl.glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE) {
|
||||
fprintf(stderr, "Failed to setup framebuffer\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
cap_xcomp->egl.glBindFramebuffer(GL_FRAMEBUFFER, 0);
|
||||
|
||||
//cap_xcomp->egl.glGenVertexArrays(1, &cap_xcomp->quad_VertexArrayID);
|
||||
//cap_xcomp->egl.glBindVertexArray(cap_xcomp->quad_VertexArrayID);
|
||||
|
||||
static const float g_quad_vertex_buffer_data[] = {
|
||||
-1.0f, -1.0f, 0.0f,
|
||||
1.0f, -1.0f, 0.0f,
|
||||
-1.0f, 1.0f, 0.0f,
|
||||
-1.0f, 1.0f, 0.0f,
|
||||
1.0f, -1.0f, 0.0f,
|
||||
1.0f, 1.0f, 0.0f,
|
||||
};
|
||||
|
||||
//cap_xcomp->egl.glGenBuffers(1, &cap_xcomp->quad_vertexbuffer);
|
||||
//cap_xcomp->egl.glBindBuffer(GL_ARRAY_BUFFER, cap_xcomp->quad_vertexbuffer);
|
||||
//cap_xcomp->egl.glBufferData(GL_ARRAY_BUFFER, sizeof(g_quad_vertex_buffer_data), g_quad_vertex_buffer_data, GL_STATIC_DRAW);
|
||||
|
||||
// Create and compile our GLSL program from the shaders
|
||||
LoadShaders(cap_xcomp);
|
||||
texID = cap_xcomp->egl.glGetUniformLocation(shader_program, "tex");
|
||||
fprintf(stderr, "uniform id: %u\n", texID);
|
||||
|
||||
float vVertices[] = {
|
||||
-1.0f, 1.0f, 0.0f, 1.0f,
|
||||
-1.0f, -1.0f, 0.0f, 0.0f,
|
||||
1.0f, -1.0f, 1.0f, 0.0f,
|
||||
|
||||
-1.0f, 1.0f, 0.0f, 1.0f,
|
||||
1.0f, -1.0f, 1.0f, 0.0f,
|
||||
1.0f, 1.0f, 1.0f, 1.0f
|
||||
};
|
||||
|
||||
unsigned int quadVBO;
|
||||
cap_xcomp->egl.glGenVertexArrays(1, &cap_xcomp->quadVAO);
|
||||
cap_xcomp->egl.glGenBuffers(1, &quadVBO);
|
||||
cap_xcomp->egl.glBindVertexArray(cap_xcomp->quadVAO);
|
||||
cap_xcomp->egl.glBindBuffer(GL_ARRAY_BUFFER, quadVBO);
|
||||
cap_xcomp->egl.glBufferData(GL_ARRAY_BUFFER, sizeof(vVertices), &vVertices, GL_STATIC_DRAW);
|
||||
|
||||
cap_xcomp->egl.glEnableVertexAttribArray(0);
|
||||
cap_xcomp->egl.glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 4 * sizeof(float), (void*)0);
|
||||
|
||||
cap_xcomp->egl.glEnableVertexAttribArray(1);
|
||||
cap_xcomp->egl.glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 4 * sizeof(float), (void*)(2 * sizeof(float)));
|
||||
|
||||
cap_xcomp->egl.glBindVertexArray(0);
|
||||
|
||||
//cap_xcomp->egl.glUniform1i(texID, window_texture_get_opengl_texture_id(&cap_xcomp->window_texture));
|
||||
|
||||
//cap_xcomp->egl.glViewport(0, 0, 1920, 1080);
|
||||
|
||||
//cap_xcomp->egl.glBindBuffer(GL_ARRAY_BUFFER, 0);
|
||||
//cap_xcomp->egl.glBindVertexArray(0);
|
||||
|
||||
if(!drm_create_codec_context(cap_xcomp, video_codec_context)) {
|
||||
fprintf(stderr, "failed to create hw codec context\n");
|
||||
gsr_egl_unload(&cap_xcomp->egl);
|
||||
return -1;
|
||||
}
|
||||
|
||||
fprintf(stderr, "sneed: %u\n", cap_xcomp->FramebufferName);
|
||||
return 0;
|
||||
#endif
|
||||
}
|
||||
|
||||
// TODO:
|
||||
static void free_desc(void *opaque, uint8_t *data) {
|
||||
AVDRMFrameDescriptor *desc = (AVDRMFrameDescriptor*)data;
|
||||
int i;
|
||||
|
||||
//for (i = 0; i < desc->nb_objects; i++)
|
||||
// close(desc->objects[i].fd);
|
||||
|
||||
av_free(desc);
|
||||
}
|
||||
|
||||
|
||||
static void gsr_capture_xcomposite_drm_tick(gsr_capture *cap, AVCodecContext *video_codec_context, AVFrame **frame) {
|
||||
gsr_capture_xcomposite_drm *cap_xcomp = cap->priv;
|
||||
|
||||
if(!cap_xcomp->created_hw_frame) {
|
||||
cap_xcomp->created_hw_frame = true;
|
||||
|
||||
/*if(av_hwframe_get_buffer(video_codec_context->hw_frames_ctx, *frame, 0) < 0) {
|
||||
fprintf(stderr, "gsr error: gsr_capture_xcomposite_drm_tick: av_hwframe_get_buffer failed\n");
|
||||
return;
|
||||
}*/
|
||||
|
||||
AVDRMFrameDescriptor *desc = av_malloc(sizeof(AVDRMFrameDescriptor));
|
||||
if(!desc) {
|
||||
fprintf(stderr, "poop\n");
|
||||
return;
|
||||
}
|
||||
|
||||
fprintf(stderr, "tick fd: %d\n", cap_xcomp->dmabuf_fd);
|
||||
|
||||
cap_xcomp->egl.glBindTexture(GL_TEXTURE_2D, cap_xcomp->target_texture_id);
|
||||
int xx = 0;
|
||||
int yy = 0;
|
||||
cap_xcomp->egl.glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_WIDTH, &xx);
|
||||
cap_xcomp->egl.glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_HEIGHT, &yy);
|
||||
cap_xcomp->egl.glBindTexture(GL_TEXTURE_2D, 0);
|
||||
|
||||
*desc = (AVDRMFrameDescriptor) {
|
||||
.nb_objects = 1,
|
||||
.objects[0] = {
|
||||
.fd = cap_xcomp->dmabuf_fd,
|
||||
.size = yy * cap_xcomp->stride,
|
||||
.format_modifier = cap_xcomp->modifiers,
|
||||
},
|
||||
.nb_layers = 1,
|
||||
.layers[0] = {
|
||||
.format = cap_xcomp->fourcc, // DRM_FORMAT_NV12
|
||||
.nb_planes = 1, //cap_xcomp->num_planes, // TODO: Ensure this is 1, otherwise ffmpeg cant handle it in av_hwframe_map
|
||||
.planes[0] = {
|
||||
.object_index = 0,
|
||||
.offset = cap_xcomp->offset,
|
||||
.pitch = cap_xcomp->stride,
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
#if 0
|
||||
AVBufferRef *device_ctx;
|
||||
if(av_hwdevice_ctx_create(&device_ctx, AV_HWDEVICE_TYPE_DRM, "/dev/dri/card0", NULL, 0) < 0) {
|
||||
fprintf(stderr, "Error: Failed to create hardware device context\n");
|
||||
return;
|
||||
}
|
||||
|
||||
AVBufferRef *frame_context = av_hwframe_ctx_alloc(device_ctx);
|
||||
if(!frame_context) {
|
||||
fprintf(stderr, "Error: Failed to create hwframe context\n");
|
||||
av_buffer_unref(&device_ctx);
|
||||
return;
|
||||
}
|
||||
|
||||
AVHWFramesContext *hw_frame_context =
|
||||
(AVHWFramesContext *)frame_context->data;
|
||||
hw_frame_context->width = video_codec_context->width;
|
||||
hw_frame_context->height = video_codec_context->height;
|
||||
hw_frame_context->sw_format = AV_PIX_FMT_0RGB32;
|
||||
hw_frame_context->format = AV_PIX_FMT_DRM_PRIME;
|
||||
hw_frame_context->device_ref = device_ctx;
|
||||
hw_frame_context->device_ctx = (AVHWDeviceContext*)device_ctx->data;
|
||||
|
||||
if (av_hwframe_ctx_init(frame_context) < 0) {
|
||||
fprintf(stderr, "Error: Failed to initialize hardware frame context "
|
||||
"(note: ffmpeg version needs to be > 4.0)\n");
|
||||
av_buffer_unref(&device_ctx);
|
||||
av_buffer_unref(&frame_context);
|
||||
return;
|
||||
}
|
||||
#endif
|
||||
|
||||
av_frame_free(frame);
|
||||
*frame = av_frame_alloc();
|
||||
if(!*frame) {
|
||||
fprintf(stderr, "gsr error: gsr_capture_xcomposite_tick: failed to allocate frame\n");
|
||||
return;
|
||||
}
|
||||
(*frame)->format = video_codec_context->pix_fmt;
|
||||
(*frame)->width = video_codec_context->width;
|
||||
(*frame)->height = video_codec_context->height;
|
||||
(*frame)->color_range = AVCOL_RANGE_JPEG;
|
||||
|
||||
int res = av_hwframe_get_buffer(video_codec_context->hw_frames_ctx, *frame, 0);
|
||||
if(res < 0) {
|
||||
fprintf(stderr, "gsr error: gsr_capture_xcomposite_tick: av_hwframe_get_buffer failed 1: %d\n", res);
|
||||
return;
|
||||
}
|
||||
|
||||
AVFrame *src_frame = av_frame_alloc();
|
||||
assert(src_frame);
|
||||
src_frame->format = AV_PIX_FMT_DRM_PRIME;
|
||||
src_frame->width = video_codec_context->width;
|
||||
src_frame->height = video_codec_context->height;
|
||||
src_frame->color_range = AVCOL_RANGE_JPEG;
|
||||
|
||||
src_frame->buf[0] = av_buffer_create((uint8_t*)desc, sizeof(*desc),
|
||||
&free_desc, video_codec_context, 0);
|
||||
if (!src_frame->buf[0]) {
|
||||
fprintf(stderr, "failed to create buffer!\n");
|
||||
return;
|
||||
}
|
||||
|
||||
src_frame->data[0] = (uint8_t*)desc;
|
||||
src_frame->extended_data = src_frame->data;
|
||||
src_frame->format = AV_PIX_FMT_DRM_PRIME;
|
||||
|
||||
res = av_hwframe_map(*frame, src_frame, AV_HWFRAME_MAP_DIRECT);
|
||||
if(res < 0) {
|
||||
fprintf(stderr, "av_hwframe_map failed: %d\n", res);
|
||||
}
|
||||
}
|
||||
|
||||
cap_xcomp->egl.glClear(GL_COLOR_BUFFER_BIT);
|
||||
}
|
||||
|
||||
static bool gsr_capture_xcomposite_drm_should_stop(gsr_capture *cap, bool *err) {
|
||||
return false;
|
||||
}
|
||||
|
||||
#define GL_FLOAT 0x1406
|
||||
#define GL_FALSE 0
|
||||
#define GL_TRUE 1
|
||||
#define GL_TRIANGLES 0x0004
|
||||
|
||||
void FBO_2_PPM_file(gsr_capture_xcomposite_drm *cap_xcomp, int output_width, int output_height)
|
||||
{
|
||||
FILE *output_image;
|
||||
|
||||
/// READ THE PIXELS VALUES from FBO AND SAVE TO A .PPM FILE
|
||||
int i, j, k;
|
||||
unsigned char *pixels = (unsigned char*)malloc(output_width*output_height*4); /* 4 bytes per pixel since glReadPixels below reads GL_RGBA */
|
||||
|
||||
unsigned int err = cap_xcomp->egl.glGetError();
|
||||
fprintf(stderr, "opengl err 1: %u\n", err);
|
||||
|
||||
/// READ THE CONTENT FROM THE FBO
|
||||
cap_xcomp->egl.glReadBuffer(GL_COLOR_ATTACHMENT0);
|
||||
|
||||
err = cap_xcomp->egl.glGetError();
|
||||
fprintf(stderr, "opengl err 2: %u\n", err);
|
||||
|
||||
cap_xcomp->egl.glReadPixels(0, 0, output_width, output_height, GL_RGBA, GL_UNSIGNED_BYTE, pixels);
|
||||
|
||||
err = cap_xcomp->egl.glGetError();
|
||||
fprintf(stderr, "opengl err 3: %u\n", err);
|
||||
|
||||
output_image = fopen("output.ppm", "wb");
|
||||
fprintf(output_image,"P3\n");
|
||||
fprintf(output_image,"# Created by Ricao\n");
|
||||
fprintf(output_image,"%d %d\n",output_width,output_height);
|
||||
fprintf(output_image,"255\n");
|
||||
|
||||
k = 0;
|
||||
for(i=0; i<output_width; i++)
|
||||
{
|
||||
for(j=0; j<output_height; j++)
|
||||
{
|
||||
fprintf(output_image,"%u %u %u ",(unsigned int)pixels[k],(unsigned int)pixels[k+1],
|
||||
(unsigned int)pixels[k+2]);
|
||||
k = k+4;
|
||||
}
|
||||
fprintf(output_image,"\n");
|
||||
}
|
||||
free(pixels);
|
||||
fclose(output_image);
|
||||
}
|
||||
|
||||
static int gsr_capture_xcomposite_drm_capture(gsr_capture *cap, AVFrame *frame) {
|
||||
gsr_capture_xcomposite_drm *cap_xcomp = cap->priv;
|
||||
vec2i source_size = cap_xcomp->texture_size;
|
||||
|
||||
#if 1
|
||||
// Requires opengl 4.2... TODO: Replace with earlier opengl if opengl < 4.2.
|
||||
cap_xcomp->egl.glCopyImageSubData(
|
||||
window_texture_get_opengl_texture_id(&cap_xcomp->window_texture), GL_TEXTURE_2D, 0, 0, 0, 0,
|
||||
cap_xcomp->target_texture_id, GL_TEXTURE_2D, 0, 0, 0, 0,
|
||||
source_size.x, source_size.y, 1);
|
||||
unsigned int err = cap_xcomp->egl.glGetError();
|
||||
if(err != 0) {
|
||||
static bool error_shown = false;
|
||||
if(!error_shown) {
|
||||
error_shown = true;
|
||||
fprintf(stderr, "Error: glCopyImageSubData failed, gl error: %d\n", err);
|
||||
}
|
||||
}
|
||||
#elif 0
|
||||
cap_xcomp->egl.glBindFramebuffer(GL_FRAMEBUFFER, cap_xcomp->FramebufferName);
|
||||
cap_xcomp->egl.glViewport(0, 0, 1920, 1080);
|
||||
//cap_xcomp->egl.glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
|
||||
cap_xcomp->egl.glClear(GL_COLOR_BUFFER_BIT);
|
||||
|
||||
cap_xcomp->egl.glUseProgram(shader_program);
|
||||
cap_xcomp->egl.glBindTexture(GL_TEXTURE_2D, window_texture_get_opengl_texture_id(&cap_xcomp->window_texture));
|
||||
cap_xcomp->egl.glBindVertexArray(cap_xcomp->quadVAO);
|
||||
cap_xcomp->egl.glDrawArrays(GL_TRIANGLES, 0, 6);
|
||||
cap_xcomp->egl.glBindTexture(GL_TEXTURE_2D, 0);
|
||||
|
||||
static int counter = 0;
|
||||
++counter;
|
||||
static bool image_saved = false;
|
||||
if(!image_saved && counter == 5) {
|
||||
image_saved = true;
|
||||
FBO_2_PPM_file(cap_xcomp, 1920, 1080);
|
||||
fprintf(stderr, "saved image!\n");
|
||||
}
|
||||
|
||||
cap_xcomp->egl.glBindVertexArray(0);
|
||||
cap_xcomp->egl.glUseProgram(0);
|
||||
cap_xcomp->egl.glBindFramebuffer(GL_FRAMEBUFFER, 0);
|
||||
#endif
|
||||
cap_xcomp->egl.eglSwapBuffers(cap_xcomp->egl.egl_display, cap_xcomp->egl.egl_surface);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void gsr_capture_xcomposite_drm_destroy(gsr_capture *cap, AVCodecContext *video_codec_context) {
|
||||
if(cap->priv) {
|
||||
free(cap->priv);
|
||||
cap->priv = NULL;
|
||||
}
|
||||
free(cap);
|
||||
}
|
||||
|
||||
gsr_capture* gsr_capture_xcomposite_drm_create(const gsr_capture_xcomposite_drm_params *params) {
|
||||
if(!params) {
|
||||
fprintf(stderr, "gsr error: gsr_capture_xcomposite_drm_create params is NULL\n");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
gsr_capture *cap = calloc(1, sizeof(gsr_capture));
|
||||
if(!cap)
|
||||
return NULL;
|
||||
|
||||
gsr_capture_xcomposite_drm *cap_xcomp = calloc(1, sizeof(gsr_capture_xcomposite_drm));
|
||||
if(!cap_xcomp) {
|
||||
free(cap);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
Display *display = XOpenDisplay(NULL);
|
||||
if(!display) {
|
||||
fprintf(stderr, "gsr error: gsr_capture_xcomposite_drm_create failed: XOpenDisplay failed\n");
|
||||
free(cap);
|
||||
free(cap_xcomp);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
cap_xcomp->dpy = display;
|
||||
cap_xcomp->params = *params;
|
||||
|
||||
*cap = (gsr_capture) {
|
||||
.start = gsr_capture_xcomposite_drm_start,
|
||||
.tick = gsr_capture_xcomposite_drm_tick,
|
||||
.should_stop = gsr_capture_xcomposite_drm_should_stop,
|
||||
.capture = gsr_capture_xcomposite_drm_capture,
|
||||
.destroy = gsr_capture_xcomposite_drm_destroy,
|
||||
.priv = cap_xcomp
|
||||
};
|
||||
|
||||
return cap;
|
||||
}
|
275
src/egl.c
Normal file
@ -0,0 +1,275 @@
|
||||
#include "../include/egl.h"
|
||||
#include "../include/library_loader.h"
|
||||
#include <string.h>
|
||||
|
||||
static bool gsr_egl_create_window(gsr_egl *self) {
|
||||
EGLDisplay egl_display = NULL;
|
||||
EGLConfig ecfg;
|
||||
int32_t num_config;
|
||||
EGLSurface egl_surface;
|
||||
EGLContext egl_context;
|
||||
|
||||
int32_t attr[] = {
|
||||
EGL_BUFFER_SIZE, 24,
|
||||
EGL_RENDERABLE_TYPE,
|
||||
EGL_OPENGL_ES2_BIT,
|
||||
EGL_NONE
|
||||
};
|
||||
|
||||
int32_t ctxattr[] = {
|
||||
EGL_CONTEXT_CLIENT_VERSION, 2,
|
||||
EGL_NONE
|
||||
};
|
||||
|
||||
// TODO: Is there a way to remove the need to create a window?
|
||||
Window window = XCreateWindow(self->dpy, DefaultRootWindow(self->dpy), 0, 0, 1, 1, 0, CopyFromParent, InputOutput, CopyFromParent, 0, NULL);
|
||||
|
||||
if(!window) {
|
||||
fprintf(stderr, "gsr error: gsr_gl_create_window failed: failed to create gl window\n");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
egl_display = self->eglGetDisplay(self->dpy);
|
||||
if(!egl_display) {
|
||||
fprintf(stderr, "gsr error: gsr_egl_create_window failed: eglGetDisplay failed\n");
|
||||
return false;
|
||||
}
|
||||
|
||||
if(!self->eglInitialize(egl_display, NULL, NULL)) {
|
||||
fprintf(stderr, "gsr error: gsr_egl_create_window failed: eglInitialize failed\n");
|
||||
return false;
|
||||
}
|
||||
|
||||
// TODO: Cleanup ecfg?
|
||||
if (!self->eglChooseConfig( egl_display, attr, &ecfg, 1, &num_config ) ) {
|
||||
//cerr << "Failed to choose config (eglError: " << eglGetError() << ")" << endl;
|
||||
return false;
|
||||
}
|
||||
|
||||
if ( num_config != 1 ) {
|
||||
//cerr << "Didn't get exactly one config, but " << num_config << endl;
|
||||
return false;
|
||||
}
|
||||
|
||||
egl_surface = self->eglCreateWindowSurface ( egl_display, ecfg, (EGLNativeWindowType)window, NULL );
|
||||
if ( !egl_surface ) {
|
||||
//cerr << "Unable to create EGL surface (eglError: " << eglGetError() << ")" << endl;
|
||||
return false;
|
||||
}
|
||||
|
||||
//// egl contexts collect all the state descriptions required for operation
|
||||
egl_context = self->eglCreateContext ( egl_display, ecfg, NULL, ctxattr );
|
||||
if ( !egl_context ) {
|
||||
//cerr << "Unable to create EGL context (eglError: " << eglGetError() << ")" << endl;
|
||||
return false;
|
||||
}
|
||||
|
||||
//// associate the egl-context with the egl-surface
|
||||
self->eglMakeCurrent( egl_display, egl_surface, egl_surface, egl_context );
|
||||
|
||||
self->egl_display = egl_display;
|
||||
self->egl_surface = egl_surface;
|
||||
self->egl_context = egl_context;
|
||||
self->window = window;
|
||||
return true;
|
||||
|
||||
fail:
|
||||
// TODO: Clean up EGL resources on failure
return false;
|
||||
}
|
||||
|
||||
static bool gsr_egl_load_egl(gsr_egl *self, void *library) {
|
||||
dlsym_assign required_dlsym[] = {
|
||||
{ (void**)&self->eglGetDisplay, "eglGetDisplay" },
|
||||
{ (void**)&self->eglInitialize, "eglInitialize" },
|
||||
{ (void**)&self->eglChooseConfig, "eglChooseConfig" },
|
||||
{ (void**)&self->eglCreateWindowSurface, "eglCreateWindowSurface" },
|
||||
{ (void**)&self->eglCreateContext, "eglCreateContext" },
|
||||
{ (void**)&self->eglMakeCurrent, "eglMakeCurrent" },
|
||||
{ (void**)&self->eglCreatePixmapSurface, "eglCreatePixmapSurface" },
|
||||
{ (void**)&self->eglCreateImage, "eglCreateImage" }, // TODO: eglCreateImageKHR
|
||||
{ (void**)&self->eglBindTexImage, "eglBindTexImage" },
|
||||
{ (void**)&self->eglSwapInterval, "eglSwapInterval" },
|
||||
{ (void**)&self->eglSwapBuffers, "eglSwapBuffers" },
|
||||
{ (void**)&self->eglGetProcAddress, "eglGetProcAddress" },
|
||||
|
||||
{ NULL, NULL }
|
||||
};
|
||||
|
||||
if(!dlsym_load_list(library, required_dlsym)) {
|
||||
fprintf(stderr, "gsr error: gsr_egl_load failed: missing required symbols in libEGL.so.1\n");
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool gsr_egl_mesa_load_egl(gsr_egl *self) {
|
||||
self->eglExportDMABUFImageQueryMESA = self->eglGetProcAddress("eglExportDMABUFImageQueryMESA");
|
||||
self->eglExportDMABUFImageMESA = self->eglGetProcAddress("eglExportDMABUFImageMESA");
|
||||
self->glEGLImageTargetTexture2DOES = self->eglGetProcAddress("glEGLImageTargetTexture2DOES");
|
||||
|
||||
if(!self->eglExportDMABUFImageQueryMESA) {
|
||||
fprintf(stderr, "could not find eglExportDMABUFImageQueryMESA\n");
|
||||
return false;
|
||||
}
|
||||
|
||||
if(!self->eglExportDMABUFImageMESA) {
|
||||
fprintf(stderr, "could not find eglExportDMABUFImageMESA\n");
|
||||
return false;
|
||||
}
|
||||
|
||||
if(!self->glEGLImageTargetTexture2DOES) {
|
||||
fprintf(stderr, "could not find glEGLImageTargetTexture2DOES\n");
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool gsr_egl_load_gl(gsr_egl *self, void *library) {
|
||||
dlsym_assign required_dlsym[] = {
|
||||
{ (void**)&self->glGetError, "glGetError" },
|
||||
{ (void**)&self->glGetString, "glGetString" },
|
||||
{ (void**)&self->glClear, "glClear" },
|
||||
{ (void**)&self->glClearColor, "glClearColor" },
|
||||
{ (void**)&self->glGenTextures, "glGenTextures" },
|
||||
{ (void**)&self->glDeleteTextures, "glDeleteTextures" },
|
||||
{ (void**)&self->glBindTexture, "glBindTexture" },
|
||||
{ (void**)&self->glTexParameteri, "glTexParameteri" },
|
||||
{ (void**)&self->glGetTexLevelParameteriv, "glGetTexLevelParameteriv" },
|
||||
{ (void**)&self->glTexImage2D, "glTexImage2D" },
|
||||
{ (void**)&self->glCopyImageSubData, "glCopyImageSubData" },
|
||||
{ (void**)&self->glGenFramebuffers, "glGenFramebuffers" },
|
||||
{ (void**)&self->glBindFramebuffer, "glBindFramebuffer" },
|
||||
{ (void**)&self->glViewport, "glViewport" },
|
||||
{ (void**)&self->glFramebufferTexture2D, "glFramebufferTexture2D" },
|
||||
{ (void**)&self->glDrawBuffers, "glDrawBuffers" },
|
||||
{ (void**)&self->glCheckFramebufferStatus, "glCheckFramebufferStatus" },
|
||||
{ (void**)&self->glBindBuffer, "glBindBuffer" },
|
||||
{ (void**)&self->glGenBuffers, "glGenBuffers" },
|
||||
{ (void**)&self->glBufferData, "glBufferData" },
|
||||
{ (void**)&self->glGetUniformLocation, "glGetUniformLocation" },
|
||||
{ (void**)&self->glGenVertexArrays, "glGenVertexArrays" },
|
||||
{ (void**)&self->glBindVertexArray, "glBindVertexArray" },
|
||||
{ (void**)&self->glCreateProgram, "glCreateProgram" },
|
||||
{ (void**)&self->glCreateShader, "glCreateShader" },
|
||||
{ (void**)&self->glAttachShader, "glAttachShader" },
|
||||
{ (void**)&self->glBindAttribLocation, "glBindAttribLocation" },
|
||||
{ (void**)&self->glCompileShader, "glCompileShader" },
|
||||
{ (void**)&self->glLinkProgram, "glLinkProgram" },
|
||||
{ (void**)&self->glShaderSource, "glShaderSource" },
|
||||
{ (void**)&self->glUseProgram, "glUseProgram" },
|
||||
{ (void**)&self->glGetProgramInfoLog, "glGetProgramInfoLog" },
|
||||
{ (void**)&self->glGetShaderiv, "glGetShaderiv" },
|
||||
{ (void**)&self->glGetShaderInfoLog, "glGetShaderInfoLog" },
|
||||
{ (void**)&self->glGetShaderSource, "glGetShaderSource" },
|
||||
{ (void**)&self->glDeleteProgram, "glDeleteProgram" },
|
||||
{ (void**)&self->glDeleteShader, "glDeleteShader" },
|
||||
{ (void**)&self->glGetProgramiv, "glGetProgramiv" },
|
||||
{ (void**)&self->glVertexAttribPointer, "glVertexAttribPointer" },
|
||||
{ (void**)&self->glEnableVertexAttribArray, "glEnableVertexAttribArray" },
|
||||
{ (void**)&self->glDrawArrays, "glDrawArrays" },
|
||||
{ (void**)&self->glReadBuffer, "glReadBuffer" },
|
||||
{ (void**)&self->glReadPixels, "glReadPixels" },
|
||||
|
||||
{ NULL, NULL }
|
||||
};
|
||||
|
||||
if(!dlsym_load_list(library, required_dlsym)) {
|
||||
fprintf(stderr, "gsr error: gsr_egl_load failed: missing required symbols in libGL.so.1\n");
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool gsr_egl_load(gsr_egl *self, Display *dpy) {
|
||||
memset(self, 0, sizeof(gsr_egl));
|
||||
self->dpy = dpy;
|
||||
|
||||
dlerror(); /* clear */
|
||||
void *egl_lib = dlopen("libEGL.so.1", RTLD_LAZY);
|
||||
if(!egl_lib) {
|
||||
fprintf(stderr, "gsr error: gsr_egl_load: failed to load libEGL.so.1, error: %s\n", dlerror());
|
||||
return false;
|
||||
}
|
||||
|
||||
void *gl_lib = dlopen("libGL.so.1", RTLD_LAZY);
|
||||
if(!gl_lib) {
|
||||
fprintf(stderr, "gsr error: gsr_egl_load: failed to load libGL.so.1, error: %s\n", dlerror());
|
||||
dlclose(egl_lib);
|
||||
memset(self, 0, sizeof(gsr_egl));
|
||||
return false;
|
||||
}
|
||||
|
||||
if(!gsr_egl_load_egl(self, egl_lib)) {
|
||||
dlclose(egl_lib);
|
||||
dlclose(gl_lib);
|
||||
memset(self, 0, sizeof(gsr_egl));
|
||||
return false;
|
||||
}
|
||||
|
||||
if(!gsr_egl_load_gl(self, gl_lib)) {
|
||||
dlclose(egl_lib);
|
||||
dlclose(gl_lib);
|
||||
memset(self, 0, sizeof(gsr_egl));
|
||||
return false;
|
||||
}
|
||||
|
||||
if(!gsr_egl_mesa_load_egl(self)) {
|
||||
dlclose(egl_lib);
|
||||
dlclose(gl_lib);
|
||||
memset(self, 0, sizeof(gsr_egl));
|
||||
return false;
|
||||
}
|
||||
|
||||
if(!gsr_egl_create_window(self)) {
|
||||
dlclose(egl_lib);
|
||||
dlclose(gl_lib);
|
||||
memset(self, 0, sizeof(gsr_egl));
|
||||
return false;
|
||||
}
|
||||
|
||||
self->egl_library = egl_lib;
|
||||
self->gl_library = gl_lib;
|
||||
return true;
|
||||
}
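/* A minimal usage sketch of this loader (illustrative only, assuming an X11 display is
 * already open; the names below other than gsr_egl_load/gsr_egl_unload are hypothetical): */
#if 0
static int gsr_egl_usage_example(void) {
    Display *dpy = XOpenDisplay(NULL);
    if(!dpy)
        return -1;
    gsr_egl egl;
    if(!gsr_egl_load(&egl, dpy)) { /* loads libEGL.so.1 + libGL.so.1, creates a 1x1 window and an EGL context */
        XCloseDisplay(dpy);
        return -1;
    }
    /* ... use egl.glGenTextures, egl.eglCreateImage, egl.eglExportDMABUFImageMESA, ... */
    gsr_egl_unload(&egl);
    XCloseDisplay(dpy);
    return 0;
}
#endif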
|
||||
|
||||
bool gsr_egl_make_context_current(gsr_egl *self) {
|
||||
// TODO:
|
||||
return true;
|
||||
//return self->glXMakeContextCurrent(self->dpy, self->window, self->window, self->gl_context);
|
||||
}
|
||||
|
||||
void gsr_egl_unload(gsr_egl *self) {
|
||||
// TODO: Cleanup of egl resources
|
||||
|
||||
if(self->window) {
|
||||
XDestroyWindow(self->dpy, self->window);
|
||||
self->window = None;
|
||||
}
|
||||
|
||||
if(self->egl_library) {
|
||||
dlclose(self->egl_library);
|
||||
self->egl_library = NULL;
|
||||
}
|
||||
|
||||
if(self->gl_library) {
|
||||
dlclose(self->gl_library);
|
||||
self->gl_library = NULL;
|
||||
}
|
||||
|
||||
memset(self, 0, sizeof(gsr_egl));
|
||||
}
|
198
src/gl.c
@ -1,198 +0,0 @@
|
||||
#include "../include/gl.h"
|
||||
#include "../include/library_loader.h"
|
||||
#include <string.h>
|
||||
|
||||
static bool gsr_gl_create_window(gsr_gl *self) {
|
||||
const int attr[] = {
|
||||
GLX_RENDER_TYPE, GLX_RGBA_BIT,
|
||||
GLX_DRAWABLE_TYPE, GLX_WINDOW_BIT,
|
||||
GLX_DOUBLEBUFFER, True,
|
||||
GLX_RED_SIZE, 8,
|
||||
GLX_GREEN_SIZE, 8,
|
||||
GLX_BLUE_SIZE, 8,
|
||||
GLX_ALPHA_SIZE, 8,
|
||||
GLX_DEPTH_SIZE, 0,
|
||||
None
|
||||
};
|
||||
|
||||
GLXFBConfig *fbconfigs = NULL;
|
||||
XVisualInfo *visual_info = NULL;
|
||||
GLXFBConfig fbconfig = NULL;
|
||||
Colormap colormap = None;
|
||||
GLXContext gl_context = NULL;
|
||||
Window window = None;
|
||||
|
||||
int numfbconfigs = 0;
|
||||
fbconfigs = self->glXChooseFBConfig(self->dpy, DefaultScreen(self->dpy), attr, &numfbconfigs);
|
||||
for(int i = 0; i < numfbconfigs; i++) {
|
||||
visual_info = self->glXGetVisualFromFBConfig(self->dpy, fbconfigs[i]);
|
||||
if(!visual_info)
|
||||
continue;
|
||||
|
||||
fbconfig = fbconfigs[i];
|
||||
break;
|
||||
}
|
||||
|
||||
if(!visual_info) {
|
||||
fprintf(stderr, "gsr error: gsr_gl_create_window failed: no appropriate visual found\n");
|
||||
XFree(fbconfigs);
|
||||
return false;
|
||||
}
|
||||
|
||||
/* TODO: Core profile? GLX_CONTEXT_CORE_PROFILE_BIT_ARB. */
|
||||
/* TODO: Remove need for 4.2 when copy texture function has been removed. */
|
||||
int context_attribs[] = {
|
||||
GLX_CONTEXT_MAJOR_VERSION_ARB, 4,
|
||||
GLX_CONTEXT_MINOR_VERSION_ARB, 2,
|
||||
GLX_CONTEXT_FLAGS_ARB, GLX_CONTEXT_FORWARD_COMPATIBLE_BIT_ARB,
|
||||
None
|
||||
};
|
||||
|
||||
gl_context = self->glXCreateContextAttribsARB(self->dpy, fbconfig, NULL, True, context_attribs);
|
||||
if(!gl_context) {
|
||||
fprintf(stderr, "gsr error: gsr_gl_create_window failed: failed to create gl context\n");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
colormap = XCreateColormap(self->dpy, DefaultRootWindow(self->dpy), visual_info->visual, AllocNone);
|
||||
if(!colormap) {
|
||||
fprintf(stderr, "gsr error: gsr_gl_create_window failed: failed to create x11 colormap\n");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
XSetWindowAttributes window_attr;
|
||||
window_attr.colormap = colormap;
|
||||
|
||||
// TODO: Is there a way to remove the need to create a window?
|
||||
window = XCreateWindow(self->dpy, DefaultRootWindow(self->dpy), 0, 0, 1, 1, 0, visual_info->depth, InputOutput, visual_info->visual, CWColormap, &window_attr);
|
||||
|
||||
if(!window) {
|
||||
fprintf(stderr, "gsr error: gsr_gl_create_window failed: failed to create gl window\n");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
if(!self->glXMakeContextCurrent(self->dpy, window, window, gl_context)) {
|
||||
fprintf(stderr, "gsr error: gsr_gl_create_window failed: failed to make gl context current\n");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
self->fbconfigs = fbconfigs;
|
||||
self->visual_info = visual_info;
|
||||
self->colormap = colormap;
|
||||
self->gl_context = gl_context;
|
||||
self->window = window;
|
||||
return true;
|
||||
|
||||
fail:
|
||||
if(window)
|
||||
XDestroyWindow(self->dpy, window);
|
||||
if(colormap)
|
||||
XFreeColormap(self->dpy, colormap);
|
||||
if(gl_context)
|
||||
self->glXDestroyContext(self->dpy, gl_context);
|
||||
if(visual_info)
|
||||
XFree(visual_info);
|
||||
XFree(fbconfigs);
|
||||
return False;
|
||||
}
|
||||
|
||||
bool gsr_gl_load(gsr_gl *self, Display *dpy) {
|
||||
memset(self, 0, sizeof(gsr_gl));
|
||||
self->dpy = dpy;
|
||||
|
||||
dlerror(); /* clear */
|
||||
void *lib = dlopen("libGL.so.1", RTLD_LAZY);
|
||||
if(!lib) {
|
||||
fprintf(stderr, "gsr error: gsr_gl_load: failed to load libGL.so.1, error: %s\n", dlerror());
|
||||
return false;
|
||||
}
|
||||
|
||||
dlsym_assign optional_dlsym[] = {
|
||||
{ (void**)&self->glClearTexImage, "glClearTexImage" },
|
||||
{ (void**)&self->glXSwapIntervalEXT, "glXSwapIntervalEXT" },
|
||||
{ (void**)&self->glXSwapIntervalMESA, "glXSwapIntervalMESA" },
|
||||
{ (void**)&self->glXSwapIntervalSGI, "glXSwapIntervalSGI" },
|
||||
|
||||
{ NULL, NULL }
|
||||
};
|
||||
|
||||
dlsym_load_list_optional(lib, optional_dlsym);
|
||||
|
||||
dlsym_assign required_dlsym[] = {
|
||||
{ (void**)&self->glXCreatePixmap, "glXCreatePixmap" },
|
||||
{ (void**)&self->glXDestroyPixmap, "glXDestroyPixmap" },
|
||||
{ (void**)&self->glXBindTexImageEXT, "glXBindTexImageEXT" },
|
||||
{ (void**)&self->glXReleaseTexImageEXT, "glXReleaseTexImageEXT" },
|
||||
{ (void**)&self->glXChooseFBConfig, "glXChooseFBConfig" },
|
||||
{ (void**)&self->glXGetVisualFromFBConfig, "glXGetVisualFromFBConfig" },
|
||||
{ (void**)&self->glXCreateContextAttribsARB, "glXCreateContextAttribsARB" },
|
||||
{ (void**)&self->glXMakeContextCurrent, "glXMakeContextCurrent" },
|
||||
{ (void**)&self->glXDestroyContext, "glXDestroyContext" },
|
||||
{ (void**)&self->glXSwapBuffers, "glXSwapBuffers" },
|
||||
|
||||
{ (void**)&self->glGetError, "glGetError" },
|
||||
{ (void**)&self->glGetString, "glGetString" },
|
||||
{ (void**)&self->glClear, "glClear" },
|
||||
{ (void**)&self->glGenTextures, "glGenTextures" },
|
||||
{ (void**)&self->glDeleteTextures, "glDeleteTextures" },
|
||||
{ (void**)&self->glBindTexture, "glBindTexture" },
|
||||
{ (void**)&self->glTexParameteri, "glTexParameteri" },
|
||||
{ (void**)&self->glGetTexLevelParameteriv, "glGetTexLevelParameteriv" },
|
||||
{ (void**)&self->glTexImage2D, "glTexImage2D" },
|
||||
{ (void**)&self->glCopyImageSubData, "glCopyImageSubData" },
|
||||
|
||||
{ NULL, NULL }
|
||||
};
|
||||
|
||||
if(!dlsym_load_list(lib, required_dlsym)) {
|
||||
fprintf(stderr, "gsr error: gsr_gl_load failed: missing required symbols in libGL.so.1\n");
|
||||
dlclose(lib);
|
||||
memset(self, 0, sizeof(gsr_gl));
|
||||
return false;
|
||||
}
|
||||
|
||||
if(!gsr_gl_create_window(self)) {
|
||||
dlclose(lib);
|
||||
memset(self, 0, sizeof(gsr_gl));
|
||||
return false;
|
||||
}
|
||||
|
||||
self->library = lib;
|
||||
return true;
|
||||
}
|
||||
|
||||
bool gsr_gl_make_context_current(gsr_gl *self) {
|
||||
return self->glXMakeContextCurrent(self->dpy, self->window, self->window, self->gl_context);
|
||||
}
|
||||
|
||||
void gsr_gl_unload(gsr_gl *self) {
|
||||
if(self->window) {
|
||||
XDestroyWindow(self->dpy, self->window);
|
||||
self->window = None;
|
||||
}
|
||||
|
||||
if(self->colormap) {
|
||||
XFreeColormap(self->dpy, self->colormap);
|
||||
self->colormap = None;
|
||||
}
|
||||
|
||||
if(self->gl_context) {
|
||||
self->glXDestroyContext(self->dpy, self->gl_context);
|
||||
self->gl_context = NULL;
|
||||
}
|
||||
|
||||
if(self->visual_info) {
|
||||
XFree(self->visual_info);
|
||||
self->visual_info = NULL;
|
||||
}
|
||||
|
||||
if(self->fbconfigs) {
|
||||
XFree(self->fbconfigs);
|
||||
self->fbconfigs = NULL;
|
||||
}
|
||||
|
||||
if(self->library) {
|
||||
dlclose(self->library);
|
||||
memset(self, 0, sizeof(gsr_gl));
|
||||
}
|
||||
}
|
348
src/main.cpp
@ -1,7 +1,8 @@
|
||||
extern "C" {
|
||||
#include "../include/capture/nvfbc.h"
|
||||
#include "../include/capture/xcomposite.h"
|
||||
#include "../include/gl.h"
|
||||
#include "../include/capture/xcomposite_cuda.h"
|
||||
#include "../include/capture/xcomposite_drm.h"
|
||||
#include "../include/egl.h"
|
||||
#include "../include/time.h"
|
||||
}
|
||||
|
||||
@ -157,6 +158,9 @@ static void receive_frames(AVCodecContext *av_codec_context, int stream_index, A
|
||||
av_packet.stream_index = stream_index;
|
||||
av_packet.pts = av_packet.dts = frame->pts;
|
||||
|
||||
if(frame->flags & AV_FRAME_FLAG_DISCARD)
|
||||
av_packet.flags |= AV_PKT_FLAG_DISCARD;
|
||||
|
||||
std::lock_guard<std::mutex> lock(write_output_mutex);
|
||||
if(replay_buffer_size_secs != -1) {
|
||||
double time_now = clock_get_monotonic_seconds();
|
||||
@ -170,15 +174,16 @@ static void receive_frames(AVCodecContext *av_codec_context, int stream_index, A
|
||||
frame_data_queue.pop_front();
|
||||
frames_erased = true;
|
||||
}
|
||||
av_packet_unref(&av_packet);
|
||||
} else {
|
||||
av_packet_rescale_ts(&av_packet, av_codec_context->time_base, stream->time_base);
|
||||
av_packet.stream_index = stream->index;
|
||||
int ret = av_write_frame(av_format_context, &av_packet);
|
||||
// TODO: Is av_interleaved_write_frame needed?
|
||||
int ret = av_interleaved_write_frame(av_format_context, &av_packet);
|
||||
if(ret < 0) {
|
||||
fprintf(stderr, "Error: Failed to write frame index %d to muxer, reason: %s (%d)\n", av_packet.stream_index, av_error_to_string(ret), ret);
|
||||
}
|
||||
}
|
||||
av_packet_unref(&av_packet);
|
||||
} else if (res == AVERROR(EAGAIN)) { // we have no packet
|
||||
// fprintf(stderr, "No packet!\n");
|
||||
av_packet_unref(&av_packet);
|
||||
@ -195,7 +200,7 @@ static void receive_frames(AVCodecContext *av_codec_context, int stream_index, A
|
||||
}
|
||||
}
|
||||
|
||||
static AVCodecContext* create_audio_codec_context(AVFormatContext *av_format_context, int fps) {
|
||||
static AVCodecContext* create_audio_codec_context(int fps) {
|
||||
const AVCodec *codec = avcodec_find_encoder(AV_CODEC_ID_AAC);
|
||||
if (!codec) {
|
||||
fprintf(
|
||||
@ -223,28 +228,12 @@ static AVCodecContext* create_audio_codec_context(AVFormatContext *av_format_con
|
||||
codec_context->time_base.den = codec_context->sample_rate;
|
||||
codec_context->framerate.num = fps;
|
||||
codec_context->framerate.den = 1;
|
||||
|
||||
av_format_context->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
|
||||
codec_context->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
|
||||
|
||||
return codec_context;
|
||||
}
|
||||
|
||||
static const AVCodec* find_h264_encoder() {
|
||||
const AVCodec *codec = avcodec_find_encoder_by_name("h264_nvenc");
|
||||
if(!codec)
|
||||
codec = avcodec_find_encoder_by_name("nvenc_h264");
|
||||
return codec;
|
||||
}
|
||||
|
||||
static const AVCodec* find_h265_encoder() {
|
||||
const AVCodec *codec = avcodec_find_encoder_by_name("hevc_nvenc");
|
||||
if(!codec)
|
||||
codec = avcodec_find_encoder_by_name("nvenc_hevc");
|
||||
return codec;
|
||||
}
|
||||
|
||||
static AVCodecContext *create_video_codec_context(AVFormatContext *av_format_context,
|
||||
static AVCodecContext *create_video_codec_context(AVPixelFormat pix_fmt,
|
||||
VideoQuality video_quality,
|
||||
int fps, const AVCodec *codec, bool is_livestream) {
|
||||
|
||||
@ -275,7 +264,7 @@ static AVCodecContext *create_video_codec_context(AVFormatContext *av_format_con
|
||||
codec_context->gop_size = fps * 2;
|
||||
}
|
||||
codec_context->max_b_frames = 0;
|
||||
codec_context->pix_fmt = AV_PIX_FMT_CUDA;
|
||||
codec_context->pix_fmt = pix_fmt;
|
||||
codec_context->color_range = AVCOL_RANGE_JPEG;
|
||||
if(codec->id == AV_CODEC_ID_HEVC)
|
||||
codec_context->codec_tag = MKTAG('h', 'v', 'c', '1');
|
||||
@ -305,9 +294,6 @@ static AVCodecContext *create_video_codec_context(AVFormatContext *av_format_con
|
||||
if (codec_context->codec_id == AV_CODEC_ID_MPEG1VIDEO)
|
||||
codec_context->mb_decision = 2;
|
||||
|
||||
if(codec_context->codec_id == AV_CODEC_ID_H264)
|
||||
codec_context->profile = FF_PROFILE_H264_HIGH;
|
||||
|
||||
// stream->time_base = codec_context->time_base;
|
||||
// codec_context->ticks_per_frame = 30;
|
||||
//av_opt_set(codec_context->priv_data, "tune", "hq", 0);
|
||||
@ -331,12 +317,76 @@ static AVCodecContext *create_video_codec_context(AVFormatContext *av_format_con
|
||||
//codec_context->rc_min_rate = codec_context->bit_rate;
|
||||
//codec_context->rc_buffer_size = codec_context->bit_rate / 10;
|
||||
|
||||
av_format_context->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
|
||||
codec_context->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
|
||||
|
||||
return codec_context;
|
||||
}
|
||||
|
||||
#if 0
|
||||
static const AVCodec* find_h264_encoder() {
|
||||
const AVCodec *codec = avcodec_find_encoder_by_name("h264_vaapi");
|
||||
if(!codec)
|
||||
codec = avcodec_find_encoder_by_name("vaapi_h264");
|
||||
return codec;
|
||||
}
|
||||
|
||||
static const AVCodec* find_h265_encoder() {
|
||||
const AVCodec *codec = avcodec_find_encoder_by_name("hevc_vaapi");
|
||||
if(!codec)
|
||||
codec = avcodec_find_encoder_by_name("vaapi_hevc");
|
||||
return codec;
|
||||
}
|
||||
#else
|
||||
static const AVCodec* find_h264_encoder() {
|
||||
const AVCodec *codec = avcodec_find_encoder_by_name("h264_nvenc");
|
||||
if(!codec)
|
||||
codec = avcodec_find_encoder_by_name("nvenc_h264");
|
||||
|
||||
static bool checked = false;
|
||||
if(!checked) {
|
||||
checked = true;
|
||||
// Do not use AV_PIX_FMT_CUDA because we don't want to do a full check with a hardware context
|
||||
AVCodecContext *codec_context = create_video_codec_context(AV_PIX_FMT_YUV420P, VideoQuality::VERY_HIGH, 60, codec, false);
|
||||
if(codec_context) {
codec_context->width = 1920;
codec_context->height = 1080;
|
||||
if (avcodec_open2(codec_context, codec_context->codec, NULL) < 0) {
|
||||
avcodec_free_context(&codec_context);
|
||||
return nullptr;
|
||||
}
|
||||
avcodec_free_context(&codec_context);
|
||||
}
|
||||
}
|
||||
return codec;
|
||||
}
|
||||
|
||||
static const AVCodec* find_h265_encoder() {
|
||||
const AVCodec *codec = avcodec_find_encoder_by_name("hevc_nvenc");
|
||||
if(!codec)
|
||||
codec = avcodec_find_encoder_by_name("nvenc_hevc");
|
||||
|
||||
if(!codec)
|
||||
return nullptr;
|
||||
|
||||
static bool checked = false;
|
||||
if(!checked) {
|
||||
checked = true;
|
||||
// Do not use AV_PIX_FMT_CUDA because we don't want to do a full check with a hardware context
|
||||
AVCodecContext *codec_context = create_video_codec_context(AV_PIX_FMT_YUV420P, VideoQuality::VERY_HIGH, 60, codec, false);
|
||||
if(codec_context) {
codec_context->width = 1920;
codec_context->height = 1080;
|
||||
if (avcodec_open2(codec_context, codec_context->codec, NULL) < 0) {
|
||||
avcodec_free_context(&codec_context);
|
||||
return nullptr;
|
||||
}
|
||||
avcodec_free_context(&codec_context);
|
||||
}
|
||||
}
|
||||
return codec;
|
||||
}
|
||||
#endif
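// Note: the checks above open a dummy 1920x1080 YUV420P encoder context once per process
// to verify that the nvenc encoder can actually be opened, rather than only checking that
// the codec name is known to ffmpeg.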
|
||||
|
||||
static AVFrame* open_audio(AVCodecContext *audio_codec_context) {
|
||||
int ret;
|
||||
ret = avcodec_open2(audio_codec_context, audio_codec_context->codec, nullptr);
|
||||
@ -371,15 +421,15 @@ static AVFrame* open_audio(AVCodecContext *audio_codec_context) {
|
||||
|
||||
static void open_video(AVCodecContext *codec_context, VideoQuality video_quality, bool very_old_gpu) {
|
||||
bool supports_p4 = false;
|
||||
bool supports_p7 = false;
|
||||
bool supports_p6 = false;
|
||||
|
||||
const AVOption *opt = nullptr;
|
||||
while((opt = av_opt_next(codec_context->priv_data, opt))) {
|
||||
if(opt->type == AV_OPT_TYPE_CONST) {
|
||||
if(strcmp(opt->name, "p4") == 0)
|
||||
supports_p4 = true;
|
||||
else if(strcmp(opt->name, "p7") == 0)
|
||||
supports_p7 = true;
|
||||
else if(strcmp(opt->name, "p6") == 0)
|
||||
supports_p6 = true;
|
||||
}
|
||||
}
|
||||
|
||||
@ -416,7 +466,7 @@ static void open_video(AVCodecContext *codec_context, VideoQuality video_quality
|
||||
}
|
||||
}
|
||||
|
||||
if(!supports_p4 && !supports_p7)
|
||||
if(!supports_p4 && !supports_p6)
|
||||
fprintf(stderr, "Info: your ffmpeg version is outdated. It's recommended that you use the flatpak version of gpu-screen-recorder version instead, which you can find at https://flathub.org/apps/details/com.dec05eba.gpu_screen_recorder\n");
|
||||
|
||||
//if(is_livestream) {
|
||||
@ -429,10 +479,11 @@ static void open_video(AVCodecContext *codec_context, VideoQuality video_quality
|
||||
// with pretty good performance but you now have to choose p1-p7, which are gpu agnostic and on
|
||||
// older gpus p5-p7 slow the gpu down to a crawl...
|
||||
// "hq" is now just an alias for p7 in ffmpeg :(
|
||||
// TODO: Temporary disable because of stuttering?
|
||||
if(very_old_gpu)
|
||||
av_dict_set(&options, "preset", supports_p4 ? "p4" : "medium", 0);
|
||||
else
|
||||
av_dict_set(&options, "preset", supports_p7 ? "p7" : "slow", 0);
|
||||
av_dict_set(&options, "preset", supports_p6 ? "p6" : "slow", 0);
|
||||
|
||||
av_dict_set(&options, "tune", "hq", 0);
|
||||
av_dict_set(&options, "rc", "constqp", 0);
|
||||
@ -448,11 +499,11 @@ static void open_video(AVCodecContext *codec_context, VideoQuality video_quality
|
||||
}

static void usage() {
fprintf(stderr, "usage: gpu-screen-recorder -w <window_id> -c <container_format> -f <fps> [-a <audio_input>...] [-q <quality>] [-r <replay_buffer_size_sec>] [-o <output_file>]\n");
fprintf(stderr, "usage: gpu-screen-recorder -w <window_id> [-c <container_format>] -f <fps> [-a <audio_input>...] [-q <quality>] [-r <replay_buffer_size_sec>] [-o <output_file>]\n");
fprintf(stderr, "OPTIONS:\n");
fprintf(stderr, "  -w  Window to record or a display, \"screen\" or \"screen-direct\". The display is the display name in xrandr and if \"screen\" or \"screen-direct\" is selected then all displays are recorded and they are recorded in h265 (aka hevc). "
"\"screen-direct\" skips one texture copy for fullscreen applications so it may lead to better performance and it works with VRR monitors when recording a fullscreen application but may break some applications, such as mpv in fullscreen mode. Recording a display requires a gpu with NvFBC support.\n");
fprintf(stderr, "  -c  Container format for output file, for example mp4, or flv.\n");
fprintf(stderr, "  -c  Container format for output file, for example mp4, or flv. Only required if no output file is specified or if recording in replay buffer mode. If an output file is specified and -c is not used then the container format is determined from the output filename extension.\n");
fprintf(stderr, "  -f  Framerate to record at.\n");
fprintf(stderr, "  -a  Audio device to record from (pulse audio device). Can be specified multiple times. Each time this is specified a new audio track is added for the specified audio device. A name can be given to the audio input device by prefixing the audio input with <name>/, for example \"dummy/alsa_output.pci-0000_00_1b.0.analog-stereo.monitor\". Optional, no audio track is added by default.\n");
fprintf(stderr, "  -q  Video quality. Should be either 'medium', 'high', 'very_high' or 'ultra'. 'high' is the recommended option when live streaming or when you have a slower harddrive. Optional, set to 'very_high' by default.\n");
@ -547,7 +598,7 @@ static std::future<void> save_replay_thread;
static std::vector<AVPacket> save_replay_packets;
static std::string save_replay_output_filepath;

static void save_replay_async(AVCodecContext *video_codec_context, int video_stream_index, std::vector<AudioTrack> &audio_tracks, const std::deque<AVPacket> &frame_data_queue, bool frames_erased, std::string output_dir, std::string container_format, std::mutex &write_output_mutex) {
static void save_replay_async(AVCodecContext *video_codec_context, int video_stream_index, std::vector<AudioTrack> &audio_tracks, const std::deque<AVPacket> &frame_data_queue, bool frames_erased, std::string output_dir, const char *container_format, const std::string &file_extension, std::mutex &write_output_mutex) {
if(save_replay_thread.valid())
return;

@ -590,11 +641,10 @@ static void save_replay_async(AVCodecContext *video_codec_context, int video_str
}
}

save_replay_output_filepath = output_dir + "/Replay_" + get_date_str() + "." + container_format;
save_replay_output_filepath = output_dir + "/Replay_" + get_date_str() + "." + file_extension;
save_replay_thread = std::async(std::launch::async, [video_stream_index, container_format, start_index, video_pts_offset, audio_pts_offset, video_codec_context, &audio_tracks]() mutable {
AVFormatContext *av_format_context;
// The output format is automatically guessed from the file extension
avformat_alloc_output_context2(&av_format_context, nullptr, container_format.c_str(), nullptr);
avformat_alloc_output_context2(&av_format_context, nullptr, container_format, nullptr);

av_format_context->flags |= AVFMT_FLAG_GENPTS;
av_format_context->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
@ -643,7 +693,7 @@ static void save_replay_async(AVCodecContext *video_codec_context, int video_str
av_packet.stream_index = stream->index;
av_packet_rescale_ts(&av_packet, codec_context->time_base, stream->time_base);

int ret = av_write_frame(av_format_context, &av_packet);
int ret = av_interleaved_write_frame(av_format_context, &av_packet);
if(ret < 0)
fprintf(stderr, "Error: Failed to write frame index %d to muxer, reason: %s (%d)\n", stream->index, av_error_to_string(ret), ret);
}
@ -682,14 +732,68 @@ static bool is_livestream_path(const char *str) {
return false;
}

typedef enum {
GPU_VENDOR_AMD,
GPU_VENDOR_INTEL,
GPU_VENDOR_NVIDIA
} gpu_vendor;

typedef struct {
gpu_vendor vendor;
int gpu_version; /* 0 if unknown */
} gpu_info;

static bool gl_get_gpu_info(Display *dpy, gpu_info *info) {
gsr_egl gl;
if(!gsr_egl_load(&gl, dpy)) {
fprintf(stderr, "Error: failed to load opengl\n");
return false;
}

bool supported = true;
const unsigned char *gl_vendor = gl.glGetString(GL_VENDOR);
const unsigned char *gl_renderer = gl.glGetString(GL_RENDERER);

info->gpu_version = 0;

if(!gl_vendor) {
fprintf(stderr, "Error: failed to get gpu vendor\n");
supported = false;
goto end;
}

if(strstr((const char*)gl_vendor, "AMD"))
info->vendor = GPU_VENDOR_AMD;
else if(strstr((const char*)gl_vendor, "Intel"))
info->vendor = GPU_VENDOR_INTEL;
else if(strstr((const char*)gl_vendor, "NVIDIA"))
info->vendor = GPU_VENDOR_NVIDIA;
else {
fprintf(stderr, "Error: unknown gpu vendor: %s\n", gl_vendor);
supported = false;
goto end;
}

if(gl_renderer) {
if(info->vendor == GPU_VENDOR_NVIDIA)
sscanf((const char*)gl_renderer, "%*s %*s %*s %d", &info->gpu_version);
}

end:
gsr_egl_unload(&gl);
return supported;
}
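
// Worked example of the "%*s %*s %*s %d" scan above: the first three whitespace-separated
// tokens of the renderer string are skipped and the leading digits of the fourth are parsed.
// Assuming a renderer string such as "NVIDIA GeForce GTX 760/PCIe/SSE2" (hypothetical value,
// the exact string depends on the driver):
//     int gpu_version = 0;
//     sscanf("NVIDIA GeForce GTX 760/PCIe/SSE2", "%*s %*s %*s %d", &gpu_version); // gpu_version == 760
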

int main(int argc, char **argv) {
signal(SIGINT, int_handler);
signal(SIGUSR1, save_replay_handler);

//av_log_set_level(AV_LOG_TRACE);

std::map<std::string, Arg> args = {
{ "-w", Arg { {}, false, false } },
//{ "-s", Arg { nullptr, true } },
{ "-c", Arg { {}, false, false } },
{ "-c", Arg { {}, true, false } },
{ "-f", Arg { {}, false, false } },
//{ "-s", Arg { {}, true, false } },
{ "-a", Arg { {}, true, true } },
@ -807,35 +911,31 @@ int main(int argc, char **argv) {
Display *dpy = XOpenDisplay(nullptr);
if (!dpy) {
fprintf(stderr, "Error: Failed to open display\n");
return 1;
return 2;
}

XSetErrorHandler(x11_error_handler);
XSetIOErrorHandler(x11_io_error_handler);

gsr_gl gl;
if(!gsr_gl_load(&gl, dpy)) {
fprintf(stderr, "Error: failed to load opengl\n");
return 1;
}

gpu_info gpu_inf;
bool very_old_gpu = false;
const unsigned char *gl_renderer = gl.glGetString(GL_RENDERER);
if(gl_renderer) {
int gpu_num = 1000;
sscanf((const char*)gl_renderer, "%*s %*s %*s %d", &gpu_num);
if(gpu_num < 900) {
if(!gl_get_gpu_info(dpy, &gpu_inf))
return 2;

if(gpu_inf.vendor == GPU_VENDOR_NVIDIA && gpu_inf.gpu_version != 0 && gpu_inf.gpu_version < 900) {
fprintf(stderr, "Info: your gpu appears to be very old (older than maxwell architecture). Switching to lower preset\n");
very_old_gpu = true;
}
}

gsr_gl_unload(&gl);

const char *window_str = args["-w"].value();

gsr_capture *capture = nullptr;
if(contains_non_hex_number(window_str)) {
if(gpu_inf.vendor != GPU_VENDOR_NVIDIA) {
fprintf(stderr, "Error: recording a monitor is only supported on NVIDIA right now\n");
return 2;
}

const char *capture_target = window_str;
bool direct_capture = strcmp(window_str, "screen-direct") == 0;
if(direct_capture) {
@ -874,19 +974,45 @@ int main(int argc, char **argv) {
usage();
}

gsr_capture_xcomposite_params xcomposite_params;
switch(gpu_inf.vendor) {
case GPU_VENDOR_AMD: {
gsr_capture_xcomposite_drm_params xcomposite_params;
xcomposite_params.window = src_window_id;
capture = gsr_capture_xcomposite_create(&xcomposite_params);
capture = gsr_capture_xcomposite_drm_create(&xcomposite_params);
if(!capture)
return 1;
break;
}
case GPU_VENDOR_INTEL: {
gsr_capture_xcomposite_drm_params xcomposite_params;
xcomposite_params.window = src_window_id;
capture = gsr_capture_xcomposite_drm_create(&xcomposite_params);
if(!capture)
return 1;
break;
}
case GPU_VENDOR_NVIDIA: {
gsr_capture_xcomposite_cuda_params xcomposite_params;
xcomposite_params.window = src_window_id;
capture = gsr_capture_xcomposite_cuda_create(&xcomposite_params);
if(!capture)
return 1;
break;
}
}
}

const char *filename = args["-o"].value();
if(filename) {
if(replay_buffer_size_secs != -1) {
if(!container_format) {
fprintf(stderr, "Error: option -c is required when using option -r\n");
usage();
}

struct stat buf;
if(stat(filename, &buf) == -1 || !S_ISDIR(buf.st_mode)) {
fprintf(stderr, "%s does not exist or is not a directory\n", filename);
fprintf(stderr, "Error: directory \"%s\" does not exist or is not a directory\n", filename);
usage();
}
}
@ -894,9 +1020,33 @@ int main(int argc, char **argv) {
if(replay_buffer_size_secs == -1) {
filename = "/dev/stdout";
} else {
fprintf(stderr, "Option -o is required when using option -r\n");
fprintf(stderr, "Error: Option -o is required when using option -r\n");
usage();
}

if(!container_format) {
fprintf(stderr, "Error: option -c is required when not using option -o\n");
usage();
}
}

AVFormatContext *av_format_context;
// The output format is automatically guessed by the file extension
avformat_alloc_output_context2(&av_format_context, nullptr, container_format, filename);
if (!av_format_context) {
fprintf(stderr, "Error: Failed to deduce container format from file extension\n");
return 1;
}

av_format_context->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
av_format_context->flags |= AVFMT_FLAG_GENPTS;
const AVOutputFormat *output_format = av_format_context->oformat;

std::string file_extension = output_format->extensions;
{
size_t comma_index = file_extension.find(',');
if(comma_index != std::string::npos)
file_extension = file_extension.substr(0, comma_index);
}
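
// For reference (not part of the patch): avformat_alloc_output_context2() can guess the muxer
// either from an explicit short name (-c) or from the output filename, and
// AVOutputFormat::extensions may list several comma separated extensions, which is why only
// the first entry is kept above. A minimal sketch:
//     AVFormatContext *probe_ctx = nullptr;
//     avformat_alloc_output_context2(&probe_ctx, nullptr, nullptr, "clip.mp4"); // guessed from ".mp4"
//     if(probe_ctx) {
//         fprintf(stderr, "muxer: %s, extensions: %s\n", probe_ctx->oformat->name, probe_ctx->oformat->extensions);
//         avformat_free_context(probe_ctx);
//     }
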

const double target_fps = 1.0 / (double)fps;
@ -909,6 +1059,8 @@ int main(int argc, char **argv) {
// while with h264 the fps doesn't drop.
if(!h265_codec) {
fprintf(stderr, "Info: using h264 encoder because a codec was not specified and your gpu does not support h265\n");
codec_to_use = "h264";
video_codec = VideoCodec::H264;
} else if(fps > 60) {
fprintf(stderr, "Info: using h264 encoder because a codec was not specified and fps is more than 60\n");
codec_to_use = "h264";
@ -921,7 +1073,7 @@ int main(int argc, char **argv) {
}

//bool use_hevc = strcmp(window_str, "screen") == 0 || strcmp(window_str, "screen-direct") == 0;
if(video_codec != VideoCodec::H264 && strcmp(container_format, "flv") == 0) {
if(video_codec != VideoCodec::H264 && strcmp(file_extension.c_str(), "flv") == 0) {
video_codec = VideoCodec::H264;
fprintf(stderr, "Warning: h265 is not compatible with flv, falling back to h264 instead.\n");
}
@ -941,21 +1093,6 @@ int main(int argc, char **argv) {
exit(2);
}

// Video start
AVFormatContext *av_format_context;
// The output format is automatically guessed by the file extension
avformat_alloc_output_context2(&av_format_context, nullptr, container_format,
nullptr);
if (!av_format_context) {
fprintf(
stderr,
"Error: Failed to deduce output format from file extension\n");
return 1;
}

av_format_context->flags |= AVFMT_FLAG_GENPTS;
const AVOutputFormat *output_format = av_format_context->oformat;

const bool is_livestream = is_livestream_path(filename);
// (Some?) livestreaming services require at least one audio track to work.
// If no audio is provided then create one silent audio track.
@ -967,7 +1104,7 @@ int main(int argc, char **argv) {
AVStream *video_stream = nullptr;
std::vector<AudioTrack> audio_tracks;

AVCodecContext *video_codec_context = create_video_codec_context(av_format_context, quality, fps, video_codec_f, is_livestream);
AVCodecContext *video_codec_context = create_video_codec_context(AV_PIX_FMT_CUDA, quality, fps, video_codec_f, is_livestream);
if(replay_buffer_size_secs == -1)
video_stream = create_stream(av_format_context, video_codec_context);

@ -982,7 +1119,7 @@ int main(int argc, char **argv) {

int audio_stream_index = VIDEO_STREAM_INDEX + 1;
for(const AudioInput &audio_input : requested_audio_inputs) {
AVCodecContext *audio_codec_context = create_audio_codec_context(av_format_context, fps);
AVCodecContext *audio_codec_context = create_audio_codec_context(fps);

AVStream *audio_stream = nullptr;
if(replay_buffer_size_secs == -1)
@ -1028,13 +1165,7 @@ int main(int argc, char **argv) {
frame->format = video_codec_context->pix_fmt;
frame->width = video_codec_context->width;
frame->height = video_codec_context->height;

if(video_codec_context->hw_frames_ctx) {
// TODO: Unref at the end?
frame->hw_frames_ctx = av_buffer_ref(video_codec_context->hw_frames_ctx);
frame->buf[0] = av_buffer_pool_get(((AVHWFramesContext*)video_codec_context->hw_frames_ctx->data)->pool);
frame->extended_data = frame->data;
}
frame->color_range = AVCOL_RANGE_JPEG;
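
// The manual buffer-pool grab above is roughly what av_hwframe_get_buffer() does for a frame
// backed by a hardware frames context; the more common pattern (not part of the patch) is:
//     AVFrame *hw_frame = av_frame_alloc();
//     if(hw_frame && video_codec_context->hw_frames_ctx &&
//        av_hwframe_get_buffer(video_codec_context->hw_frames_ctx, hw_frame, 0) < 0)
//         fprintf(stderr, "Error: failed to allocate a hardware frame\n");
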

std::mutex write_output_mutex;

@ -1103,15 +1234,21 @@ int main(int argc, char **argv) {
break;
}

const int64_t num_missing_frames = std::round((this_audio_frame_time - received_audio_time) / target_audio_hz / (int64_t)audio_track.frame->nb_samples);
int64_t num_missing_frames = std::round((this_audio_frame_time - received_audio_time) / target_audio_hz / (int64_t)audio_track.frame->nb_samples);
if(got_audio_data)
num_missing_frames = std::max((int64_t)0, num_missing_frames - 1);

if(!audio_track.sound_device.handle)
num_missing_frames = std::max((int64_t)1, num_missing_frames);

// Jesus is there a better way to do this? I JUST WANT TO KEEP VIDEO AND AUDIO SYNCED HOLY FUCK I WANT TO KILL MYSELF NOW.
// THIS PIECE OF SHIT WANTS EMPTY FRAMES OTHERWISE VIDEO PLAYS TOO FAST TO KEEP UP WITH AUDIO OR THE AUDIO PLAYS TOO EARLY.
// BUT WE CANT USE DELAYS TO GIVE DUMMY DATA BECAUSE PULSEAUDIO MIGHT GIVE AUDIO WITH A BIG DELAY!!!
if(num_missing_frames >= 5 || (num_missing_frames > 0 && got_audio_data)) {
if(num_missing_frames >= 5 || !audio_track.sound_device.handle) {
// TODO:
//audio_track.frame->data[0] = empty_audio;
received_audio_time = this_audio_frame_time;
swr_convert(swr, &audio_track.frame->data[0], audio_track.frame->nb_samples, (const uint8_t**)&empty_audio, audio_track.sound_device.frames);
swr_convert(swr, &audio_track.frame->data[0], audio_track.frame->nb_samples, (const uint8_t**)&empty_audio, audio_track.codec_context->frame_size);
// TODO: Check if duplicate frame can be saved just by writing it with a different pts instead of sending it again
for(int i = 0; i < num_missing_frames; ++i) {
audio_track.frame->pts = pts;
@ -1125,26 +1262,12 @@ int main(int argc, char **argv) {
}
}
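
// Worked example of the silence padding above, assuming target_audio_hz is the duration of one
// sample (1.0/48000) and 1024-sample frames: if 0.1 s passes with no audio delivered,
// 0.1 / (1.0/48000) / 1024 ≈ 4.7, so num_missing_frames rounds to 5 and five frames of converted
// empty_audio are encoded to keep the audio track aligned with the video.
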

if(!audio_track.sound_device.handle) {
// TODO:
//audio_track.frame->data[0] = empty_audio;
received_audio_time = this_audio_frame_time;
swr_convert(swr, &audio_track.frame->data[0], audio_track.frame->nb_samples, (const uint8_t**)&empty_audio, audio_track.codec_context->frame_size);
audio_track.frame->pts = pts;
pts += audio_track.frame->nb_samples;
ret = avcodec_send_frame(audio_track.codec_context, audio_track.frame);
if(ret >= 0){
receive_frames(audio_track.codec_context, audio_track.stream_index, audio_track.stream, audio_track.frame, av_format_context, record_start_time, frame_data_queue, replay_buffer_size_secs, frames_erased, *write_output_mutex);
} else {
fprintf(stderr, "Failed to encode audio!\n");
}

if(!audio_track.sound_device.handle)
usleep(timeout_ms * 1000);
}

if(got_audio_data) {
// TODO: Instead of converting audio, get float audio from alsa. Or does alsa do conversion internally to get this format?
swr_convert(swr, &audio_track.frame->data[0], audio_track.frame->nb_samples, (const uint8_t**)&sound_buffer, audio_track.sound_device.frames);
swr_convert(swr, &audio_track.frame->data[0], audio_track.frame->nb_samples, (const uint8_t**)&sound_buffer, audio_track.codec_context->frame_size);

audio_track.frame->pts = pts;
pts += audio_track.frame->nb_samples;
@ -1197,14 +1320,20 @@ int main(int argc, char **argv) {
const int64_t expected_frames = std::round((this_video_frame_time - start_time_pts) / target_fps);

const int num_frames = std::max(0L, expected_frames - video_pts_counter);

frame->flags &= ~AV_FRAME_FLAG_DISCARD;
// TODO: Check if duplicate frame can be saved just by writing it with a different pts instead of sending it again
for(int i = 0; i < num_frames; ++i) {
if(i > 0)
frame->flags |= AV_FRAME_FLAG_DISCARD;

frame->pts = video_pts_counter + i;
if (avcodec_send_frame(video_codec_context, frame) >= 0) {
int ret = avcodec_send_frame(video_codec_context, frame);
if (ret >= 0) {
receive_frames(video_codec_context, VIDEO_STREAM_INDEX, video_stream, frame, av_format_context,
record_start_time, frame_data_queue, replay_buffer_size_secs, frames_erased, write_output_mutex);
} else {
fprintf(stderr, "Error: avcodec_send_frame failed\n");
fprintf(stderr, "Error: avcodec_send_frame failed, error: %s\n", av_error_to_string(ret));
}
}
video_pts_counter += num_frames;
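
// Worked example of the frame pacing above, assuming target_fps = 1.0/60: if 0.5 s has elapsed
// since start_time_pts, expected_frames = round(0.5 / (1.0/60)) = 30; with video_pts_counter at
// 27, num_frames = 3, so the same captured frame is sent three times with consecutive pts values
// (the copies after the first carrying AV_FRAME_FLAG_DISCARD), keeping the output at a constant
// frame rate even when capture stalls.
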
@ -1213,15 +1342,12 @@ int main(int argc, char **argv) {
if(save_replay_thread.valid() && save_replay_thread.wait_for(std::chrono::seconds(0)) == std::future_status::ready) {
save_replay_thread.get();
puts(save_replay_output_filepath.c_str());
for(size_t i = 0; i < save_replay_packets.size(); ++i) {
av_packet_unref(&save_replay_packets[i]);
}
save_replay_packets.clear();
}

if(save_replay == 1 && !save_replay_thread.valid() && replay_buffer_size_secs != -1) {
save_replay = 0;
save_replay_async(video_codec_context, VIDEO_STREAM_INDEX, audio_tracks, frame_data_queue, frames_erased, filename, container_format, write_output_mutex);
save_replay_async(video_codec_context, VIDEO_STREAM_INDEX, audio_tracks, frame_data_queue, frames_erased, filename, container_format, file_extension, write_output_mutex);
}

// av_frame_free(&frame);
@ -1234,8 +1360,10 @@ int main(int argc, char **argv) {

running = 0;

if(save_replay_thread.valid())
if(save_replay_thread.valid()) {
save_replay_thread.get();
puts(save_replay_output_filepath.c_str());
}

for(AudioTrack &audio_track : audio_tracks) {
audio_track.thread.join();

@ -13,14 +13,16 @@ static int x11_supports_composite_named_window_pixmap(Display *display) {
return XCompositeQueryVersion(display, &major_version, &minor_version) && (major_version > 0 || minor_version >= 2);
}

int window_texture_init(WindowTexture *window_texture, Display *display, Window window, gsr_gl *gl) {
int window_texture_init(WindowTexture *window_texture, Display *display, Window window, gsr_egl *egl) {
window_texture->display = display;
window_texture->window = window;
window_texture->pixmap = None;
window_texture->glx_pixmap = None;
window_texture->texture_id = 0;
window_texture->target_texture_id = 0;
window_texture->texture_width = 0;
window_texture->texture_height = 0;
window_texture->redirected = 0;
window_texture->gl = gl;
window_texture->egl = egl;

if(!x11_supports_composite_named_window_pixmap(display))
return 1;
@ -32,14 +34,13 @@ int window_texture_init(WindowTexture *window_texture, Display *display, Window

static void window_texture_cleanup(WindowTexture *self, int delete_texture) {
if(delete_texture && self->texture_id) {
self->gl->glDeleteTextures(1, &self->texture_id);
self->egl->glDeleteTextures(1, &self->texture_id);
self->texture_id = 0;
}

if(self->glx_pixmap) {
self->gl->glXDestroyPixmap(self->display, self->glx_pixmap);
self->gl->glXReleaseTexImageEXT(self->display, self->glx_pixmap, GLX_FRONT_EXT);
self->glx_pixmap = None;
if(delete_texture && self->target_texture_id) {
self->egl->glDeleteTextures(1, &self->target_texture_id);
self->target_texture_id = 0;
}

if(self->pixmap) {
@ -56,118 +57,84 @@ void window_texture_deinit(WindowTexture *self) {
window_texture_cleanup(self, 1);
}


#define EGL_TRUE 1
#define EGL_IMAGE_PRESERVED_KHR 0x30D2
#define EGL_NATIVE_PIXMAP_KHR 0x30B0

int window_texture_on_resize(WindowTexture *self) {
window_texture_cleanup(self, 0);

int result = 0;
GLXFBConfig *configs = NULL;
Pixmap pixmap = None;
GLXPixmap glx_pixmap = None;
unsigned int texture_id = 0;
int glx_pixmap_bound = 0;
EGLImage image = NULL;

const int pixmap_config[] = {
GLX_BIND_TO_TEXTURE_RGB_EXT, True,
GLX_DRAWABLE_TYPE, GLX_PIXMAP_BIT | GLX_WINDOW_BIT,
GLX_BIND_TO_TEXTURE_TARGETS_EXT, GLX_TEXTURE_2D_BIT_EXT,
/*GLX_BIND_TO_MIPMAP_TEXTURE_EXT, True,*/
GLX_BUFFER_SIZE, 24,
GLX_RED_SIZE, 8,
GLX_GREEN_SIZE, 8,
GLX_BLUE_SIZE, 8,
GLX_ALPHA_SIZE, 0,
None
const intptr_t pixmap_attrs[] = {
EGL_IMAGE_PRESERVED_KHR, EGL_TRUE,
EGL_NONE,
};

const int pixmap_attribs[] = {
GLX_TEXTURE_TARGET_EXT, GLX_TEXTURE_2D_EXT,
GLX_TEXTURE_FORMAT_EXT, GLX_TEXTURE_FORMAT_RGB_EXT,
/*GLX_MIPMAP_TEXTURE_EXT, True,*/
None
};

XWindowAttributes attr;
if (!XGetWindowAttributes(self->display, self->window, &attr)) {
fprintf(stderr, "Failed to get window attributes\n");
return 1;
}

GLXFBConfig config;
int c;
configs = self->gl->glXChooseFBConfig(self->display, 0, pixmap_config, &c);
if(!configs) {
fprintf(stderr, "Failed to choose fb config\n");
return 1;
}

int found = 0;
for (int i = 0; i < c; i++) {
config = configs[i];
XVisualInfo *visual = self->gl->glXGetVisualFromFBConfig(self->display, config);
if (!visual)
continue;

if (attr.depth != visual->depth) {
XFree(visual);
continue;
}
XFree(visual);
found = 1;
break;
}

if(!found) {
fprintf(stderr, "No matching fb config found\n");
result = 1;
goto cleanup;
}

pixmap = XCompositeNameWindowPixmap(self->display, self->window);
if(!pixmap) {
result = 2;
goto cleanup;
}

glx_pixmap = self->gl->glXCreatePixmap(self->display, config, pixmap, pixmap_attribs);
if(!glx_pixmap) {
result = 3;
goto cleanup;
}

if(self->texture_id == 0) {
self->gl->glGenTextures(1, &texture_id);
self->egl->glGenTextures(1, &texture_id);
if(texture_id == 0) {
result = 4;
goto cleanup;
}
self->gl->glBindTexture(GL_TEXTURE_2D, texture_id);
self->egl->glBindTexture(GL_TEXTURE_2D, texture_id);
} else {
self->gl->glBindTexture(GL_TEXTURE_2D, self->texture_id);
self->egl->glBindTexture(GL_TEXTURE_2D, self->texture_id);
texture_id = self->texture_id;
}

self->gl->glXBindTexImageEXT(self->display, glx_pixmap, GLX_FRONT_EXT, NULL);
glx_pixmap_bound = 1;
image = self->egl->eglCreateImage(self->egl->egl_display, NULL, EGL_NATIVE_PIXMAP_KHR, (EGLClientBuffer)pixmap, pixmap_attrs);
if(!image) {
fprintf(stderr, "eglCreateImage failed\n");
return -1;
}
fprintf(stderr, "gl error: %d\n", self->egl->glGetError());

self->gl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
self->gl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
self->gl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
self->gl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
fprintf(stderr, "image: %p\n", image);
self->egl->glEGLImageTargetTexture2DOES(GL_TEXTURE_2D, image);
if(self->egl->glGetError() != 0) {
fprintf(stderr, "glEGLImageTargetTexture2DOES failed\n");
}

self->gl->glBindTexture(GL_TEXTURE_2D, 0);
self->egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
self->egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
self->egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
self->egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);

self->egl->glBindTexture(GL_TEXTURE_2D, 0);

XFree(configs);
self->pixmap = pixmap;
self->glx_pixmap = glx_pixmap;
if(texture_id != 0)
if(texture_id != 0) {
self->texture_id = texture_id;

self->egl->glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_WIDTH, &self->texture_width);
self->egl->glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_HEIGHT, &self->texture_height);

fprintf(stderr, "texture width: %d, height: %d\n", self->texture_width, self->texture_height);

self->egl->glGenTextures(1, &self->target_texture_id);
self->egl->glBindTexture(GL_TEXTURE_2D, self->target_texture_id);
self->egl->glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, self->texture_width, self->texture_height, 0, GL_RGB, GL_UNSIGNED_BYTE, NULL);
fprintf(stderr, "gl error: %d\n", self->egl->glGetError());
self->egl->glBindTexture(GL_TEXTURE_2D, 0);
}

// TODO: destroyImage(image)
return 0;

cleanup:
if(texture_id != 0) self->gl->glDeleteTextures(1, &texture_id);
if(glx_pixmap) self->gl->glXDestroyPixmap(self->display, glx_pixmap);
if(glx_pixmap_bound) self->gl->glXReleaseTexImageEXT(self->display, glx_pixmap, GLX_FRONT_EXT);
if(texture_id != 0) self->egl->glDeleteTextures(1, &texture_id);
if(pixmap) XFreePixmap(self->display, pixmap);
if(configs) XFree(configs);
return result;
}
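
/* Stripped of the WindowTexture bookkeeping and error paths, the new capture path above boils
 * down to importing the composite pixmap as an EGLImage and binding it to a GL texture. A
 * minimal sketch, not part of the patch: it assumes an initialized EGLDisplay, a current GL
 * context, EGL_KHR_image_pixmap and GL_OES_EGL_image support, and that the extension entry
 * point has been resolved (the patch resolves it through its gsr_egl struct). */
#include <X11/Xlib.h>
#include <EGL/egl.h>
#include <EGL/eglext.h>
#include <GLES2/gl2.h>
#include <GLES2/gl2ext.h>

static GLuint import_pixmap_as_texture(EGLDisplay egl_display, Pixmap pixmap,
                                       PFNGLEGLIMAGETARGETTEXTURE2DOESPROC image_target_texture_2d) {
    const EGLAttrib attrs[] = { EGL_IMAGE_PRESERVED_KHR, EGL_TRUE, EGL_NONE };
    /* Wrap the X pixmap in an EGLImage (EGL_KHR_image_pixmap). */
    EGLImage image = eglCreateImage(egl_display, EGL_NO_CONTEXT, EGL_NATIVE_PIXMAP_KHR,
                                    (EGLClientBuffer)pixmap, attrs);
    if(image == EGL_NO_IMAGE)
        return 0;

    GLuint texture = 0;
    glGenTextures(1, &texture);
    glBindTexture(GL_TEXTURE_2D, texture);
    /* Make the texture an alias of the pixmap contents (GL_OES_EGL_image). */
    image_target_texture_2d(GL_TEXTURE_2D, image);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
    glBindTexture(GL_TEXTURE_2D, 0);

    /* The texture keeps the underlying storage alive; the EGLImage handle can be released. */
    eglDestroyImage(egl_display, image);
    return texture;
}
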