Compare commits

...

30 Commits
v6.8 ... v6.10

Author SHA1 Message Date
Maxim Devaev
9ec59143dd Bump version: 6.9 → 6.10 2024-04-01 22:03:40 +03:00
Maxim Devaev
e059a21ef9 refactoring 2024-04-01 21:40:51 +03:00
Maxim Devaev
074ce86f67 using fps_meta instead of flags 2024-04-01 00:12:42 +03:00
Maxim Devaev
b8b67de5cf mutexless fpsi at all 2024-03-31 17:33:51 +03:00
Maxim Devaev
5f3198e72f sort of fps reset 2024-03-30 21:37:13 +02:00
Maxim Devaev
3a3889d02c fpsi: mutexless mode 2024-03-30 19:34:43 +02:00
Maxim Devaev
88203f9c53 fix 2024-03-30 19:05:59 +02:00
Maxim Devaev
24aca349a3 we don't need us_fpsi_reset() anymore 2024-03-30 19:05:15 +02:00
Maxim Devaev
a9e0cb49e9 h264 and drm statistics in http 2024-03-30 17:48:15 +02:00
Maxim Devaev
4ec3f11935 refactoring 2024-03-30 16:10:46 +02:00
Maxim Devaev
14e9d9f7af fps -> fpsi, store frame meta 2024-03-30 15:35:14 +02:00
Maxim Devaev
580ca68291 US_FRAME_META_DECLARE 2024-03-30 13:13:17 +02:00
Maxim Devaev
37f3f093dc simplified list declaration 2024-03-30 13:07:20 +02:00
Maxim Devaev
70fa6548fe common fps counter 2024-03-30 12:15:59 +02:00
Maxim Devaev
f8a703f166 refactoring 2024-03-29 22:58:07 +02:00
Maxim Devaev
3f69dd785f fix 2024-03-29 15:41:54 +02:00
Maxim Devaev
8e6c374acf refactoring 2024-03-29 15:36:43 +02:00
Maxim Devaev
caf9ed7bfe refactoring 2024-03-29 03:34:35 +02:00
Maxim Devaev
94b1224456 fix 2024-03-29 02:24:36 +02:00
Maxim Devaev
c8201df720 don't rebuild python module without necessary 2024-03-29 01:15:02 +02:00
Maxim Devaev
e0f09f65a1 new macro US_ONCE_FOR() 2024-03-29 01:02:40 +02:00
Maxim Devaev
4e1f62bfac refactoring 2024-03-29 00:13:08 +02:00
Maxim Devaev
b0b881f199 fix 2024-03-28 18:38:01 +02:00
Maxim Devaev
a21f527bce common error constants 2024-03-28 17:17:22 +02:00
Maxim Devaev
d64077c2d5 Bump version: 6.8 → 6.9 2024-03-27 21:39:03 +02:00
Maxim Devaev
83f12baa61 refactoring 2024-03-27 19:27:28 +02:00
Maxim Devaev
b6fac2608d ustreamer-v4p: bring back busy message 2024-03-27 19:22:21 +02:00
Maxim Devaev
e6ebc12505 replaced comment 2024-03-27 02:14:36 +02:00
Maxim Devaev
8c92ab6f47 ustreamer: blank drm output by timeout 2024-03-26 22:20:08 +02:00
Maxim Devaev
7dc492d875 refactoring 2024-03-26 21:51:47 +02:00
39 changed files with 938 additions and 825 deletions

View File

@@ -1,7 +1,7 @@
[bumpversion]
commit = True
tag = True
current_version = 6.8
current_version = 6.10
parse = (?P<major>\d+)\.(?P<minor>\d+)
serialize =
{major}.{minor}

View File

@@ -32,6 +32,7 @@
#include <opus/opus.h>
#include "uslibs/types.h"
#include "uslibs/errors.h"
#include "uslibs/tools.h"
#include "uslibs/array.h"
#include "uslibs/ring.h"
@@ -185,12 +186,12 @@ int us_audio_get_encoded(us_audio_s *audio, u8 *data, uz *size, u64 *pts) {
}
const int ri = us_ring_consumer_acquire(audio->enc_ring, 0.1);
if (ri < 0) {
return -2;
return US_ERROR_NO_DATA;
}
const _enc_buffer_s *const buf = audio->enc_ring->items[ri];
if (*size < buf->used) {
us_ring_consumer_release(audio->enc_ring, ri);
return -3;
return US_ERROR_NO_DATA;
}
memcpy(data, buf->data, buf->used);
*size = buf->used;

View File

@@ -34,7 +34,7 @@
#include "rtp.h"
typedef struct us_janus_client_sx {
typedef struct {
janus_callbacks *gw;
janus_plugin_session *session;
atomic_bool transmit;
@@ -48,7 +48,7 @@ typedef struct us_janus_client_sx {
us_ring_s *video_ring;
us_ring_s *audio_ring;
US_LIST_STRUCT(struct us_janus_client_sx);
US_LIST_DECLARE;
} us_janus_client_s;

View File

@@ -27,6 +27,7 @@
#include <linux/videodev2.h>
#include "uslibs/types.h"
#include "uslibs/errors.h"
#include "uslibs/tools.h"
#include "uslibs/frame.h"
#include "uslibs/memsinksh.h"
@@ -54,7 +55,7 @@ int us_memsink_fd_wait_frame(int fd, us_memsink_shared_s *mem, u64 last_id) {
}
usleep(1000); // lock_polling
} while (now_ts < deadline_ts);
return -2;
return US_ERROR_NO_DATA;
}
int us_memsink_fd_get_frame(int fd, us_memsink_shared_s *mem, us_frame_s *frame, u64 *frame_id, bool key_required) {

View File

@@ -37,6 +37,7 @@
#include "uslibs/types.h"
#include "uslibs/const.h"
#include "uslibs/errors.h"
#include "uslibs/tools.h"
#include "uslibs/threading.h"
#include "uslibs/list.h"
@@ -178,7 +179,7 @@ static void *_video_sink_thread(void *arg) {
if (ri >= 0 && frame->key) {
atomic_store(&_g_key_required, false);
}
} else if (waited != -2) {
} else if (waited != US_ERROR_NO_DATA) {
goto close_memsink;
}
}

1
janus/src/uslibs/errors.h Symbolic link
View File

@@ -0,0 +1 @@
../../../src/libs/errors.h

View File

@@ -1,6 +1,6 @@
.\" Manpage for ustreamer-dump.
.\" Open an issue or pull request to https://github.com/pikvm/ustreamer to correct errors or typos
.TH USTREAMER-DUMP 1 "version 6.8" "January 2021"
.TH USTREAMER-DUMP 1 "version 6.10" "January 2021"
.SH NAME
ustreamer-dump \- Dump uStreamer's memory sink to file

View File

@@ -1,6 +1,6 @@
.\" Manpage for ustreamer.
.\" Open an issue or pull request to https://github.com/pikvm/ustreamer to correct errors or typos
.TH USTREAMER 1 "version 6.8" "November 2020"
.TH USTREAMER 1 "version 6.10" "November 2020"
.SH NAME
ustreamer \- stream MJPEG video from any V4L2 device to the network

View File

@@ -3,7 +3,7 @@
pkgname=ustreamer
pkgver=6.8
pkgver=6.10
pkgrel=1
pkgdesc="Lightweight and fast MJPEG-HTTP streamer"
url="https://github.com/pikvm/ustreamer"

View File

@@ -6,7 +6,7 @@
include $(TOPDIR)/rules.mk
PKG_NAME:=ustreamer
PKG_VERSION:=6.8
PKG_VERSION:=6.10
PKG_RELEASE:=1
PKG_MAINTAINER:=Maxim Devaev <mdevaev@gmail.com>

View File

@@ -7,7 +7,8 @@ PY ?= python3
# =====
all:
all: root
root: $(shell find src -type f,l)
$(info == PY_BUILD ustreamer-*.so)
rm -rf root
$(ECHO) $(PY) -m build --skip-dependency-check --no-isolation

View File

@@ -17,7 +17,7 @@ def _find_sources(suffix: str) -> list[str]:
if __name__ == "__main__":
setup(
name="ustreamer",
version="6.8",
version="6.10",
description="uStreamer tools",
author="Maxim Devaev",
author_email="mdevaev@gmail.com",

1
python/src/uslibs/errors.h Symbolic link
View File

@@ -0,0 +1 @@
../../../src/libs/errors.h

View File

@@ -14,6 +14,7 @@
#include <Python.h>
#include "uslibs/types.h"
#include "uslibs/errors.h"
#include "uslibs/tools.h"
#include "uslibs/frame.h"
#include "uslibs/memsinksh.h"
@@ -175,9 +176,9 @@ static int _wait_frame(_MemsinkObject *self) {
if (PyErr_CheckSignals() < 0) {
return -1;
}
} while (now_ts < deadline_ts);
return -2;
return US_ERROR_NO_DATA;
}
static PyObject *_MemsinkObject_wait_frame(_MemsinkObject *self, PyObject *args, PyObject *kwargs) {
@@ -194,7 +195,7 @@ static PyObject *_MemsinkObject_wait_frame(_MemsinkObject *self, PyObject *args,
switch (_wait_frame(self)) {
case 0: break;
case -2: Py_RETURN_NONE;
case US_ERROR_NO_DATA: Py_RETURN_NONE;
default: return NULL;
}

View File

@@ -31,10 +31,12 @@
#include <assert.h>
#include "../libs/const.h"
#include "../libs/errors.h"
#include "../libs/tools.h"
#include "../libs/logging.h"
#include "../libs/frame.h"
#include "../libs/memsink.h"
#include "../libs/fpsi.h"
#include "../libs/signal.h"
#include "../libs/options.h"
@@ -220,26 +222,22 @@ static int _dump_sink(
const useconds_t interval_us = interval * 1000000;
us_frame_s *frame = us_frame_init();
us_fpsi_s *fpsi = us_fpsi_init("SINK", false);
us_memsink_s *sink = NULL;
if ((sink = us_memsink_init("input", sink_name, false, 0, false, 0, sink_timeout)) == NULL) {
if ((sink = us_memsink_init_opened("input", sink_name, false, 0, false, 0, sink_timeout)) == NULL) {
goto error;
}
unsigned fps = 0;
unsigned fps_accum = 0;
long long fps_second = 0;
long double last_ts = 0;
while (!_g_stop) {
bool key_requested;
const int error = us_memsink_client_get(sink, frame, &key_requested, key_required);
if (error == 0) {
const int got = us_memsink_client_get(sink, frame, &key_requested, key_required);
if (got == 0) {
key_required = false;
const long double now = us_get_now_monotonic();
const long long now_second = us_floor_ms(now);
char fourcc_str[8];
US_LOG_VERBOSE("Frame: %s - %ux%u -- online=%d, key=%d, kr=%d, gop=%u, latency=%.3Lf, backlog=%.3Lf, size=%zu",
@@ -253,13 +251,7 @@ static int _dump_sink(
US_LOG_DEBUG(" stride=%u, grab_ts=%.3Lf, encode_begin_ts=%.3Lf, encode_end_ts=%.3Lf",
frame->stride, frame->grab_ts, frame->encode_begin_ts, frame->encode_end_ts);
if (now_second != fps_second) {
fps = fps_accum;
fps_accum = 0;
fps_second = now_second;
US_LOG_PERF_FPS("A new second has come; captured_fps=%u", fps);
}
fps_accum += 1;
us_fpsi_update(fpsi, true, NULL);
if (ctx->v_output != NULL) {
ctx->write(ctx->v_output, frame);
@@ -275,7 +267,7 @@ static int _dump_sink(
if (interval_us > 0) {
usleep(interval_us);
}
} else if (error == -2) {
} else if (got == US_ERROR_NO_DATA) {
usleep(1000);
} else {
goto error;
@@ -286,6 +278,7 @@ static int _dump_sink(
error:
US_DELETE(sink, us_memsink_destroy);
us_fpsi_destroy(fpsi);
us_frame_destroy(frame);
US_LOG_INFO("Bye-bye");
return retval;

View File

@@ -41,6 +41,7 @@
#include <linux/v4l2-controls.h>
#include "types.h"
#include "errors.h"
#include "tools.h"
#include "array.h"
#include "logging.h"
@@ -111,11 +112,11 @@ static const char *_standard_to_string(v4l2_std_id standard);
static const char *_io_method_to_string_supported(enum v4l2_memory io_method);
#define _D_LOG_ERROR(x_msg, ...) US_LOG_ERROR("CAP: " x_msg, ##__VA_ARGS__)
#define _D_LOG_PERROR(x_msg, ...) US_LOG_PERROR("CAP: " x_msg, ##__VA_ARGS__)
#define _D_LOG_INFO(x_msg, ...) US_LOG_INFO("CAP: " x_msg, ##__VA_ARGS__)
#define _D_LOG_VERBOSE(x_msg, ...) US_LOG_VERBOSE("CAP: " x_msg, ##__VA_ARGS__)
#define _D_LOG_DEBUG(x_msg, ...) US_LOG_DEBUG("CAP: " x_msg, ##__VA_ARGS__)
#define _LOG_ERROR(x_msg, ...) US_LOG_ERROR("CAP: " x_msg, ##__VA_ARGS__)
#define _LOG_PERROR(x_msg, ...) US_LOG_PERROR("CAP: " x_msg, ##__VA_ARGS__)
#define _LOG_INFO(x_msg, ...) US_LOG_INFO("CAP: " x_msg, ##__VA_ARGS__)
#define _LOG_VERBOSE(x_msg, ...) US_LOG_VERBOSE("CAP: " x_msg, ##__VA_ARGS__)
#define _LOG_DEBUG(x_msg, ...) US_LOG_DEBUG("CAP: " x_msg, ##__VA_ARGS__)
us_capture_s *us_capture_init(void) {
@@ -175,32 +176,31 @@ int us_capture_open(us_capture_s *cap) {
us_capture_runtime_s *const run = cap->run;
if (access(cap->path, R_OK | W_OK) < 0) {
if (run->open_error_reported != -errno) {
run->open_error_reported = -errno; // Don't confuse it with __LINE__
US_ONCE_FOR(run->open_error_once, -errno, {
US_LOG_PERROR("No access to capture device");
}
goto tmp_error;
});
goto error_no_device;
}
_D_LOG_DEBUG("Opening capture device ...");
_LOG_DEBUG("Opening capture device ...");
if ((run->fd = open(cap->path, O_RDWR | O_NONBLOCK)) < 0) {
_D_LOG_PERROR("Can't capture open device");
_LOG_PERROR("Can't open capture device");
goto error;
}
_D_LOG_DEBUG("Capture device fd=%d opened", run->fd);
_LOG_DEBUG("Capture device fd=%d opened", run->fd);
if (cap->dv_timings && cap->persistent) {
_D_LOG_DEBUG("Probing DV-timings or QuerySTD ...");
_LOG_DEBUG("Probing DV-timings or QuerySTD ...");
if (_capture_open_dv_timings(cap, false) < 0) {
const int line = __LINE__;
if (run->open_error_reported != line) {
run->open_error_reported = line;
_D_LOG_ERROR("No signal from source");
}
goto tmp_error;
US_ONCE_FOR(run->open_error_once, __LINE__, {
_LOG_ERROR("No signal from source");
});
goto error_no_signal;
}
}
US_LOG_INFO("Using V4L2 device: %s", cap->path);
if (_capture_open_check_cap(cap) < 0) {
goto error;
}
@@ -232,21 +232,25 @@ int us_capture_open(us_capture_s *cap) {
enum v4l2_buf_type type = run->capture_type;
if (us_xioctl(run->fd, VIDIOC_STREAMON, &type) < 0) {
_D_LOG_PERROR("Can't start capturing");
_LOG_PERROR("Can't start capturing");
goto error;
}
run->streamon = true;
run->open_error_reported = 0;
_D_LOG_INFO("Capturing started");
run->open_error_once = 0;
_LOG_INFO("Capturing started");
return 0;
tmp_error:
error_no_device:
us_capture_close(cap);
return -2;
return US_ERROR_NO_DEVICE;
error_no_signal:
us_capture_close(cap);
return US_ERROR_NO_DATA;
error:
run->open_error_reported = 0;
run->open_error_once = 0;
us_capture_close(cap);
return -1;
}
@@ -258,17 +262,17 @@ void us_capture_close(us_capture_s *cap) {
if (run->streamon) {
say = true;
_D_LOG_DEBUG("Calling VIDIOC_STREAMOFF ...");
_LOG_DEBUG("Calling VIDIOC_STREAMOFF ...");
enum v4l2_buf_type type = run->capture_type;
if (us_xioctl(run->fd, VIDIOC_STREAMOFF, &type) < 0) {
_D_LOG_PERROR("Can't stop capturing");
_LOG_PERROR("Can't stop capturing");
}
run->streamon = false;
}
if (run->bufs != NULL) {
say = true;
_D_LOG_DEBUG("Releasing HW buffers ...");
_LOG_DEBUG("Releasing HW buffers ...");
for (uint index = 0; index < run->n_bufs; ++index) {
us_capture_hwbuf_s *hw = &run->bufs[index];
@@ -277,7 +281,7 @@ void us_capture_close(us_capture_s *cap) {
if (cap->io_method == V4L2_MEMORY_MMAP) {
if (hw->raw.allocated > 0 && hw->raw.data != NULL) {
if (munmap(hw->raw.data, hw->raw.allocated) < 0) {
_D_LOG_PERROR("Can't unmap HW buffer=%u", index);
_LOG_PERROR("Can't unmap HW buffer=%u", index);
}
}
} else { // V4L2_MEMORY_USERPTR
@@ -295,17 +299,17 @@ void us_capture_close(us_capture_s *cap) {
US_CLOSE_FD(run->fd);
if (say) {
_D_LOG_INFO("Capturing stopped");
_LOG_INFO("Capturing stopped");
}
}
int us_capture_grab_buffer(us_capture_s *cap, us_capture_hwbuf_s **hw) {
int us_capture_hwbuf_grab(us_capture_s *cap, us_capture_hwbuf_s **hw) {
// Это сложная функция, которая делает сразу много всего, чтобы получить новый фрейм.
// - Вызывается _capture_wait_buffer() с select() внутри, чтобы подождать новый фрейм
// или эвент V4L2. Обработка эвентов более приоритетна, чем кадров.
// - Если есть новые фреймы, то пропустить их все, пока не закончатся и вернуть
// самый-самый свежий, содержащий при этом валидные данные.
// - Если таковых не нашлось, вернуть -2.
// - Если таковых не нашлось, вернуть US_ERROR_NO_DATA.
// - Ошибка -1 возвращается при любых сбоях.
if (_capture_wait_buffer(cap) < 0) {
@@ -327,7 +331,7 @@ int us_capture_grab_buffer(us_capture_s *cap, us_capture_hwbuf_s **hw) {
uint skipped = 0;
bool broken = false;
_D_LOG_DEBUG("Grabbing hw buffer ...");
_LOG_DEBUG("Grabbing hw buffer ...");
do {
struct v4l2_buffer new = {0};
@@ -343,7 +347,7 @@ int us_capture_grab_buffer(us_capture_s *cap, us_capture_hwbuf_s **hw) {
if (new_got) {
if (new.index >= run->n_bufs) {
_D_LOG_ERROR("V4L2 error: grabbed invalid HW buffer=%u, n_bufs=%u", new.index, run->n_bufs);
_LOG_ERROR("V4L2 error: grabbed invalid HW buffer=%u, n_bufs=%u", new.index, run->n_bufs);
return -1;
}
@@ -351,7 +355,7 @@ int us_capture_grab_buffer(us_capture_s *cap, us_capture_hwbuf_s **hw) {
# define FRAME_DATA(x_buf) run->bufs[x_buf.index].raw.data
if (GRABBED(new)) {
_D_LOG_ERROR("V4L2 error: grabbed HW buffer=%u is already used", new.index);
_LOG_ERROR("V4L2 error: grabbed HW buffer=%u is already used", new.index);
return -1;
}
GRABBED(new) = true;
@@ -362,9 +366,9 @@ int us_capture_grab_buffer(us_capture_s *cap, us_capture_hwbuf_s **hw) {
broken = !_capture_is_buffer_valid(cap, &new, FRAME_DATA(new));
if (broken) {
_D_LOG_DEBUG("Releasing HW buffer=%u (broken frame) ...", new.index);
_LOG_DEBUG("Releasing HW buffer=%u (broken frame) ...", new.index);
if (us_xioctl(run->fd, VIDIOC_QBUF, &new) < 0) {
_D_LOG_PERROR("Can't release HW buffer=%u (broken frame)", new.index);
_LOG_PERROR("Can't release HW buffer=%u (broken frame)", new.index);
return -1;
}
GRABBED(new) = false;
@@ -373,7 +377,7 @@ int us_capture_grab_buffer(us_capture_s *cap, us_capture_hwbuf_s **hw) {
if (buf_got) {
if (us_xioctl(run->fd, VIDIOC_QBUF, &buf) < 0) {
_D_LOG_PERROR("Can't release HW buffer=%u (skipped frame)", buf.index);
_LOG_PERROR("Can't release HW buffer=%u (skipped frame)", buf.index);
return -1;
}
GRABBED(buf) = false;
@@ -392,10 +396,10 @@ int us_capture_grab_buffer(us_capture_s *cap, us_capture_hwbuf_s **hw) {
if (buf_got) {
break; // Process any latest valid frame
} else if (broken) {
return -2; // If we have only broken frames on this capture session
return US_ERROR_NO_DATA; // If we have only broken frames on this capture session
}
}
_D_LOG_PERROR("Can't grab HW buffer");
_LOG_PERROR("Can't grab HW buffer");
return -1;
}
} while (true);
@@ -412,29 +416,29 @@ int us_capture_grab_buffer(us_capture_s *cap, us_capture_hwbuf_s **hw) {
_v4l2_buffer_copy(&buf, &(*hw)->buf);
(*hw)->raw.grab_ts = (ldf)((buf.timestamp.tv_sec * (u64)1000) + (buf.timestamp.tv_usec / 1000)) / 1000;
_D_LOG_DEBUG("Grabbed HW buffer=%u: bytesused=%u, grab_ts=%.3Lf, latency=%.3Lf, skipped=%u",
_LOG_DEBUG("Grabbed HW buffer=%u: bytesused=%u, grab_ts=%.3Lf, latency=%.3Lf, skipped=%u",
buf.index, buf.bytesused, (*hw)->raw.grab_ts, us_get_now_monotonic() - (*hw)->raw.grab_ts, skipped);
return buf.index;
}
int us_capture_release_buffer(us_capture_s *cap, us_capture_hwbuf_s *hw) {
int us_capture_hwbuf_release(us_capture_s *cap, us_capture_hwbuf_s *hw) {
assert(atomic_load(&hw->refs) == 0);
const uint index = hw->buf.index;
_D_LOG_DEBUG("Releasing HW buffer=%u ...", index);
_LOG_DEBUG("Releasing HW buffer=%u ...", index);
if (us_xioctl(cap->run->fd, VIDIOC_QBUF, &hw->buf) < 0) {
_D_LOG_PERROR("Can't release HW buffer=%u", index);
_LOG_PERROR("Can't release HW buffer=%u", index);
return -1;
}
hw->grabbed = false;
_D_LOG_DEBUG("HW buffer=%u released", index);
_LOG_DEBUG("HW buffer=%u released", index);
return 0;
}
void us_capture_buffer_incref(us_capture_hwbuf_s *hw) {
void us_capture_hwbuf_incref(us_capture_hwbuf_s *hw) {
atomic_fetch_add(&hw->refs, 1);
}
void us_capture_buffer_decref(us_capture_hwbuf_s *hw) {
void us_capture_hwbuf_decref(us_capture_hwbuf_s *hw) {
atomic_fetch_sub(&hw->refs, 1);
}
@@ -455,7 +459,7 @@ int _capture_wait_buffer(us_capture_s *cap) {
timeout.tv_sec = cap->timeout;
timeout.tv_usec = 0;
_D_LOG_DEBUG("Calling select() on video device ...");
_LOG_DEBUG("Calling select() on video device ...");
bool has_read = false;
bool has_error = false;
@@ -464,15 +468,15 @@ int _capture_wait_buffer(us_capture_s *cap) {
has_read = FD_ISSET(run->fd, &read_fds);
has_error = FD_ISSET(run->fd, &error_fds);
}
_D_LOG_DEBUG("Device select() --> %d; has_read=%d, has_error=%d", selected, has_read, has_error);
_LOG_DEBUG("Device select() --> %d; has_read=%d, has_error=%d", selected, has_read, has_error);
if (selected < 0) {
if (errno != EINTR) {
_D_LOG_PERROR("Device select() error");
_LOG_PERROR("Device select() error");
}
return -1;
} else if (selected == 0) {
_D_LOG_ERROR("Device select() timeout");
_LOG_ERROR("Device select() timeout");
return -1;
} else {
if (has_error && _capture_consume_event(cap) < 0) {
@@ -485,15 +489,15 @@ int _capture_wait_buffer(us_capture_s *cap) {
static int _capture_consume_event(us_capture_s *cap) {
struct v4l2_event event;
if (us_xioctl(cap->run->fd, VIDIOC_DQEVENT, &event) < 0) {
_D_LOG_PERROR("Can't consume V4L2 event");
_LOG_PERROR("Can't consume V4L2 event");
return -1;
}
switch (event.type) {
case V4L2_EVENT_SOURCE_CHANGE:
_D_LOG_INFO("Got V4L2_EVENT_SOURCE_CHANGE: Source changed");
_LOG_INFO("Got V4L2_EVENT_SOURCE_CHANGE: Source changed");
return -1;
case V4L2_EVENT_EOS:
_D_LOG_INFO("Got V4L2_EVENT_EOS: End of stream");
_LOG_INFO("Got V4L2_EVENT_EOS: End of stream");
return -1;
}
return 0;
@@ -516,7 +520,7 @@ bool _capture_is_buffer_valid(us_capture_s *cap, const struct v4l2_buffer *buf,
// For example a VGA (640x480) webcam frame is normally >= 8kByte large,
// corrupted frames are smaller.
if (buf->bytesused < cap->min_frame_size) {
_D_LOG_DEBUG("Dropped too small frame, assuming it was broken: buffer=%u, bytesused=%u",
_LOG_DEBUG("Dropped too small frame, assuming it was broken: buffer=%u, bytesused=%u",
buf->index, buf->bytesused);
return false;
}
@@ -532,7 +536,7 @@ bool _capture_is_buffer_valid(us_capture_s *cap, const struct v4l2_buffer *buf,
if (us_is_jpeg(cap->run->format)) {
if (buf->bytesused < 125) {
// https://stackoverflow.com/questions/2253404/what-is-the-smallest-valid-jpeg-file-size-in-bytes
_D_LOG_DEBUG("Discarding invalid frame, too small to be a valid JPEG: bytesused=%u", buf->bytesused);
_LOG_DEBUG("Discarding invalid frame, too small to be a valid JPEG: bytesused=%u", buf->bytesused);
return false;
}
@@ -540,7 +544,7 @@ bool _capture_is_buffer_valid(us_capture_s *cap, const struct v4l2_buffer *buf,
const u8 *const eoi_ptr = end_ptr - 2;
const u16 eoi_marker = (((u16)(eoi_ptr[0]) << 8) | eoi_ptr[1]);
if (eoi_marker != 0xFFD9 && eoi_marker != 0xD900 && eoi_marker != 0x0000) {
_D_LOG_DEBUG("Discarding truncated JPEG frame: eoi_marker=0x%04x, bytesused=%u", eoi_marker, buf->bytesused);
_LOG_DEBUG("Discarding truncated JPEG frame: eoi_marker=0x%04x, bytesused=%u", eoi_marker, buf->bytesused);
return false;
}
}
@@ -552,47 +556,47 @@ static int _capture_open_check_cap(us_capture_s *cap) {
us_capture_runtime_s *const run = cap->run;
struct v4l2_capability cpb = {0};
_D_LOG_DEBUG("Querying device capabilities ...");
_LOG_DEBUG("Querying device capabilities ...");
if (us_xioctl(run->fd, VIDIOC_QUERYCAP, &cpb) < 0) {
_D_LOG_PERROR("Can't query device capabilities");
_LOG_PERROR("Can't query device capabilities");
return -1;
}
if (cpb.capabilities & V4L2_CAP_VIDEO_CAPTURE) {
run->capture_type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
run->capture_mplane = false;
_D_LOG_INFO("Using capture type: single-planar");
_LOG_INFO("Using capture type: single-planar");
} else if (cpb.capabilities & V4L2_CAP_VIDEO_CAPTURE_MPLANE) {
run->capture_type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
run->capture_mplane = true;
_D_LOG_INFO("Using capture type: multi-planar");
_LOG_INFO("Using capture type: multi-planar");
} else {
_D_LOG_ERROR("Video capture is not supported by device");
_LOG_ERROR("Video capture is not supported by device");
return -1;
}
if (!(cpb.capabilities & V4L2_CAP_STREAMING)) {
_D_LOG_ERROR("Device doesn't support streaming IO");
_LOG_ERROR("Device doesn't support streaming IO");
return -1;
}
if (!run->capture_mplane) {
int input = cap->input; // Needs a pointer to int for ioctl()
_D_LOG_INFO("Using input channel: %d", input);
_LOG_INFO("Using input channel: %d", input);
if (us_xioctl(run->fd, VIDIOC_S_INPUT, &input) < 0) {
_D_LOG_ERROR("Can't set input channel");
_LOG_ERROR("Can't set input channel");
return -1;
}
}
if (cap->standard != V4L2_STD_UNKNOWN) {
_D_LOG_INFO("Using TV standard: %s", _standard_to_string(cap->standard));
_LOG_INFO("Using TV standard: %s", _standard_to_string(cap->standard));
if (us_xioctl(run->fd, VIDIOC_S_STD, &cap->standard) < 0) {
_D_LOG_ERROR("Can't set video standard");
_LOG_ERROR("Can't set video standard");
return -1;
}
} else {
_D_LOG_DEBUG("Using TV standard: DEFAULT");
_LOG_DEBUG("Using TV standard: DEFAULT");
}
return 0;
}
@@ -605,7 +609,7 @@ static int _capture_open_dv_timings(us_capture_s *cap, bool apply) {
int dv_errno = 0;
struct v4l2_dv_timings dv = {0};
_D_LOG_DEBUG("Querying DV-timings (apply=%u) ...", apply);
_LOG_DEBUG("Querying DV-timings (apply=%u) ...", apply);
if (us_xioctl(run->fd, VIDIOC_QUERY_DV_TIMINGS, &dv) < 0) {
// TC358743 errors here (see in the kernel: drivers/media/i2c/tc358743.c):
// - ENOLINK: No valid signal (SYS_STATUS & MASK_S_TMDS)
@@ -623,18 +627,18 @@ static int _capture_open_dv_timings(us_capture_s *cap, bool apply) {
const uint vtot = V4L2_DV_BT_FRAME_HEIGHT(&dv.bt) / (dv.bt.interlaced ? 2 : 1);
const uint fps = ((htot * vtot) > 0 ? ((100 * (u64)dv.bt.pixelclock)) / (htot * vtot) : 0);
hz = (fps / 100) + (fps % 100) / 100.0;
_D_LOG_INFO("Detected DV-timings: %ux%u%s%.02f, pixclk=%llu, vsync=%u, hsync=%u",
_LOG_INFO("Detected DV-timings: %ux%u%s%.02f, pixclk=%llu, vsync=%u, hsync=%u",
dv.bt.width, dv.bt.height, (dv.bt.interlaced ? "i" : "p"), hz,
(ull)dv.bt.pixelclock, dv.bt.vsync, dv.bt.hsync); // See #11 about %llu
} else {
_D_LOG_INFO("Detected DV-timings: %ux%u, pixclk=%llu, vsync=%u, hsync=%u",
_LOG_INFO("Detected DV-timings: %ux%u, pixclk=%llu, vsync=%u, hsync=%u",
dv.bt.width, dv.bt.height,
(ull)dv.bt.pixelclock, dv.bt.vsync, dv.bt.hsync);
}
_D_LOG_DEBUG("Applying DV-timings ...");
_LOG_DEBUG("Applying DV-timings ...");
if (us_xioctl(run->fd, VIDIOC_S_DV_TIMINGS, &dv) < 0) {
_D_LOG_PERROR("Failed to apply DV-timings");
_LOG_PERROR("Failed to apply DV-timings");
return -1;
}
if (_capture_apply_resolution(cap, dv.bt.width, dv.bt.height, hz) < 0) {
@@ -643,12 +647,12 @@ static int _capture_open_dv_timings(us_capture_s *cap, bool apply) {
goto subscribe;
querystd:
_D_LOG_DEBUG("Failed to query DV-timings, trying QuerySTD ...");
_LOG_DEBUG("Failed to query DV-timings, trying QuerySTD ...");
if (us_xioctl(run->fd, VIDIOC_QUERYSTD, &cap->standard) < 0) {
if (apply) {
char *std_error = us_errno_to_string(errno); // Read the errno first
char *dv_error = us_errno_to_string(dv_errno);
_D_LOG_ERROR("Failed to query DV-timings (%s) and QuerySTD (%s)", dv_error, std_error);
_LOG_ERROR("Failed to query DV-timings (%s) and QuerySTD (%s)", dv_error, std_error);
free(dv_error);
free(std_error);
}
@@ -657,17 +661,17 @@ querystd:
goto probe_only;
}
if (us_xioctl(run->fd, VIDIOC_S_STD, &cap->standard) < 0) {
_D_LOG_PERROR("Can't set apply standard: %s", _standard_to_string(cap->standard));
_LOG_PERROR("Can't set apply standard: %s", _standard_to_string(cap->standard));
return -1;
}
_D_LOG_DEBUG("Applied new video standard: %s", _standard_to_string(cap->standard));
_LOG_DEBUG("Applied new video standard: %s", _standard_to_string(cap->standard));
subscribe:
; // Empty statement for the goto label above
struct v4l2_event_subscription sub = {.type = V4L2_EVENT_SOURCE_CHANGE};
_D_LOG_DEBUG("Subscribing to V4L2_EVENT_SOURCE_CHANGE ...")
_LOG_DEBUG("Subscribing to V4L2_EVENT_SOURCE_CHANGE ...")
if (us_xioctl(cap->run->fd, VIDIOC_SUBSCRIBE_EVENT, &sub) < 0) {
_D_LOG_PERROR("Can't subscribe to V4L2_EVENT_SOURCE_CHANGE");
_LOG_PERROR("Can't subscribe to V4L2_EVENT_SOURCE_CHANGE");
return -1;
}
@@ -698,15 +702,15 @@ static int _capture_open_format(us_capture_s *cap, bool first) {
}
// Set format
_D_LOG_DEBUG("Probing device format=%s, stride=%u, resolution=%ux%u ...",
_LOG_DEBUG("Probing device format=%s, stride=%u, resolution=%ux%u ...",
_format_to_string_supported(cap->format), stride, run->width, run->height);
if (us_xioctl(run->fd, VIDIOC_S_FMT, &fmt) < 0) {
_D_LOG_PERROR("Can't set device format");
_LOG_PERROR("Can't set device format");
return -1;
}
if (fmt.type != run->capture_type) {
_D_LOG_ERROR("Capture format mismatch, please report to the developer");
_LOG_ERROR("Capture format mismatch, please report to the developer");
return -1;
}
@@ -716,7 +720,7 @@ static int _capture_open_format(us_capture_s *cap, bool first) {
// Check resolution
bool retry = false;
if (FMT(width) != run->width || FMT(height) != run->height) {
_D_LOG_ERROR("Requested resolution=%ux%u is unavailable", run->width, run->height);
_LOG_ERROR("Requested resolution=%ux%u is unavailable", run->width, run->height);
retry = true;
}
if (_capture_apply_resolution(cap, FMT(width), FMT(height), run->hz) < 0) {
@@ -725,27 +729,27 @@ static int _capture_open_format(us_capture_s *cap, bool first) {
if (first && retry) {
return _capture_open_format(cap, false);
}
_D_LOG_INFO("Using resolution: %ux%u", run->width, run->height);
_LOG_INFO("Using resolution: %ux%u", run->width, run->height);
// Check format
if (FMT(pixelformat) != cap->format) {
_D_LOG_ERROR("Could not obtain the requested format=%s; driver gave us %s",
_LOG_ERROR("Could not obtain the requested format=%s; driver gave us %s",
_format_to_string_supported(cap->format),
_format_to_string_supported(FMT(pixelformat)));
char *format_str;
if ((format_str = (char*)_format_to_string_nullable(FMT(pixelformat))) != NULL) {
_D_LOG_INFO("Falling back to format=%s", format_str);
_LOG_INFO("Falling back to format=%s", format_str);
} else {
char fourcc_str[8];
_D_LOG_ERROR("Unsupported format=%s (fourcc)",
_LOG_ERROR("Unsupported format=%s (fourcc)",
us_fourcc_to_string(FMT(pixelformat), fourcc_str, 8));
return -1;
}
}
run->format = FMT(pixelformat);
_D_LOG_INFO("Using format: %s", _format_to_string_supported(run->format));
_LOG_INFO("Using format: %s", _format_to_string_supported(run->format));
if (cap->format_swap_rgb) {
// Userspace workaround for TC358743 RGB/BGR bug:
@@ -756,7 +760,7 @@ static int _capture_open_format(us_capture_s *cap, bool first) {
case V4L2_PIX_FMT_BGR24: swapped = V4L2_PIX_FMT_RGB24; break;
}
if (swapped > 0) {
_D_LOG_INFO("Using format swap: %s -> %s",
_LOG_INFO("Using format swap: %s -> %s",
_format_to_string_supported(run->format),
_format_to_string_supported(swapped));
run->format = swapped;
@@ -778,18 +782,18 @@ static void _capture_open_hw_fps(us_capture_s *cap) {
run->hw_fps = 0;
struct v4l2_streamparm setfps = {.type = run->capture_type};
_D_LOG_DEBUG("Querying HW FPS ...");
_LOG_DEBUG("Querying HW FPS ...");
if (us_xioctl(run->fd, VIDIOC_G_PARM, &setfps) < 0) {
if (errno == ENOTTY) { // Quiet message for TC358743
_D_LOG_INFO("Querying HW FPS changing is not supported");
_LOG_INFO("Querying HW FPS changing is not supported");
} else {
_D_LOG_PERROR("Can't query HW FPS changing");
_LOG_PERROR("Can't query HW FPS changing");
}
return;
}
if (!(setfps.parm.capture.capability & V4L2_CAP_TIMEPERFRAME)) {
_D_LOG_INFO("Changing HW FPS is not supported");
_LOG_INFO("Changing HW FPS is not supported");
return;
}
@@ -801,25 +805,25 @@ static void _capture_open_hw_fps(us_capture_s *cap) {
SETFPS_TPF(denominator) = (cap->desired_fps == 0 ? 255 : cap->desired_fps);
if (us_xioctl(run->fd, VIDIOC_S_PARM, &setfps) < 0) {
_D_LOG_PERROR("Can't set HW FPS");
_LOG_PERROR("Can't set HW FPS");
return;
}
if (SETFPS_TPF(numerator) != 1) {
_D_LOG_ERROR("Invalid HW FPS numerator: %u != 1", SETFPS_TPF(numerator));
_LOG_ERROR("Invalid HW FPS numerator: %u != 1", SETFPS_TPF(numerator));
return;
}
if (SETFPS_TPF(denominator) == 0) { // Не знаю, бывает ли так, но пускай на всякий случай
_D_LOG_ERROR("Invalid HW FPS denominator: 0");
_LOG_ERROR("Invalid HW FPS denominator: 0");
return;
}
run->hw_fps = SETFPS_TPF(denominator);
if (cap->desired_fps != run->hw_fps) {
_D_LOG_INFO("Using HW FPS: %u -> %u (coerced)", cap->desired_fps, run->hw_fps);
_LOG_INFO("Using HW FPS: %u -> %u (coerced)", cap->desired_fps, run->hw_fps);
} else {
_D_LOG_INFO("Using HW FPS: %u", run->hw_fps);
_LOG_INFO("Using HW FPS: %u", run->hw_fps);
}
# undef SETFPS_TPF
@@ -831,11 +835,11 @@ static void _capture_open_jpeg_quality(us_capture_s *cap) {
if (us_is_jpeg(run->format)) {
struct v4l2_jpegcompression comp = {0};
if (us_xioctl(run->fd, VIDIOC_G_JPEGCOMP, &comp) < 0) {
_D_LOG_ERROR("Device doesn't support setting of HW encoding quality parameters");
_LOG_ERROR("Device doesn't support setting of HW encoding quality parameters");
} else {
comp.quality = cap->jpeg_quality;
if (us_xioctl(run->fd, VIDIOC_S_JPEGCOMP, &comp) < 0) {
_D_LOG_ERROR("Can't change MJPEG quality for JPEG source with HW pass-through encoder");
_LOG_ERROR("Can't change MJPEG quality for JPEG source with HW pass-through encoder");
} else {
quality = cap->jpeg_quality;
}
@@ -845,7 +849,7 @@ static void _capture_open_jpeg_quality(us_capture_s *cap) {
}
static int _capture_open_io_method(us_capture_s *cap) {
_D_LOG_INFO("Using IO method: %s", _io_method_to_string_supported(cap->io_method));
_LOG_INFO("Using IO method: %s", _io_method_to_string_supported(cap->io_method));
switch (cap->io_method) {
case V4L2_MEMORY_MMAP: return _capture_open_io_method_mmap(cap);
case V4L2_MEMORY_USERPTR: return _capture_open_io_method_userptr(cap);
@@ -862,20 +866,20 @@ static int _capture_open_io_method_mmap(us_capture_s *cap) {
.type = run->capture_type,
.memory = V4L2_MEMORY_MMAP,
};
_D_LOG_DEBUG("Requesting %u device buffers for MMAP ...", req.count);
_LOG_DEBUG("Requesting %u device buffers for MMAP ...", req.count);
if (us_xioctl(run->fd, VIDIOC_REQBUFS, &req) < 0) {
_D_LOG_PERROR("Device '%s' doesn't support MMAP method", cap->path);
_LOG_PERROR("Device '%s' doesn't support MMAP method", cap->path);
return -1;
}
if (req.count < 1) {
_D_LOG_ERROR("Insufficient buffer memory: %u", req.count);
_LOG_ERROR("Insufficient buffer memory: %u", req.count);
return -1;
} else {
_D_LOG_INFO("Requested %u device buffers, got %u", cap->n_bufs, req.count);
_LOG_INFO("Requested %u device buffers, got %u", cap->n_bufs, req.count);
}
_D_LOG_DEBUG("Allocating device buffers ...");
_LOG_DEBUG("Allocating device buffers ...");
US_CALLOC(run->bufs, req.count);
@@ -890,9 +894,9 @@ static int _capture_open_io_method_mmap(us_capture_s *cap) {
buf.length = VIDEO_MAX_PLANES;
}
_D_LOG_DEBUG("Calling us_xioctl(VIDIOC_QUERYBUF) for device buffer=%u ...", run->n_bufs);
_LOG_DEBUG("Calling us_xioctl(VIDIOC_QUERYBUF) for device buffer=%u ...", run->n_bufs);
if (us_xioctl(run->fd, VIDIOC_QUERYBUF, &buf) < 0) {
_D_LOG_PERROR("Can't VIDIOC_QUERYBUF");
_LOG_PERROR("Can't VIDIOC_QUERYBUF");
return -1;
}
@@ -901,13 +905,13 @@ static int _capture_open_io_method_mmap(us_capture_s *cap) {
const uz buf_size = (run->capture_mplane ? buf.m.planes[0].length : buf.length);
const off_t buf_offset = (run->capture_mplane ? buf.m.planes[0].m.mem_offset : buf.m.offset);
_D_LOG_DEBUG("Mapping device buffer=%u ...", run->n_bufs);
_LOG_DEBUG("Mapping device buffer=%u ...", run->n_bufs);
if ((hw->raw.data = mmap(
NULL, buf_size,
PROT_READ | PROT_WRITE, MAP_SHARED,
run->fd, buf_offset
)) == MAP_FAILED) {
_D_LOG_PERROR("Can't map device buffer=%u", run->n_bufs);
_LOG_PERROR("Can't map device buffer=%u", run->n_bufs);
return -1;
}
assert(hw->raw.data != NULL);
@@ -930,20 +934,20 @@ static int _capture_open_io_method_userptr(us_capture_s *cap) {
.type = run->capture_type,
.memory = V4L2_MEMORY_USERPTR,
};
_D_LOG_DEBUG("Requesting %u device buffers for USERPTR ...", req.count);
_LOG_DEBUG("Requesting %u device buffers for USERPTR ...", req.count);
if (us_xioctl(run->fd, VIDIOC_REQBUFS, &req) < 0) {
_D_LOG_PERROR("Device '%s' doesn't support USERPTR method", cap->path);
_LOG_PERROR("Device '%s' doesn't support USERPTR method", cap->path);
return -1;
}
if (req.count < 1) {
_D_LOG_ERROR("Insufficient buffer memory: %u", req.count);
_LOG_ERROR("Insufficient buffer memory: %u", req.count);
return -1;
} else {
_D_LOG_INFO("Requested %u device buffers, got %u", cap->n_bufs, req.count);
_LOG_INFO("Requested %u device buffers, got %u", cap->n_bufs, req.count);
}
_D_LOG_DEBUG("Allocating device buffers ...");
_LOG_DEBUG("Allocating device buffers ...");
US_CALLOC(run->bufs, req.count);
@@ -983,9 +987,9 @@ static int _capture_open_queue_buffers(us_capture_s *cap) {
buf.length = run->bufs[index].raw.allocated;
}
_D_LOG_DEBUG("Calling us_xioctl(VIDIOC_QBUF) for buffer=%u ...", index);
_LOG_DEBUG("Calling us_xioctl(VIDIOC_QBUF) for buffer=%u ...", index);
if (us_xioctl(run->fd, VIDIOC_QBUF, &buf) < 0) {
_D_LOG_PERROR("Can't VIDIOC_QBUF");
_LOG_PERROR("Can't VIDIOC_QBUF");
return -1;
}
}
@@ -1000,9 +1004,9 @@ static int _capture_open_export_to_dma(us_capture_s *cap) {
.type = run->capture_type,
.index = index,
};
_D_LOG_DEBUG("Exporting device buffer=%u to DMA ...", index);
_LOG_DEBUG("Exporting device buffer=%u to DMA ...", index);
if (us_xioctl(run->fd, VIDIOC_EXPBUF, &exp) < 0) {
_D_LOG_PERROR("Can't export device buffer=%u to DMA", index);
_LOG_PERROR("Can't export device buffer=%u to DMA", index);
goto error;
}
run->bufs[index].dma_fd = exp.fd;
@@ -1023,7 +1027,7 @@ static int _capture_apply_resolution(us_capture_s *cap, uint width, uint height,
width == 0 || width > US_VIDEO_MAX_WIDTH
|| height == 0 || height > US_VIDEO_MAX_HEIGHT
) {
_D_LOG_ERROR("Requested forbidden resolution=%ux%u: min=1x1, max=%ux%u",
_LOG_ERROR("Requested forbidden resolution=%ux%u: min=1x1, max=%ux%u",
width, height, US_VIDEO_MAX_WIDTH, US_VIDEO_MAX_HEIGHT);
return -1;
}
@@ -1099,7 +1103,7 @@ static int _capture_query_control(
if (us_xioctl(cap->run->fd, VIDIOC_QUERYCTRL, query) < 0 || query->flags & V4L2_CTRL_FLAG_DISABLED) {
if (!quiet) {
_D_LOG_ERROR("Changing control %s is unsupported", name);
_LOG_ERROR("Changing control %s is unsupported", name);
}
return -1;
}
@@ -1112,7 +1116,7 @@ static void _capture_set_control(
if (value < query->minimum || value > query->maximum || value % query->step != 0) {
if (!quiet) {
_D_LOG_ERROR("Invalid value %d of control %s: min=%d, max=%d, default=%d, step=%u",
_LOG_ERROR("Invalid value %d of control %s: min=%d, max=%d, default=%d, step=%u",
value, name, query->minimum, query->maximum, query->default_value, query->step);
}
return;
@@ -1124,10 +1128,10 @@ static void _capture_set_control(
};
if (us_xioctl(cap->run->fd, VIDIOC_S_CTRL, &ctl) < 0) {
if (!quiet) {
_D_LOG_PERROR("Can't set control %s", name);
_LOG_PERROR("Can't set control %s", name);
}
} else if (!quiet) {
_D_LOG_INFO("Applying control %s: %d", name, ctl.value);
_LOG_INFO("Applying control %s: %d", name, ctl.value);
}
}

View File

@@ -67,7 +67,7 @@ typedef struct {
enum v4l2_buf_type capture_type;
bool capture_mplane;
bool streamon;
int open_error_reported;
int open_error_once;
} us_capture_runtime_s;
typedef enum {
@@ -132,8 +132,8 @@ int us_capture_parse_io_method(const char *str);
int us_capture_open(us_capture_s *cap);
void us_capture_close(us_capture_s *cap);
int us_capture_grab_buffer(us_capture_s *cap, us_capture_hwbuf_s **hw);
int us_capture_release_buffer(us_capture_s *cap, us_capture_hwbuf_s *hw);
int us_capture_hwbuf_grab(us_capture_s *cap, us_capture_hwbuf_s **hw);
int us_capture_hwbuf_release(us_capture_s *cap, us_capture_hwbuf_s *hw);
void us_capture_buffer_incref(us_capture_hwbuf_s *hw);
void us_capture_buffer_decref(us_capture_hwbuf_s *hw);
void us_capture_hwbuf_incref(us_capture_hwbuf_s *hw);
void us_capture_hwbuf_decref(us_capture_hwbuf_s *hw);

View File

@@ -26,7 +26,7 @@
#define US_VERSION_MAJOR 6
#define US_VERSION_MINOR 8
#define US_VERSION_MINOR 10
#define US_MAKE_VERSION2(_major, _minor) #_major "." #_minor
#define US_MAKE_VERSION1(_major, _minor) US_MAKE_VERSION2(_major, _minor)

View File

@@ -38,6 +38,7 @@
#include <libdrm/drm.h>
#include "../types.h"
#include "../errors.h"
#include "../tools.h"
#include "../logging.h"
#include "../frame.h"
@@ -58,11 +59,11 @@ static const char *_connector_type_to_string(u32 type);
static float _get_refresh_rate(const drmModeModeInfo *mode);
#define _D_LOG_ERROR(x_msg, ...) US_LOG_ERROR("DRM: " x_msg, ##__VA_ARGS__)
#define _D_LOG_PERROR(x_msg, ...) US_LOG_PERROR("DRM: " x_msg, ##__VA_ARGS__)
#define _D_LOG_INFO(x_msg, ...) US_LOG_INFO("DRM: " x_msg, ##__VA_ARGS__)
#define _D_LOG_VERBOSE(x_msg, ...) US_LOG_VERBOSE("DRM: " x_msg, ##__VA_ARGS__)
#define _D_LOG_DEBUG(x_msg, ...) US_LOG_DEBUG("DRM: " x_msg, ##__VA_ARGS__)
#define _LOG_ERROR(x_msg, ...) US_LOG_ERROR("DRM: " x_msg, ##__VA_ARGS__)
#define _LOG_PERROR(x_msg, ...) US_LOG_PERROR("DRM: " x_msg, ##__VA_ARGS__)
#define _LOG_INFO(x_msg, ...) US_LOG_INFO("DRM: " x_msg, ##__VA_ARGS__)
#define _LOG_VERBOSE(x_msg, ...) US_LOG_VERBOSE("DRM: " x_msg, ##__VA_ARGS__)
#define _LOG_DEBUG(x_msg, ...) US_LOG_DEBUG("DRM: " x_msg, ##__VA_ARGS__)
us_drm_s *us_drm_init(void) {
@@ -71,6 +72,7 @@ us_drm_s *us_drm_init(void) {
run->fd = -1;
run->status_fd = -1;
run->dpms_state = -1;
run->opened = -1;
run->has_vsync = true;
run->exposing_dma_fd = -1;
run->ft = us_frametext_init();
@@ -81,6 +83,7 @@ us_drm_s *us_drm_init(void) {
drm->path = "/dev/dri/by-path/platform-gpu-card";
drm->port = "HDMI-A-2"; // OUT2 on PiKVM V4 Plus
drm->timeout = 5;
drm->blank_after = 5;
drm->run = run;
return drm;
}
@@ -98,17 +101,18 @@ int us_drm_open(us_drm_s *drm, const us_capture_s *cap) {
switch (_drm_check_status(drm)) {
case 0: break;
case -2: goto unplugged;
case US_ERROR_NO_DEVICE: goto unplugged;
default: goto error;
}
_D_LOG_INFO("Configuring DRM device for %s ...", (cap == NULL ? "STUB" : "DMA"));
_LOG_INFO("Using passthrough: %s[%s]", drm->path, drm->port);
_LOG_INFO("Configuring DRM device for %s ...", (cap == NULL ? "STUB" : "DMA"));
if ((run->fd = open(drm->path, O_RDWR | O_CLOEXEC | O_NONBLOCK)) < 0) {
_D_LOG_PERROR("Can't open DRM device");
_LOG_PERROR("Can't open DRM device");
goto error;
}
_D_LOG_DEBUG("DRM device fd=%d opened", run->fd);
_LOG_DEBUG("DRM device fd=%d opened", run->fd);
int stub = 0; // Open the real device with DMA
if (cap == NULL) {
@@ -117,18 +121,18 @@ int us_drm_open(us_drm_s *drm, const us_capture_s *cap) {
stub = US_DRM_STUB_BAD_FORMAT;
char fourcc_str[8];
us_fourcc_to_string(cap->run->format, fourcc_str, 8);
_D_LOG_ERROR("Input format %s is not supported, forcing to STUB ...", fourcc_str);
_LOG_ERROR("Input format %s is not supported, forcing to STUB ...", fourcc_str);
}
# define CHECK_CAP(x_cap) { \
_D_LOG_DEBUG("Checking %s ...", #x_cap); \
_LOG_DEBUG("Checking %s ...", #x_cap); \
u64 m_check; \
if (drmGetCap(run->fd, x_cap, &m_check) < 0) { \
_D_LOG_PERROR("Can't check " #x_cap); \
_LOG_PERROR("Can't check " #x_cap); \
goto error; \
} \
if (!m_check) { \
_D_LOG_ERROR(#x_cap " is not supported"); \
_LOG_ERROR(#x_cap " is not supported"); \
goto error; \
} \
}
@@ -143,13 +147,13 @@ int us_drm_open(us_drm_s *drm, const us_capture_s *cap) {
const uint hz = (stub > 0 ? 0 : cap->run->hz);
switch (_drm_find_sink(drm, width, height, hz)) {
case 0: break;
case -2: goto unplugged;
case US_ERROR_NO_DEVICE: goto unplugged;
default: goto error;
}
if ((stub == 0) && (width != run->mode.hdisplay || height < run->mode.vdisplay)) {
// We'll try to show something instead of nothing if height != vdisplay
stub = US_DRM_STUB_BAD_RESOLUTION;
_D_LOG_ERROR("There is no appropriate modes for the capture, forcing to STUB ...");
_LOG_ERROR("There is no appropriate modes for the capture, forcing to STUB ...");
}
if (_drm_init_buffers(drm, (stub > 0 ? NULL : cap)) < 0) {
@@ -157,29 +161,30 @@ int us_drm_open(us_drm_s *drm, const us_capture_s *cap) {
}
run->saved_crtc = drmModeGetCrtc(run->fd, run->crtc_id);
_D_LOG_DEBUG("Setting up CRTC ...");
_LOG_DEBUG("Setting up CRTC ...");
if (drmModeSetCrtc(run->fd, run->crtc_id, run->bufs[0].id, 0, 0, &run->conn_id, 1, &run->mode) < 0) {
_D_LOG_PERROR("Can't set CRTC");
_LOG_PERROR("Can't set CRTC");
goto error;
}
run->opened_for_stub = (stub > 0);
_LOG_INFO("Opened for %s ...", (stub > 0 ? "STUB" : "DMA"));
run->exposing_dma_fd = -1;
run->unplugged_reported = false;
_D_LOG_INFO("Opened for %s ...", (run->opened_for_stub ? "STUB" : "DMA"));
return stub;
run->blank_at_ts = 0;
run->opened = stub;
run->once = 0;
return run->opened;
error:
us_drm_close(drm);
return -1;
return run->opened; // -1 after us_drm_close()
unplugged:
if (!run->unplugged_reported) {
_D_LOG_ERROR("Display is not plugged");
run->unplugged_reported = true;
}
US_ONCE_FOR(run->once, __LINE__, {
_LOG_ERROR("Display is not plugged");
});
us_drm_close(drm);
return -2;
run->opened = US_ERROR_NO_DEVICE;
return run->opened;
}
void us_drm_close(us_drm_s *drm) {
@@ -194,33 +199,33 @@ void us_drm_close(us_drm_s *drm) {
}
if (run->saved_crtc != NULL) {
_D_LOG_DEBUG("Restoring CRTC ...");
_LOG_DEBUG("Restoring CRTC ...");
if (drmModeSetCrtc(run->fd,
run->saved_crtc->crtc_id, run->saved_crtc->buffer_id,
run->saved_crtc->x, run->saved_crtc->y,
&run->conn_id, 1, &run->saved_crtc->mode
) < 0 && errno != ENOENT) {
_D_LOG_PERROR("Can't restore CRTC");
_LOG_PERROR("Can't restore CRTC");
}
drmModeFreeCrtc(run->saved_crtc);
run->saved_crtc = NULL;
}
if (run->bufs != NULL) {
_D_LOG_DEBUG("Releasing buffers ...");
_LOG_DEBUG("Releasing buffers ...");
for (uint n_buf = 0; n_buf < run->n_bufs; ++n_buf) {
us_drm_buffer_s *const buf = &run->bufs[n_buf];
if (buf->fb_added && drmModeRmFB(run->fd, buf->id) < 0) {
_D_LOG_PERROR("Can't remove buffer=%u", n_buf);
_LOG_PERROR("Can't remove buffer=%u", n_buf);
}
if (buf->dumb_created) {
struct drm_mode_destroy_dumb destroy = {.handle = buf->handle};
if (drmIoctl(run->fd, DRM_IOCTL_MODE_DESTROY_DUMB, &destroy) < 0) {
_D_LOG_PERROR("Can't destroy dumb buffer=%u", n_buf);
_LOG_PERROR("Can't destroy dumb buffer=%u", n_buf);
}
}
if (buf->data != NULL && munmap(buf->data, buf->allocated)) {
_D_LOG_PERROR("Can't unmap buffer=%u", n_buf);
_LOG_PERROR("Can't unmap buffer=%u", n_buf);
}
}
US_DELETE(run->bufs, free);
@@ -233,19 +238,48 @@ void us_drm_close(us_drm_s *drm) {
run->crtc_id = 0;
run->dpms_state = -1;
run->opened = -1;
run->has_vsync = true;
run->stub_n_buf = 0;
if (say) {
_D_LOG_INFO("Closed");
_LOG_INFO("Closed");
}
}
// Handle the "no signal from capture" state: keep showing the NO SIGNAL stub
// for drm->blank_after seconds, then turn the display off via DPMS.
// Returns 0 on success, or a negative code propagated from the us_drm_*() calls
// (US_ERROR_NO_DEVICE when the display is unplugged, -1 on other errors).
int us_drm_ensure_no_signal(us_drm_s *drm) {
	us_drm_runtime_s *const run = drm->run;
	assert(run->fd >= 0);
	assert(run->opened > 0); // Must be opened for STUB (us_drm_open() returned > 0)
	const ldf now_ts = us_get_now_monotonic();
	if (run->blank_at_ts == 0) {
		// First call since the last normal expose: arm the blanking deadline
		run->blank_at_ts = now_ts + drm->blank_after;
	}
	const ldf saved_ts = run->blank_at_ts; // us_drm*() rewrites it to 0
	int retval;
	if (now_ts <= run->blank_at_ts) {
		// Deadline not reached yet: keep drawing the NO SIGNAL stub frame
		retval = us_drm_wait_for_vsync(drm);
		if (retval == 0) {
			retval = us_drm_expose_stub(drm, US_DRM_STUB_NO_SIGNAL, NULL);
		}
	} else {
		// Deadline passed: power the display off (log only once per state)
		US_ONCE_FOR(run->once, __LINE__, {
			_LOG_INFO("Turning off the display by timeout ...");
		});
		retval = us_drm_dpms_power_off(drm);
	}
	// Restore the deadline which the called us_drm_*() helpers reset to 0,
	// so repeated no-signal calls don't re-arm the timer from scratch
	run->blank_at_ts = saved_ts;
	return retval;
}
int us_drm_dpms_power_off(us_drm_s *drm) {
assert(drm->run->fd >= 0);
switch (_drm_check_status(drm)) {
case 0: break;
case -2: return 0; // Unplugged, nice
case US_ERROR_NO_DEVICE: return 0; // Unplugged, nice
// Во время переключения DPMS монитор моргает один раз состоянием disconnected,
// а потом почему-то снова оказывается connected. Так что просто считаем,
// что отсоединенный монитор на этом этапе - это нормально.
@@ -259,10 +293,11 @@ int us_drm_wait_for_vsync(us_drm_s *drm) {
us_drm_runtime_s *const run = drm->run;
assert(run->fd >= 0);
run->blank_at_ts = 0;
switch (_drm_check_status(drm)) {
case 0: break;
case -2: return -2;
case US_ERROR_NO_DEVICE: return US_ERROR_NO_DEVICE;
default: return -1;
}
_drm_ensure_dpms_power(drm, true);
@@ -276,13 +311,13 @@ int us_drm_wait_for_vsync(us_drm_s *drm) {
FD_ZERO(&fds);
FD_SET(run->fd, &fds);
_D_LOG_DEBUG("Calling select() for VSync ...");
_LOG_DEBUG("Calling select() for VSync ...");
const int result = select(run->fd + 1, &fds, NULL, NULL, &timeout);
if (result < 0) {
_D_LOG_PERROR("Can't select(%d) device for VSync", run->fd);
_LOG_PERROR("Can't select(%d) device for VSync", run->fd);
return -1;
} else if (result == 0) {
_D_LOG_ERROR("Device timeout while waiting VSync");
_LOG_ERROR("Device timeout while waiting VSync");
return -1;
}
@@ -290,9 +325,9 @@ int us_drm_wait_for_vsync(us_drm_s *drm) {
.version = DRM_EVENT_CONTEXT_VERSION,
.page_flip_handler = _drm_vsync_callback,
};
_D_LOG_DEBUG("Handling DRM event (maybe VSync) ...");
_LOG_DEBUG("Handling DRM event (maybe VSync) ...");
if (drmHandleEvent(run->fd, &ctx) < 0) {
_D_LOG_PERROR("Can't handle DRM event");
_LOG_PERROR("Can't handle DRM event");
return -1;
}
return 0;
@@ -306,18 +341,19 @@ static void _drm_vsync_callback(int fd, uint n_frame, uint sec, uint usec, void
us_drm_buffer_s *const buf = v_buf;
*buf->ctx.has_vsync = true;
*buf->ctx.exposing_dma_fd = -1;
_D_LOG_DEBUG("Got VSync signal");
_LOG_DEBUG("Got VSync signal");
}
int us_drm_expose_stub(us_drm_s *drm, us_drm_stub_e stub, const us_capture_s *cap) {
us_drm_runtime_s *const run = drm->run;
assert(run->fd >= 0);
assert(run->opened_for_stub);
assert(run->opened > 0);
run->blank_at_ts = 0;
switch (_drm_check_status(drm)) {
case 0: break;
case -2: return -2;
case US_ERROR_NO_DEVICE: return US_ERROR_NO_DEVICE;
default: return -1;
}
_drm_ensure_dpms_power(drm, true);
@@ -337,12 +373,7 @@ int us_drm_expose_stub(us_drm_s *drm, us_drm_stub_e stub, const us_capture_s *ca
break;
};
case US_DRM_STUB_BAD_FORMAT:
DRAW_MSG(
"=== PiKVM ==="
"\n \n< UNSUPPORTED CAPTURE FORMAT >"
"\n \nIt shouldn't happen ever."
"\n \nPlease check the logs and report a bug:"
"\n \n- https://github.com/pikvm/pikvm -");
DRAW_MSG("=== PiKVM ===\n \n< UNSUPPORTED CAPTURE FORMAT >");
break;
case US_DRM_STUB_NO_SIGNAL:
DRAW_MSG("=== PiKVM ===\n \n< NO SIGNAL >");
@@ -360,18 +391,18 @@ int us_drm_expose_stub(us_drm_s *drm, us_drm_stub_e stub, const us_capture_s *ca
run->has_vsync = false;
_D_LOG_DEBUG("Copying STUB frame ...")
_LOG_DEBUG("Copying STUB frame ...")
memcpy(buf->data, run->ft->frame->data, US_MIN(run->ft->frame->used, buf->allocated));
_D_LOG_DEBUG("Exposing STUB framebuffer n_buf=%u ...", run->stub_n_buf);
_LOG_DEBUG("Exposing STUB framebuffer n_buf=%u ...", run->stub_n_buf);
const int retval = drmModePageFlip(
run->fd, run->crtc_id, buf->id,
DRM_MODE_PAGE_FLIP_EVENT | DRM_MODE_PAGE_FLIP_ASYNC,
buf);
if (retval < 0) {
_D_LOG_PERROR("Can't expose STUB framebuffer n_buf=%u ...", run->stub_n_buf);
_LOG_PERROR("Can't expose STUB framebuffer n_buf=%u ...", run->stub_n_buf);
}
_D_LOG_DEBUG("Exposed STUB framebuffer n_buf=%u", run->stub_n_buf);
_LOG_DEBUG("Exposed STUB framebuffer n_buf=%u", run->stub_n_buf);
run->stub_n_buf = (run->stub_n_buf + 1) % run->n_bufs;
return retval;
@@ -382,26 +413,27 @@ int us_drm_expose_dma(us_drm_s *drm, const us_capture_hwbuf_s *hw) {
us_drm_buffer_s *const buf = &run->bufs[hw->buf.index];
assert(run->fd >= 0);
assert(!run->opened_for_stub);
assert(run->opened == 0);
run->blank_at_ts = 0;
switch (_drm_check_status(drm)) {
case 0: break;
case -2: return -2;
case US_ERROR_NO_DEVICE: return US_ERROR_NO_DEVICE;
default: return -1;
}
_drm_ensure_dpms_power(drm, true);
run->has_vsync = false;
_D_LOG_DEBUG("Exposing DMA framebuffer n_buf=%u ...", hw->buf.index);
_LOG_DEBUG("Exposing DMA framebuffer n_buf=%u ...", hw->buf.index);
const int retval = drmModePageFlip(
run->fd, run->crtc_id, buf->id,
DRM_MODE_PAGE_FLIP_EVENT | DRM_MODE_PAGE_FLIP_ASYNC,
buf);
if (retval < 0) {
_D_LOG_PERROR("Can't expose DMA framebuffer n_buf=%u ...", run->stub_n_buf);
_LOG_PERROR("Can't expose DMA framebuffer n_buf=%u ...", run->stub_n_buf);
}
_D_LOG_DEBUG("Exposed DMA framebuffer n_buf=%u", run->stub_n_buf);
_LOG_DEBUG("Exposed DMA framebuffer n_buf=%u", run->stub_n_buf);
run->exposing_dma_fd = hw->dma_fd;
return retval;
}
@@ -410,36 +442,36 @@ static int _drm_check_status(us_drm_s *drm) {
us_drm_runtime_s *run = drm->run;
if (run->status_fd < 0) {
_D_LOG_DEBUG("Trying to find status file ...");
_LOG_DEBUG("Trying to find status file ...");
struct stat st;
if (stat(drm->path, &st) < 0) {
_D_LOG_PERROR("Can't stat() DRM device");
_LOG_PERROR("Can't stat() DRM device");
goto error;
}
const uint mi = minor(st.st_rdev);
_D_LOG_DEBUG("DRM device minor(st_rdev)=%u", mi);
_LOG_DEBUG("DRM device minor(st_rdev)=%u", mi);
char path[128];
US_SNPRINTF(path, 127, "/sys/class/drm/card%u-%s/status", mi, drm->port);
_D_LOG_DEBUG("Opening status file %s ...", path);
_LOG_DEBUG("Opening status file %s ...", path);
if ((run->status_fd = open(path, O_RDONLY | O_CLOEXEC)) < 0) {
_D_LOG_PERROR("Can't open status file: %s", path);
_LOG_PERROR("Can't open status file: %s", path);
goto error;
}
_D_LOG_DEBUG("Status file fd=%d opened", run->status_fd);
_LOG_DEBUG("Status file fd=%d opened", run->status_fd);
}
char status_ch;
if (read(run->status_fd, &status_ch, 1) != 1) {
_D_LOG_PERROR("Can't read status file");
_LOG_PERROR("Can't read status file");
goto error;
}
if (lseek(run->status_fd, 0, SEEK_SET) != 0) {
_D_LOG_PERROR("Can't rewind status file");
_LOG_PERROR("Can't rewind status file");
goto error;
}
_D_LOG_DEBUG("Current display status: %c", status_ch);
return (status_ch == 'd' ? -2 : 0);
_LOG_DEBUG("Current display status: %c", status_ch);
return (status_ch == 'd' ? US_ERROR_NO_DEVICE : 0);
error:
US_CLOSE_FD(run->status_fd);
@@ -449,12 +481,12 @@ error:
static void _drm_ensure_dpms_power(us_drm_s *drm, bool on) {
us_drm_runtime_s *const run = drm->run;
if (run->dpms_id > 0 && run->dpms_state != (int)on) {
_D_LOG_INFO("Changing DPMS power mode: %d -> %u ...", run->dpms_state, on);
_LOG_INFO("Changing DPMS power mode: %d -> %u ...", run->dpms_state, on);
if (drmModeConnectorSetProperty(
run->fd, run->conn_id, run->dpms_id,
(on ? DRM_MODE_DPMS_ON : DRM_MODE_DPMS_OFF)
) < 0) {
_D_LOG_PERROR("Can't set DPMS power=%u (ignored)", on);
_LOG_PERROR("Can't set DPMS power=%u (ignored)", on);
}
}
run->dpms_state = (int)on;
@@ -466,7 +498,7 @@ static int _drm_init_buffers(us_drm_s *drm, const us_capture_s *cap) {
const uint n_bufs = (cap == NULL ? 4 : cap->run->n_bufs);
const char *name = (cap == NULL ? "STUB" : "DMA");
_D_LOG_DEBUG("Initializing %u %s buffers ...", n_bufs, name);
_LOG_DEBUG("Initializing %u %s buffers ...", n_bufs, name);
uint format = DRM_FORMAT_RGB888;
@@ -489,7 +521,7 @@ static int _drm_init_buffers(us_drm_s *drm, const us_capture_s *cap) {
.bpp = 24,
};
if (drmIoctl(run->fd, DRM_IOCTL_MODE_CREATE_DUMB, &create) < 0) {
_D_LOG_PERROR("Can't create %s buffer=%u", name, n_buf);
_LOG_PERROR("Can't create %s buffer=%u", name, n_buf);
return -1;
}
buf->handle = create.handle;
@@ -497,7 +529,7 @@ static int _drm_init_buffers(us_drm_s *drm, const us_capture_s *cap) {
struct drm_mode_map_dumb map = {.handle = create.handle};
if (drmIoctl(run->fd, DRM_IOCTL_MODE_MAP_DUMB, &map) < 0) {
_D_LOG_PERROR("Can't prepare dumb buffer=%u to mapping", n_buf);
_LOG_PERROR("Can't prepare dumb buffer=%u to mapping", n_buf);
return -1;
}
if ((buf->data = mmap(
@@ -505,7 +537,7 @@ static int _drm_init_buffers(us_drm_s *drm, const us_capture_s *cap) {
PROT_READ | PROT_WRITE, MAP_SHARED,
run->fd, map.offset
)) == MAP_FAILED) {
_D_LOG_PERROR("Can't map buffer=%u", n_buf);
_LOG_PERROR("Can't map buffer=%u", n_buf);
return -1;
}
memset(buf->data, 0, create.size);
@@ -516,7 +548,7 @@ static int _drm_init_buffers(us_drm_s *drm, const us_capture_s *cap) {
} else {
if (drmPrimeFDToHandle(run->fd, cap->run->bufs[n_buf].dma_fd, &buf->handle) < 0) {
_D_LOG_PERROR("Can't import DMA buffer=%u from capture device", n_buf);
_LOG_PERROR("Can't import DMA buffer=%u from capture device", n_buf);
return -1;
}
handles[0] = buf->handle;
@@ -533,7 +565,7 @@ static int _drm_init_buffers(us_drm_s *drm, const us_capture_s *cap) {
run->mode.hdisplay, run->mode.vdisplay, format,
handles, strides, offsets, &buf->id, 0
)) {
_D_LOG_PERROR("Can't setup buffer=%u", n_buf);
_LOG_PERROR("Can't setup buffer=%u", n_buf);
return -1;
}
buf->fb_added = true;
@@ -546,22 +578,22 @@ static int _drm_find_sink(us_drm_s *drm, uint width, uint height, float hz) {
run->crtc_id = 0;
_D_LOG_DEBUG("Trying to find the appropriate sink ...");
_LOG_DEBUG("Trying to find the appropriate sink ...");
drmModeRes *res = drmModeGetResources(run->fd);
if (res == NULL) {
_D_LOG_PERROR("Can't get resources info");
_LOG_PERROR("Can't get resources info");
goto done;
}
if (res->count_connectors <= 0) {
_D_LOG_ERROR("Can't find any connectors");
_LOG_ERROR("Can't find any connectors");
goto done;
}
for (int ci = 0; ci < res->count_connectors; ++ci) {
drmModeConnector *conn = drmModeGetConnector(run->fd, res->connectors[ci]);
if (conn == NULL) {
_D_LOG_PERROR("Can't get connector index=%d", ci);
_LOG_PERROR("Can't get connector index=%d", ci);
goto done;
}
@@ -573,37 +605,37 @@ static int _drm_find_sink(us_drm_s *drm, uint width, uint height, float hz) {
drmModeFreeConnector(conn);
continue;
}
_D_LOG_INFO("Using connector %s: conn_type=%d, conn_type_id=%d",
_LOG_INFO("Using connector %s: conn_type=%d, conn_type_id=%d",
drm->port, conn->connector_type, conn->connector_type_id);
if (conn->connection != DRM_MODE_CONNECTED) {
_D_LOG_ERROR("Connector for port %s has !DRM_MODE_CONNECTED", drm->port);
_LOG_ERROR("Connector for port %s has !DRM_MODE_CONNECTED", drm->port);
drmModeFreeConnector(conn);
goto done;
}
drmModeModeInfo *best;
if ((best = _find_best_mode(conn, width, height, hz)) == NULL) {
_D_LOG_ERROR("Can't find any appropriate display modes");
_LOG_ERROR("Can't find any appropriate display modes");
drmModeFreeConnector(conn);
goto unplugged;
}
_D_LOG_INFO("Using best mode: %ux%up%.02f",
_LOG_INFO("Using best mode: %ux%up%.02f",
best->hdisplay, best->vdisplay, _get_refresh_rate(best));
if ((run->dpms_id = _find_dpms(run->fd, conn)) > 0) {
_D_LOG_INFO("Using DPMS: id=%u", run->dpms_id);
_LOG_INFO("Using DPMS: id=%u", run->dpms_id);
} else {
_D_LOG_INFO("Using DPMS: None");
_LOG_INFO("Using DPMS: None");
}
u32 taken_crtcs = 0; // Unused here
if ((run->crtc_id = _find_crtc(run->fd, res, conn, &taken_crtcs)) == 0) {
_D_LOG_ERROR("Can't find CRTC");
_LOG_ERROR("Can't find CRTC");
drmModeFreeConnector(conn);
goto done;
}
_D_LOG_INFO("Using CRTC: id=%u", run->crtc_id);
_LOG_INFO("Using CRTC: id=%u", run->crtc_id);
run->conn_id = conn->connector_id;
memcpy(&run->mode, best, sizeof(drmModeModeInfo));
@@ -618,7 +650,7 @@ done:
unplugged:
drmModeFreeResources(res);
return -2;
return US_ERROR_NO_DEVICE;
}
static drmModeModeInfo *_find_best_mode(drmModeConnector *conn, uint width, uint height, float hz) {

View File

@@ -63,11 +63,14 @@ typedef struct {
uint n_bufs;
drmModeCrtc *saved_crtc;
int dpms_state;
bool opened_for_stub;
int opened;
bool has_vsync;
int exposing_dma_fd;
uint stub_n_buf;
bool unplugged_reported;
ldf blank_at_ts;
int once;
us_frametext_s *ft;
} us_drm_runtime_s;
@@ -75,6 +78,7 @@ typedef struct {
char *path;
char *port;
uint timeout;
uint blank_after;
us_drm_runtime_s *run;
} us_drm_s;
@@ -90,3 +94,4 @@ int us_drm_dpms_power_off(us_drm_s *drm);
int us_drm_wait_for_vsync(us_drm_s *drm);
int us_drm_expose_stub(us_drm_s *drm, us_drm_stub_e stub, const us_capture_s *cap);
int us_drm_expose_dma(us_drm_s *drm, const us_capture_hwbuf_s *hw);
int us_drm_ensure_no_signal(us_drm_s *drm);

View File

@@ -20,59 +20,8 @@
*****************************************************************************/
#include "h264.h"
#pragma once
#include <stdatomic.h>
#include "../libs/types.h"
#include "../libs/tools.h"
#include "../libs/logging.h"
#include "../libs/frame.h"
#include "../libs/memsink.h"
#include "../libs/unjpeg.h"
#include "m2m.h"
us_h264_stream_s *us_h264_stream_init(us_memsink_s *sink, const char *path, uint bitrate, uint gop) {
us_h264_stream_s *h264;
US_CALLOC(h264, 1);
h264->sink = sink;
h264->tmp_src = us_frame_init();
h264->dest = us_frame_init();
atomic_init(&h264->online, false);
h264->enc = us_m2m_h264_encoder_init("H264", path, bitrate, gop);
return h264;
}
void us_h264_stream_destroy(us_h264_stream_s *h264) {
us_m2m_encoder_destroy(h264->enc);
us_frame_destroy(h264->dest);
us_frame_destroy(h264->tmp_src);
free(h264);
}
void us_h264_stream_process(us_h264_stream_s *h264, const us_frame_s *frame, bool force_key) {
if (us_is_jpeg(frame->format)) {
const ldf now_ts = us_get_now_monotonic();
US_LOG_DEBUG("H264: Input frame is JPEG; decoding ...");
if (us_unjpeg(frame, h264->tmp_src, true) < 0) {
atomic_store(&h264->online, false);
return;
}
frame = h264->tmp_src;
US_LOG_VERBOSE("H264: JPEG decoded; time=%.3Lf", us_get_now_monotonic() - now_ts);
}
if (h264->key_requested) {
US_LOG_INFO("H264: Requested keyframe by a sink client");
h264->key_requested = false;
force_key = true;
}
bool online = false;
if (!us_m2m_encoder_compress(h264->enc, frame, h264->dest, force_key)) {
online = !us_memsink_server_put(h264->sink, h264->dest, &h264->key_requested);
}
atomic_store(&h264->online, online);
}
#define US_ERROR_COMMON -1
#define US_ERROR_NO_DEVICE -2
#define US_ERROR_NO_DATA -3

112
src/libs/fpsi.c Normal file
View File

@@ -0,0 +1,112 @@
/*****************************************************************************
# #
# uStreamer - Lightweight and fast MJPEG-HTTP streamer. #
# #
# Copyright (C) 2018-2024 Maxim Devaev <mdevaev@gmail.com> #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <https://www.gnu.org/licenses/>. #
# #
*****************************************************************************/
#include "fpsi.h"
#include <stdatomic.h>
#include <pthread.h>
#include "types.h"
#include "tools.h"
#include "threading.h"
#include "logging.h"
#include "frame.h"
// Create a new FPS counter. The name is copied and used for perf logging;
// with_meta enables packing of frame metadata (width/height/online) into
// the atomic state word. The caller owns the result (us_fpsi_destroy()).
us_fpsi_s *us_fpsi_init(const char *name, bool with_meta) {
	us_fpsi_s *counter;
	US_CALLOC(counter, 1);
	atomic_init(&counter->state_sec_ts, 0);
	atomic_init(&counter->state, 0);
	counter->with_meta = with_meta;
	counter->name = us_strdup(name);
	return counter;
}
// Free an FPS counter created by us_fpsi_init().
void us_fpsi_destroy(us_fpsi_s *fpsi) {
	free(fpsi->name); // Owned copy made by us_strdup() in us_fpsi_init()
	free(fpsi);
}
// Copy the stats-relevant fields of a captured frame (resolution and
// online flag) into a meta record for us_fpsi_update().
void us_fpsi_frame_to_meta(const us_frame_s *frame, us_fpsi_meta_s *meta) {
	meta->online = frame->online;
	meta->height = frame->height;
	meta->width = frame->width;
}
// Account one tick of the FPS counter (single-writer side of the lock-free
// scheme). Call with bump=true for every frame; once per second the
// accumulated count (plus optional meta) is published into the atomic state.
// meta must be non-NULL iff the counter was created with with_meta=true.
void us_fpsi_update(us_fpsi_s *fpsi, bool bump, const us_fpsi_meta_s *meta) {
	if (meta != NULL) {
		assert(fpsi->with_meta);
	} else {
		assert(!fpsi->with_meta);
	}
	const sll now_sec_ts = us_floor_ms(us_get_now_monotonic());
	if (atomic_load(&fpsi->state_sec_ts) != now_sec_ts) {
		US_LOG_PERF_FPS("FPS: %s: %u", fpsi->name, fpsi->accum);
		// Fast mutex-less store method: pack FPS and meta into one 64-bit word
		// layout: [0:16]=fps, [16:16]=width, [32:16]=height, [48:1]=online
		ull state = (ull)fpsi->accum & 0xFFFF;
		if (fpsi->with_meta) {
			assert(meta != NULL);
			state |= (ull)(meta->width & 0xFFFF) << 16;
			state |= (ull)(meta->height & 0xFFFF) << 32;
			state |= (ull)(meta->online ? 1 : 0) << 48;
		}
		atomic_store(&fpsi->state, state); // The info first
		atomic_store(&fpsi->state_sec_ts, now_sec_ts); // Then the timestamp; this order matters
		fpsi->accum = 0;
	}
	if (bump) {
		++fpsi->accum; // accum is writer-private, never read by us_fpsi_get()
	}
}
// Reader side of the lock-free FPS counter: returns the last published
// per-second FPS value and, if the counter carries meta, unpacks it into
// *meta. meta must be non-NULL iff the counter was created with with_meta.
uint us_fpsi_get(us_fpsi_s *fpsi, us_fpsi_meta_s *meta) {
	if (meta != NULL) {
		assert(fpsi->with_meta);
	} else {
		assert(!fpsi->with_meta);
	}
	// There may be a race between reading the info and the timestamp,
	// but it doesn't matter: if the timestamp is fresh, the data is fresh
	// too; the opposite case is not that important.
	const sll now_sec_ts = us_floor_ms(us_get_now_monotonic());
	const sll state_sec_ts = atomic_load(&fpsi->state_sec_ts); // The timestamp first
	const ull state = atomic_load(&fpsi->state); // Then the info
	uint current = state & 0xFFFF; // Bit layout mirrors us_fpsi_update()
	if (fpsi->with_meta) {
		assert(meta != NULL);
		meta->width = (state >> 16) & 0xFFFF;
		meta->height = (state >> 32) & 0xFFFF;
		meta->online = (state >> 48) & 1;
	}
	if (state_sec_ts != now_sec_ts && (state_sec_ts + 1) != now_sec_ts) {
		// Accept only the current or the previous second, otherwise the value is stale
		current = 0;
	}
	return current;
}

View File

@@ -24,23 +24,28 @@
#include <stdatomic.h>
#include "../libs/types.h"
#include "../libs/frame.h"
#include "../libs/memsink.h"
#include "m2m.h"
#include "types.h"
#include "frame.h"
typedef struct {
us_memsink_s *sink;
bool key_requested;
us_frame_s *tmp_src;
us_frame_s *dest;
us_m2m_encoder_s *enc;
atomic_bool online;
} us_h264_stream_s;
uint width;
uint height;
bool online;
} us_fpsi_meta_s;
typedef struct {
char *name;
bool with_meta;
uint accum;
atomic_llong state_sec_ts;
atomic_ullong state;
} us_fpsi_s;
us_h264_stream_s *us_h264_stream_init(us_memsink_s *sink, const char *path, uint bitrate, uint gop);
void us_h264_stream_destroy(us_h264_stream_s *h264);
void us_h264_stream_process(us_h264_stream_s *h264, const us_frame_s *frame, bool force_key);
us_fpsi_s *us_fpsi_init(const char *name, bool with_meta);
void us_fpsi_destroy(us_fpsi_s *fpsi);
void us_fpsi_frame_to_meta(const us_frame_s *frame, us_fpsi_meta_s *meta);
void us_fpsi_update(us_fpsi_s *fpsi, bool bump, const us_fpsi_meta_s *meta);
uint us_fpsi_get(us_fpsi_s *fpsi, us_fpsi_meta_s *meta);

View File

@@ -26,54 +26,58 @@
#include "tools.h"
#define US_FRAME_META_DECLARE \
uint width; \
uint height; \
uint format; \
uint stride; \
/* Stride is a bytesperline in V4L2 */ \
/* https://www.kernel.org/doc/html/v4.14/media/uapi/v4l/pixfmt-v4l2.html */ \
/* https://medium.com/@oleg.shipitko/what-does-stride-mean-in-image-processing-bba158a72bcd */ \
bool online; \
bool key; \
uint gop; \
\
ldf grab_ts; \
ldf encode_begin_ts; \
ldf encode_end_ts;
typedef struct {
u8 *data;
uz used;
uz allocated;
int dma_fd;
uint width;
uint height;
uint format;
uint stride;
// Stride is a bytesperline in V4L2
// https://www.kernel.org/doc/html/v4.14/media/uapi/v4l/pixfmt-v4l2.html
// https://medium.com/@oleg.shipitko/what-does-stride-mean-in-image-processing-bba158a72bcd
bool online;
bool key;
uint gop;
ldf grab_ts;
ldf encode_begin_ts;
ldf encode_end_ts;
US_FRAME_META_DECLARE;
} us_frame_s;
#define US_FRAME_COPY_META(x_src, x_dest) { \
x_dest->width = x_src->width; \
x_dest->height = x_src->height; \
x_dest->format = x_src->format; \
x_dest->stride = x_src->stride; \
x_dest->online = x_src->online; \
x_dest->key = x_src->key; \
x_dest->gop = x_src->gop; \
(x_dest)->width = (x_src)->width; \
(x_dest)->height = (x_src)->height; \
(x_dest)->format = (x_src)->format; \
(x_dest)->stride = (x_src)->stride; \
(x_dest)->online = (x_src)->online; \
(x_dest)->key = (x_src)->key; \
(x_dest)->gop = (x_src)->gop; \
\
x_dest->grab_ts = x_src->grab_ts; \
x_dest->encode_begin_ts = x_src->encode_begin_ts; \
x_dest->encode_end_ts = x_src->encode_end_ts; \
(x_dest)->grab_ts = (x_src)->grab_ts; \
(x_dest)->encode_begin_ts = (x_src)->encode_begin_ts; \
(x_dest)->encode_end_ts = (x_src)->encode_end_ts; \
}
#define US_FRAME_COMPARE_GEOMETRY(x_a, x_b) ( \
/* Compare the used size and significant meta (no timings) */ \
x_a->used == x_b->used \
(x_a)->used == (x_b)->used \
\
&& x_a->width == x_b->width \
&& x_a->height == x_b->height \
&& x_a->format == x_b->format \
&& x_a->stride == x_b->stride \
&& x_a->online == x_b->online \
&& x_a->key == x_b->key \
&& x_a->gop == x_b->gop \
&& (x_a)->width == (x_b)->width \
&& (x_a)->height == (x_b)->height \
&& (x_a)->format == (x_b)->format \
&& (x_a)->stride == (x_b)->stride \
&& (x_a)->online == (x_b)->online \
&& (x_a)->key == (x_b)->key \
&& (x_a)->gop == (x_b)->gop \
)

View File

@@ -25,9 +25,9 @@
#include <assert.h>
#define US_LIST_STRUCT(...) \
__VA_ARGS__ *prev; \
__VA_ARGS__ *next;
#define US_LIST_DECLARE \
void *prev; \
void *next;
#define US_LIST_ITERATE(x_first, x_item, ...) { \
for (__typeof__(x_first) x_item = x_first; x_item;) { \
@@ -42,7 +42,7 @@
x_first = x_item; \
} else { \
__typeof__(x_first) m_last = x_first; \
for (; m_last->next; m_last = m_last->next); \
for (; m_last->next != NULL; m_last = m_last->next); \
x_item->prev = m_last; \
m_last->next = x_item; \
} \
@@ -57,10 +57,12 @@
if (x_item->prev == NULL) { \
x_first = x_item->next; \
} else { \
x_item->prev->next = x_item->next; \
__typeof__(x_first) m_prev = x_item->prev; \
m_prev->next = x_item->next; \
} \
if (x_item->next != NULL) { \
x_item->next->prev = x_item->prev; \
__typeof__(x_first) m_next = x_item->next; \
m_next->prev = x_item->prev; \
} \
}

View File

@@ -33,13 +33,14 @@
#include <sys/mman.h>
#include "types.h"
#include "errors.h"
#include "tools.h"
#include "logging.h"
#include "frame.h"
#include "memsinksh.h"
us_memsink_s *us_memsink_init(
us_memsink_s *us_memsink_init_opened(
const char *name, const char *obj, bool server,
mode_t mode, bool rm, uint client_ttl, uint timeout) {
@@ -168,7 +169,7 @@ int us_memsink_server_put(us_memsink_s *sink, const us_frame_s *frame, bool *key
if (frame->used > sink->data_size) {
US_LOG_ERROR("%s-sink: Can't put frame: is too big (%zu > %zu)",
sink->name, frame->used, sink->data_size);
return 0; // -2
return 0;
}
if (us_flock_timedwait_monotonic(sink->fd, 1) == 0) {
@@ -213,7 +214,7 @@ int us_memsink_client_get(us_memsink_s *sink, us_frame_s *frame, bool *key_reque
if (us_flock_timedwait_monotonic(sink->fd, sink->timeout) < 0) {
if (errno == EWOULDBLOCK) {
return -2;
return US_ERROR_NO_DATA;
}
US_LOG_PERROR("%s-sink: Can't lock memory", sink->name);
return -1;
@@ -222,7 +223,7 @@ int us_memsink_client_get(us_memsink_s *sink, us_frame_s *frame, bool *key_reque
int retval = 0;
if (sink->mem->magic != US_MEMSINK_MAGIC) {
retval = -2; // Not updated
retval = US_ERROR_NO_DATA; // Not updated
goto done;
}
if (sink->mem->version != US_MEMSINK_VERSION) {
@@ -236,7 +237,7 @@ int us_memsink_client_get(us_memsink_s *sink, us_frame_s *frame, bool *key_reque
sink->mem->last_client_ts = us_get_now_monotonic();
if (sink->mem->id == sink->last_readed_id) {
retval = -2; // Not updated
retval = US_ERROR_NO_DATA; // Not updated
goto done;
}

View File

@@ -50,7 +50,7 @@ typedef struct {
} us_memsink_s;
us_memsink_s *us_memsink_init(
us_memsink_s *us_memsink_init_opened(
const char *name, const char *obj, bool server,
mode_t mode, bool rm, uint client_ttl, uint timeout);

View File

@@ -23,33 +23,23 @@
#pragma once
#include "types.h"
#include "frame.h"
#define US_MEMSINK_MAGIC ((u64)0xCAFEBABECAFEBABE)
#define US_MEMSINK_VERSION ((u32)6)
#define US_MEMSINK_VERSION ((u32)7)
typedef struct {
u64 magic;
u32 version;
u64 id;
uz used;
uint width;
uint height;
uint format;
uint stride;
bool online;
bool key;
uint gop;
ldf grab_ts;
ldf encode_begin_ts;
ldf encode_end_ts;
ldf last_client_ts;
bool key_requested;
US_FRAME_META_DECLARE;
} us_memsink_shared_s;

View File

@@ -58,7 +58,7 @@ int us_tc358743_xioctl_get_audio_hz(int fd, uint *audio_hz) {
US_MEMSET_ZERO(ctl);
ctl.id = TC358743_CID_AUDIO_SAMPLING_RATE;
if (us_xioctl(fd, VIDIOC_G_CTRL, &ctl) < 0) {
return -2;
return -1;
}
*audio_hz = ctl.value;
return 0;

View File

@@ -72,14 +72,16 @@
(m_a > m_b ? m_a : m_b); \
})
#define US_ONCE(...) { \
const int m_reported = __LINE__; \
if (m_reported != once) { \
#define US_ONCE_FOR(x_once, x_value, ...) { \
const int m_reported = (x_value); \
if (m_reported != (x_once)) { \
__VA_ARGS__; \
once = m_reported; \
(x_once) = m_reported; \
} \
}
#define US_ONCE(...) US_ONCE_FOR(once, __LINE__, ##__VA_ARGS__)
INLINE char *us_strdup(const char *str) {
char *const new = strdup(str);

View File

@@ -102,11 +102,11 @@ static const char *_http_get_header(struct evhttp_request *request, const char *
static char *_http_get_client_hostport(struct evhttp_request *request);
#define _S_LOG_ERROR(x_msg, ...) US_LOG_ERROR("HTTP: " x_msg, ##__VA_ARGS__)
#define _S_LOG_PERROR(x_msg, ...) US_LOG_PERROR("HTTP: " x_msg, ##__VA_ARGS__)
#define _S_LOG_INFO(x_msg, ...) US_LOG_INFO("HTTP: " x_msg, ##__VA_ARGS__)
#define _S_LOG_VERBOSE(x_msg, ...) US_LOG_VERBOSE("HTTP: " x_msg, ##__VA_ARGS__)
#define _S_LOG_DEBUG(x_msg, ...) US_LOG_DEBUG("HTTP: " x_msg, ##__VA_ARGS__)
#define _LOG_ERROR(x_msg, ...) US_LOG_ERROR("HTTP: " x_msg, ##__VA_ARGS__)
#define _LOG_PERROR(x_msg, ...) US_LOG_PERROR("HTTP: " x_msg, ##__VA_ARGS__)
#define _LOG_INFO(x_msg, ...) US_LOG_INFO("HTTP: " x_msg, ##__VA_ARGS__)
#define _LOG_VERBOSE(x_msg, ...) US_LOG_VERBOSE("HTTP: " x_msg, ##__VA_ARGS__)
#define _LOG_DEBUG(x_msg, ...) US_LOG_DEBUG("HTTP: " x_msg, ##__VA_ARGS__)
#define _A_EVBUFFER_NEW(x_buf) assert((x_buf = evbuffer_new()) != NULL)
#define _A_EVBUFFER_ADD(x_buf, x_data, x_size) assert(!evbuffer_add(x_buf, x_data, x_size))
@@ -120,6 +120,7 @@ us_server_s *us_server_init(us_stream_s *stream) {
us_server_exposed_s *exposed;
US_CALLOC(exposed, 1);
exposed->frame = us_frame_init();
exposed->queued_fpsi = us_fpsi_init("MJPEG-QUEUED", false);
us_server_runtime_s *run;
US_CALLOC(run, 1);
@@ -168,6 +169,7 @@ void us_server_destroy(us_server_s *server) {
});
US_LIST_ITERATE(run->stream_clients, client, { // cppcheck-suppress constStatement
us_fpsi_destroy(client->fpsi);
free(client->key);
free(client->hostport);
free(client);
@@ -175,6 +177,7 @@ void us_server_destroy(us_server_s *server) {
US_DELETE(run->auth_token, free);
us_fpsi_destroy(run->exposed->queued_fpsi);
us_frame_destroy(run->exposed->frame);
free(run->exposed);
free(server->run);
@@ -188,7 +191,7 @@ int us_server_listen(us_server_s *server) {
{
if (server->static_path[0] != '\0') {
_S_LOG_INFO("Enabling the file server: %s", server->static_path);
_LOG_INFO("Enabling the file server: %s", server->static_path);
evhttp_set_gencb(run->http, _http_callback_static, (void*)server);
} else {
assert(!evhttp_set_cb(run->http, "/", _http_callback_root, (void*)server));
@@ -227,11 +230,11 @@ int us_server_listen(us_server_s *server) {
US_ASPRINTF(run->auth_token, "Basic %s", encoded_token);
free(encoded_token);
_S_LOG_INFO("Using HTTP basic auth");
_LOG_INFO("Using HTTP basic auth");
}
if (server->unix_path[0] != '\0') {
_S_LOG_DEBUG("Binding server to UNIX socket '%s' ...", server->unix_path);
_LOG_DEBUG("Binding server to UNIX socket '%s' ...", server->unix_path);
if ((run->ext_fd = us_evhttp_bind_unix(
run->http,
server->unix_path,
@@ -240,33 +243,33 @@ int us_server_listen(us_server_s *server) {
) {
return -1;
}
_S_LOG_INFO("Listening HTTP on UNIX socket '%s'", server->unix_path);
_LOG_INFO("Listening HTTP on UNIX socket '%s'", server->unix_path);
# ifdef WITH_SYSTEMD
} else if (server->systemd) {
_S_LOG_DEBUG("Binding HTTP to systemd socket ...");
_LOG_DEBUG("Binding HTTP to systemd socket ...");
if ((run->ext_fd = us_evhttp_bind_systemd(run->http)) < 0) {
return -1;
}
_S_LOG_INFO("Listening systemd socket ...");
_LOG_INFO("Listening systemd socket ...");
# endif
} else {
_S_LOG_DEBUG("Binding HTTP to [%s]:%u ...", server->host, server->port);
_LOG_DEBUG("Binding HTTP to [%s]:%u ...", server->host, server->port);
if (evhttp_bind_socket(run->http, server->host, server->port) < 0) {
_S_LOG_PERROR("Can't bind HTTP on [%s]:%u", server->host, server->port)
_LOG_PERROR("Can't bind HTTP on [%s]:%u", server->host, server->port)
return -1;
}
_S_LOG_INFO("Listening HTTP on [%s]:%u", server->host, server->port);
_LOG_INFO("Listening HTTP on [%s]:%u", server->host, server->port);
}
return 0;
}
void us_server_loop(us_server_s *server) {
_S_LOG_INFO("Starting eventloop ...");
_LOG_INFO("Starting eventloop ...");
event_base_dispatch(server->run->base);
_S_LOG_INFO("Eventloop stopped");
_LOG_INFO("Eventloop stopped");
}
void us_server_loop_break(us_server_s *server) {
@@ -276,7 +279,7 @@ void us_server_loop_break(us_server_s *server) {
static int _http_preprocess_request(struct evhttp_request *request, us_server_s *server) {
const us_server_runtime_s *const run = server->run;
atomic_store(&server->stream->run->http_last_request_ts, us_get_now_monotonic());
atomic_store(&server->stream->run->http->last_request_ts, us_get_now_monotonic());
if (server->allow_origin[0] != '\0') {
const char *const cors_headers = _http_get_header(request, "Access-Control-Request-Headers");
@@ -407,18 +410,18 @@ static void _http_callback_static(struct evhttp_request *request, void *v_server
}
if ((fd = open(static_path, O_RDONLY)) < 0) {
_S_LOG_PERROR("Can't open found static file %s", static_path);
_LOG_PERROR("Can't open found static file %s", static_path);
goto not_found;
}
{
struct stat st;
if (fstat(fd, &st) < 0) {
_S_LOG_PERROR("Can't stat() found static file %s", static_path);
_LOG_PERROR("Can't stat() found static file %s", static_path);
goto not_found;
}
if (st.st_size > 0 && evbuffer_add_file(buf, fd, 0, st.st_size) < 0) {
_S_LOG_ERROR("Can't serve static file %s", static_path);
_LOG_ERROR("Can't serve static file %s", static_path);
goto not_found;
}
@@ -473,12 +476,27 @@ static void _http_callback_state(struct evhttp_request *request, void *v_server)
enc_quality
);
if (stream->run->h264 != NULL) {
# ifdef WITH_V4P
if (stream->drm != NULL) {
us_fpsi_meta_s meta;
const uint fps = us_fpsi_get(stream->run->http->drm_fpsi, &meta);
_A_EVBUFFER_ADD_PRINTF(buf,
" \"h264\": {\"bitrate\": %u, \"gop\": %u, \"online\": %s},",
" \"drm\": {\"live\": %s, \"fps\": %u},",
us_bool_to_string(meta.online),
fps
);
}
# endif
if (stream->h264_sink != NULL) {
us_fpsi_meta_s meta;
const uint fps = us_fpsi_get(stream->run->http->h264_fpsi, &meta);
_A_EVBUFFER_ADD_PRINTF(buf,
" \"h264\": {\"bitrate\": %u, \"gop\": %u, \"online\": %s, \"fps\": %u},",
stream->h264_bitrate,
stream->h264_gop,
us_bool_to_string(atomic_load(&stream->run->h264->online))
us_bool_to_string(meta.online),
fps
);
}
@@ -500,21 +518,18 @@ static void _http_callback_state(struct evhttp_request *request, void *v_server)
_A_EVBUFFER_ADD_PRINTF(buf, "},");
}
uint width;
uint height;
bool online;
uint captured_fps;
us_stream_get_capture_state(stream, &width, &height, &online, &captured_fps);
us_fpsi_meta_s captured_meta;
const uint captured_fps = us_fpsi_get(stream->run->http->captured_fpsi, &captured_meta);
_A_EVBUFFER_ADD_PRINTF(buf,
" \"source\": {\"resolution\": {\"width\": %u, \"height\": %u},"
" \"online\": %s, \"desired_fps\": %u, \"captured_fps\": %u},"
" \"stream\": {\"queued_fps\": %u, \"clients\": %u, \"clients_stat\": {",
(server->fake_width ? server->fake_width : width),
(server->fake_height ? server->fake_height : height),
us_bool_to_string(online),
(server->fake_width ? server->fake_width : captured_meta.width),
(server->fake_height ? server->fake_height : captured_meta.height),
us_bool_to_string(captured_meta.online),
stream->cap->desired_fps,
captured_fps,
ex->queued_fps,
us_fpsi_get(ex->queued_fpsi, NULL),
run->stream_clients_count
);
@@ -523,7 +538,7 @@ static void _http_callback_state(struct evhttp_request *request, void *v_server)
"\"%" PRIx64 "\": {\"fps\": %u, \"extra_headers\": %s, \"advance_headers\": %s,"
" \"dual_final_frames\": %s, \"zero_data\": %s, \"key\": \"%s\"}%s",
client->id,
client->fps,
us_fpsi_get(client->fpsi, NULL),
us_bool_to_string(client->extra_headers),
us_bool_to_string(client->advance_headers),
us_bool_to_string(client->dual_final_frames),
@@ -551,7 +566,7 @@ static void _http_callback_snapshot(struct evhttp_request *request, void *v_serv
client->request = request;
client->request_ts = us_get_now_monotonic();
atomic_fetch_add(&server->stream->run->http_snapshot_requested, 1);
atomic_fetch_add(&server->stream->run->http->snapshot_requested, 1);
US_LIST_APPEND(server->run->snapshot_clients, client);
}
@@ -590,26 +605,33 @@ static void _http_callback_stream(struct evhttp_request *request, void *v_server
client->hostport = _http_get_client_hostport(request);
client->id = us_get_now_id();
{
char *name;
US_ASPRINTF(name, "MJPEG-CLIENT-%" PRIx64, client->id);
client->fpsi = us_fpsi_init(name, false);
free(name);
}
US_LIST_APPEND_C(run->stream_clients, client, run->stream_clients_count);
if (run->stream_clients_count == 1) {
atomic_store(&server->stream->run->http_has_clients, true);
atomic_store(&server->stream->run->http->has_clients, true);
# ifdef WITH_GPIO
us_gpio_set_has_http_clients(true);
# endif
}
_S_LOG_INFO("NEW client (now=%u): %s, id=%" PRIx64,
_LOG_INFO("NEW client (now=%u): %s, id=%" PRIx64,
run->stream_clients_count, client->hostport, client->id);
struct bufferevent *const buf_event = evhttp_connection_get_bufferevent(conn);
if (server->tcp_nodelay && run->ext_fd >= 0) {
_S_LOG_DEBUG("Setting up TCP_NODELAY to the client %s ...", client->hostport);
_LOG_DEBUG("Setting up TCP_NODELAY to the client %s ...", client->hostport);
const evutil_socket_t fd = bufferevent_getfd(buf_event);
assert(fd >= 0);
int on = 1;
if (setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, (void*)&on, sizeof(on)) != 0) {
_S_LOG_PERROR("Can't set TCP_NODELAY to the client %s", client->hostport);
_LOG_PERROR("Can't set TCP_NODELAY to the client %s", client->hostport);
}
}
bufferevent_setcb(buf_event, NULL, NULL, _http_callback_stream_error, (void*)client);
@@ -626,15 +648,7 @@ static void _http_callback_stream_write(struct bufferevent *buf_event, void *v_c
us_server_s *const server = client->server;
us_server_exposed_s *const ex = server->run->exposed;
const ldf now_ts = us_get_now_monotonic();
const sll now_sec_ts = us_floor_ms(now_ts);
if (now_sec_ts != client->fps_ts) {
client->fps = client->fps_accum;
client->fps_accum = 0;
client->fps_ts = now_sec_ts;
}
client->fps_accum += 1;
us_fpsi_update(client->fpsi, true, NULL);
struct evbuffer *buf;
_A_EVBUFFER_NEW(buf);
@@ -716,6 +730,7 @@ static void _http_callback_stream_write(struct bufferevent *buf_event, void *v_c
us_get_now_real(),
(client->extra_headers ? "" : RN)
);
const ldf now_ts = us_get_now_monotonic();
if (client->extra_headers) {
_A_EVBUFFER_ADD_PRINTF(buf,
"X-UStreamer-Online: %s" RN
@@ -736,7 +751,7 @@ static void _http_callback_stream_write(struct bufferevent *buf_event, void *v_c
ex->dropped,
ex->frame->width,
ex->frame->height,
client->fps,
us_fpsi_get(client->fpsi, NULL),
ex->frame->grab_ts,
ex->frame->encode_begin_ts,
ex->frame->encode_end_ts,
@@ -779,20 +794,21 @@ static void _http_callback_stream_error(struct bufferevent *buf_event, short wha
US_LIST_REMOVE_C(run->stream_clients, client, run->stream_clients_count);
if (run->stream_clients_count == 0) {
atomic_store(&server->stream->run->http_has_clients, false);
atomic_store(&server->stream->run->http->has_clients, false);
# ifdef WITH_GPIO
us_gpio_set_has_http_clients(false);
# endif
}
char *const reason = us_bufferevent_format_reason(what);
_S_LOG_INFO("DEL client (now=%u): %s, id=%" PRIx64 ", %s",
_LOG_INFO("DEL client (now=%u): %s, id=%" PRIx64 ", %s",
run->stream_clients_count, client->hostport, client->id, reason);
free(reason);
struct evhttp_connection *conn = evhttp_request_get_connection(client->request);
US_DELETE(conn, evhttp_connection_free);
us_fpsi_destroy(client->fpsi);
free(client->key);
free(client->hostport);
free(client);
@@ -802,8 +818,8 @@ static void _http_send_stream(us_server_s *server, bool stream_updated, bool fra
us_server_runtime_s *const run = server->run;
us_server_exposed_s *const ex = run->exposed;
bool has_clients = false;
bool queued = false;
bool has_clients = true;
US_LIST_ITERATE(run->stream_clients, client, { // cppcheck-suppress constStatement
struct evhttp_connection *const conn = evhttp_request_get_connection(client->request);
@@ -833,23 +849,14 @@ static void _http_send_stream(us_server_s *server, bool stream_updated, bool fra
} else if (stream_updated) { // Для dual
client->updated_prev = false;
}
has_clients = true;
}
});
if (queued) {
static uint queued_fps_accum = 0;
static sll queued_fps_ts = 0;
const sll now_sec_ts = us_floor_ms(us_get_now_monotonic());
if (now_sec_ts != queued_fps_ts) {
ex->queued_fps = queued_fps_accum;
queued_fps_accum = 0;
queued_fps_ts = now_sec_ts;
}
queued_fps_accum += 1;
us_fpsi_update(ex->queued_fpsi, true, NULL);
} else if (!has_clients) {
ex->queued_fps = 0;
us_fpsi_update(ex->queued_fpsi, false, NULL);
}
}
@@ -866,24 +873,22 @@ static void _http_send_snapshot(us_server_s *server) {
US_SNPRINTF(header_buf, 255, "%u", x_value); \
_A_ADD_HEADER(request, x_key, header_buf); \
}
uint width;
uint height;
uint captured_fps; // Unused
bool online;
us_stream_get_capture_state(server->stream, &width, &height, &online, &captured_fps);
us_fpsi_meta_s captured_meta;
us_fpsi_get(server->stream->run->http->captured_fpsi, &captured_meta);
US_LIST_ITERATE(server->run->snapshot_clients, client, { // cppcheck-suppress constStatement
struct evhttp_request *request = client->request;
const bool has_fresh_snapshot = (atomic_load(&server->stream->run->http_snapshot_requested) == 0);
const bool has_fresh_snapshot = (atomic_load(&server->stream->run->http->snapshot_requested) == 0);
const bool timed_out = (client->request_ts + US_MAX((uint)1, server->stream->error_delay * 3) < us_get_now_monotonic());
if (has_fresh_snapshot || timed_out) {
us_frame_s *frame = ex->frame;
if (!online) {
if (!captured_meta.online) {
if (blank == NULL) {
blank = us_blank_init();
us_blank_draw(blank, "< NO SIGNAL >", width, height);
us_blank_draw(blank, "< NO SIGNAL >", captured_meta.width, captured_meta.height);
}
frame = blank->jpeg;
}
@@ -930,7 +935,7 @@ static void _http_refresher(int fd, short what, void *v_server) {
us_server_s *server = v_server;
us_server_exposed_s *ex = server->run->exposed;
us_ring_s *const ring = server->stream->run->http_jpeg_ring;
us_ring_s *const ring = server->stream->run->http->jpeg_ring;
bool stream_updated = false;
bool frame_updated = false;
@@ -942,7 +947,7 @@ static void _http_refresher(int fd, short what, void *v_server) {
stream_updated = true;
us_ring_consumer_release(ring, ri);
} else if (ex->expose_end_ts + 1 < us_get_now_monotonic()) {
_S_LOG_DEBUG("Repeating exposed ...");
_LOG_DEBUG("Repeating exposed ...");
ex->expose_begin_ts = us_get_now_monotonic();
ex->expose_cmp_ts = ex->expose_begin_ts;
ex->expose_end_ts = ex->expose_begin_ts;
@@ -972,7 +977,7 @@ static void _http_refresher(int fd, short what, void *v_server) {
static bool _expose_frame(us_server_s *server, const us_frame_s *frame) {
us_server_exposed_s *const ex = server->run->exposed;
_S_LOG_DEBUG("Updating exposed frame (online=%d) ...", frame->online);
_LOG_DEBUG("Updating exposed frame (online=%d) ...", frame->online);
ex->expose_begin_ts = us_get_now_monotonic();
if (server->drop_same_frames && frame->online) {
@@ -984,13 +989,13 @@ static bool _expose_frame(us_server_s *server, const us_frame_s *frame) {
) {
ex->expose_cmp_ts = us_get_now_monotonic();
ex->expose_end_ts = ex->expose_cmp_ts;
_S_LOG_VERBOSE("Dropped same frame number %u; cmp_time=%.06Lf",
_LOG_VERBOSE("Dropped same frame number %u; cmp_time=%.06Lf",
ex->dropped, (ex->expose_cmp_ts - ex->expose_begin_ts));
ex->dropped += 1;
return false; // Not updated
} else {
ex->expose_cmp_ts = us_get_now_monotonic();
_S_LOG_VERBOSE("Passed same frame check (need_drop=%d, maybe_same=%d); cmp_time=%.06Lf",
_LOG_VERBOSE("Passed same frame check (need_drop=%d, maybe_same=%d); cmp_time=%.06Lf",
need_drop, maybe_same, (ex->expose_cmp_ts - ex->expose_begin_ts));
}
}
@@ -1007,7 +1012,7 @@ static bool _expose_frame(us_server_s *server, const us_frame_s *frame) {
ex->expose_cmp_ts = ex->expose_begin_ts;
ex->expose_end_ts = us_get_now_monotonic();
_S_LOG_VERBOSE("Exposed frame: online=%d, exp_time=%.06Lf",
_LOG_VERBOSE("Exposed frame: online=%d, exp_time=%.06Lf",
ex->frame->online, (ex->expose_end_ts - ex->expose_begin_ts));
return true; // Updated
}

View File

@@ -31,11 +31,12 @@
#include "../../libs/types.h"
#include "../../libs/frame.h"
#include "../../libs/list.h"
#include "../../libs/fpsi.h"
#include "../encoder.h"
#include "../stream.h"
typedef struct us_stream_client_sx {
typedef struct {
struct us_server_sx *server;
struct evhttp_request *request;
@@ -50,25 +51,23 @@ typedef struct us_stream_client_sx {
bool need_initial;
bool need_first_frame;
bool updated_prev;
uint fps_accum;
sll fps_ts;
uint fps;
US_LIST_STRUCT(struct us_stream_client_sx);
us_fpsi_s *fpsi;
US_LIST_DECLARE;
} us_stream_client_s;
typedef struct us_snapshot_client_sx {
typedef struct {
struct us_server_sx *server;
struct evhttp_request *request;
ldf request_ts;
US_LIST_STRUCT(struct us_snapshot_client_sx);
US_LIST_DECLARE;
} us_snapshot_client_s;
typedef struct {
us_frame_s *frame;
uint captured_fps;
uint queued_fps;
us_fpsi_s *queued_fpsi;
uint dropped;
ldf expose_begin_ts;
ldf expose_cmp_ts;

View File

@@ -56,11 +56,11 @@ static void _m2m_encoder_cleanup(us_m2m_encoder_s *enc);
static int _m2m_encoder_compress_raw(us_m2m_encoder_s *enc, const us_frame_s *src, us_frame_s *dest, bool force_key);
#define _E_LOG_ERROR(x_msg, ...) US_LOG_ERROR("%s: " x_msg, enc->name, ##__VA_ARGS__)
#define _E_LOG_PERROR(x_msg, ...) US_LOG_PERROR("%s: " x_msg, enc->name, ##__VA_ARGS__)
#define _E_LOG_INFO(x_msg, ...) US_LOG_INFO("%s: " x_msg, enc->name, ##__VA_ARGS__)
#define _E_LOG_VERBOSE(x_msg, ...) US_LOG_VERBOSE("%s: " x_msg, enc->name, ##__VA_ARGS__)
#define _E_LOG_DEBUG(x_msg, ...) US_LOG_DEBUG("%s: " x_msg, enc->name, ##__VA_ARGS__)
#define _LOG_ERROR(x_msg, ...) US_LOG_ERROR("%s: " x_msg, enc->name, ##__VA_ARGS__)
#define _LOG_PERROR(x_msg, ...) US_LOG_PERROR("%s: " x_msg, enc->name, ##__VA_ARGS__)
#define _LOG_INFO(x_msg, ...) US_LOG_INFO("%s: " x_msg, enc->name, ##__VA_ARGS__)
#define _LOG_VERBOSE(x_msg, ...) US_LOG_VERBOSE("%s: " x_msg, enc->name, ##__VA_ARGS__)
#define _LOG_DEBUG(x_msg, ...) US_LOG_DEBUG("%s: " x_msg, enc->name, ##__VA_ARGS__)
us_m2m_encoder_s *us_m2m_h264_encoder_init(const char *name, const char *path, uint bitrate, uint gop) {
@@ -85,7 +85,7 @@ us_m2m_encoder_s *us_m2m_jpeg_encoder_init(const char *name, const char *path, u
}
void us_m2m_encoder_destroy(us_m2m_encoder_s *enc) {
_E_LOG_INFO("Destroying encoder ...");
_LOG_INFO("Destroying encoder ...");
_m2m_encoder_cleanup(enc);
free(enc->path);
free(enc->name);
@@ -95,29 +95,45 @@ void us_m2m_encoder_destroy(us_m2m_encoder_s *enc) {
int us_m2m_encoder_compress(us_m2m_encoder_s *enc, const us_frame_s *src, us_frame_s *dest, bool force_key) {
us_m2m_encoder_runtime_s *const run = enc->run;
us_frame_encoding_begin(src, dest, (enc->output_format == V4L2_PIX_FMT_MJPEG ? V4L2_PIX_FMT_JPEG : enc->output_format));
uint dest_format = enc->output_format;
switch (enc->output_format) {
case V4L2_PIX_FMT_JPEG:
force_key = false;
// fall through
case V4L2_PIX_FMT_MJPEG:
dest_format = V4L2_PIX_FMT_JPEG;
break;
case V4L2_PIX_FMT_H264:
force_key = (
force_key
|| run->last_online != src->online
|| run->last_encode_ts + 0.5 < us_get_now_monotonic()
);
break;
}
us_frame_encoding_begin(src, dest, dest_format);
_m2m_encoder_ensure(enc, src);
if (!run->ready) { // Already prepared but failed
return -1;
}
force_key = (enc->output_format == V4L2_PIX_FMT_H264 && (force_key || run->last_online != src->online));
_E_LOG_DEBUG("Compressing new frame; force_key=%d ...", force_key);
_LOG_DEBUG("Compressing new frame; force_key=%d ...", force_key);
if (_m2m_encoder_compress_raw(enc, src, dest, force_key) < 0) {
_m2m_encoder_cleanup(enc);
_E_LOG_ERROR("Encoder destroyed due an error (compress)");
_LOG_ERROR("Encoder destroyed due an error (compress)");
return -1;
}
us_frame_encoding_end(dest);
_E_LOG_VERBOSE("Compressed new frame: size=%zu, time=%0.3Lf, force_key=%d",
_LOG_VERBOSE("Compressed new frame: size=%zu, time=%0.3Lf, force_key=%d",
dest->used, dest->encode_end_ts - dest->encode_begin_ts, force_key);
run->last_online = src->online;
run->last_encode_ts = dest->encode_end_ts;
return 0;
}
@@ -151,7 +167,7 @@ static us_m2m_encoder_s *_m2m_encoder_init(
#define _E_XIOCTL(x_request, x_value, x_msg, ...) { \
if (us_xioctl(run->fd, x_request, x_value) < 0) { \
_E_LOG_PERROR(x_msg, ##__VA_ARGS__); \
_LOG_PERROR(x_msg, ##__VA_ARGS__); \
goto error; \
} \
}
@@ -170,9 +186,9 @@ static void _m2m_encoder_ensure(us_m2m_encoder_s *enc, const us_frame_s *frame)
return; // Configured already
}
_E_LOG_INFO("Configuring encoder: DMA=%d ...", dma);
_LOG_INFO("Configuring encoder: DMA=%d ...", dma);
_E_LOG_DEBUG("Encoder changes: width=%u->%u, height=%u->%u, input_format=%u->%u, stride=%u->%u, dma=%u->%u",
_LOG_DEBUG("Encoder changes: width=%u->%u, height=%u->%u, input_format=%u->%u, stride=%u->%u, dma=%u->%u",
run->p_width, frame->width,
run->p_height, frame->height,
run->p_input_format, frame->format,
@@ -187,18 +203,18 @@ static void _m2m_encoder_ensure(us_m2m_encoder_s *enc, const us_frame_s *frame)
run->p_stride = frame->stride;
run->p_dma = dma;
_E_LOG_DEBUG("Opening encoder device ...");
_LOG_DEBUG("Opening encoder device ...");
if ((run->fd = open(enc->path, O_RDWR)) < 0) {
_E_LOG_PERROR("Can't open encoder device");
_LOG_PERROR("Can't open encoder device");
goto error;
}
_E_LOG_DEBUG("Encoder device fd=%d opened", run->fd);
_LOG_DEBUG("Encoder device fd=%d opened", run->fd);
# define SET_OPTION(x_cid, x_value) { \
struct v4l2_control m_ctl = {0}; \
m_ctl.id = x_cid; \
m_ctl.value = x_value; \
_E_LOG_DEBUG("Configuring option " #x_cid " ..."); \
_LOG_DEBUG("Configuring option " #x_cid " ..."); \
_E_XIOCTL(VIDIOC_S_CTRL, &m_ctl, "Can't set option " #x_cid); \
}
if (enc->output_format == V4L2_PIX_FMT_H264) {
@@ -227,10 +243,10 @@ static void _m2m_encoder_ensure(us_m2m_encoder_s *enc, const us_frame_s *frame)
fmt.fmt.pix_mp.height = run->p_height;
fmt.fmt.pix_mp.pixelformat = run->p_input_format;
fmt.fmt.pix_mp.field = V4L2_FIELD_ANY;
fmt.fmt.pix_mp.colorspace = V4L2_COLORSPACE_JPEG; // libcamera currently has no means to request the right colour space
fmt.fmt.pix_mp.colorspace = V4L2_COLORSPACE_JPEG; // FIXME: Wrong colors
fmt.fmt.pix_mp.num_planes = 1;
// fmt.fmt.pix_mp.plane_fmt[0].bytesperline = run->p_stride;
_E_LOG_DEBUG("Configuring INPUT format ...");
_LOG_DEBUG("Configuring INPUT format ...");
_E_XIOCTL(VIDIOC_S_FMT, &fmt, "Can't set INPUT format");
}
@@ -249,13 +265,13 @@ static void _m2m_encoder_ensure(us_m2m_encoder_s *enc, const us_frame_s *frame)
// https://github.com/raspberrypi/linux/pull/5232
fmt.fmt.pix_mp.plane_fmt[0].sizeimage = (1024 + 512) << 10; // 1.5Mb
}
_E_LOG_DEBUG("Configuring OUTPUT format ...");
_LOG_DEBUG("Configuring OUTPUT format ...");
_E_XIOCTL(VIDIOC_S_FMT, &fmt, "Can't set OUTPUT format");
if (fmt.fmt.pix_mp.pixelformat != enc->output_format) {
char fourcc_str[8];
_E_LOG_ERROR("The OUTPUT format can't be configured as %s",
_LOG_ERROR("The OUTPUT format can't be configured as %s",
us_fourcc_to_string(enc->output_format, fourcc_str, 8));
_E_LOG_ERROR("In case of Raspberry Pi, try to append 'start_x=1' to /boot/config.txt");
_LOG_ERROR("In case of Raspberry Pi, try to append 'start_x=1' to /boot/config.txt");
goto error;
}
}
@@ -277,7 +293,7 @@ static void _m2m_encoder_ensure(us_m2m_encoder_s *enc, const us_frame_s *frame)
setfps.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
setfps.parm.output.timeperframe.numerator = 1;
setfps.parm.output.timeperframe.denominator = run->fps_limit;
_E_LOG_DEBUG("Configuring INPUT FPS ...");
_LOG_DEBUG("Configuring INPUT FPS ...");
_E_XIOCTL(VIDIOC_S_PARM, &setfps, "Can't set INPUT FPS");
}
@@ -296,21 +312,21 @@ static void _m2m_encoder_ensure(us_m2m_encoder_s *enc, const us_frame_s *frame)
{
enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
_E_LOG_DEBUG("Starting INPUT ...");
_LOG_DEBUG("Starting INPUT ...");
_E_XIOCTL(VIDIOC_STREAMON, &type, "Can't start INPUT");
type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
_E_LOG_DEBUG("Starting OUTPUT ...");
_LOG_DEBUG("Starting OUTPUT ...");
_E_XIOCTL(VIDIOC_STREAMON, &type, "Can't start OUTPUT");
}
run->ready = true;
_E_LOG_INFO("Encoder is ready");
_LOG_INFO("Encoder is ready");
return;
error:
_m2m_encoder_cleanup(enc);
_E_LOG_ERROR("Encoder destroyed due an error (prepare)");
_LOG_ERROR("Encoder destroyed due an error (prepare)");
}
static int _m2m_encoder_init_buffers(
@@ -319,20 +335,20 @@ static int _m2m_encoder_init_buffers(
us_m2m_encoder_runtime_s *const run = enc->run;
_E_LOG_DEBUG("Initializing %s buffers ...", name);
_LOG_DEBUG("Initializing %s buffers ...", name);
struct v4l2_requestbuffers req = {0};
req.count = 1;
req.type = type;
req.memory = (dma ? V4L2_MEMORY_DMABUF : V4L2_MEMORY_MMAP);
_E_LOG_DEBUG("Requesting %u %s buffers ...", req.count, name);
_LOG_DEBUG("Requesting %u %s buffers ...", req.count, name);
_E_XIOCTL(VIDIOC_REQBUFS, &req, "Can't request %s buffers", name);
if (req.count < 1) {
_E_LOG_ERROR("Insufficient %s buffer memory: %u", name, req.count);
_LOG_ERROR("Insufficient %s buffer memory: %u", name, req.count);
goto error;
}
_E_LOG_DEBUG("Got %u %s buffers", req.count, name);
_LOG_DEBUG("Got %u %s buffers", req.count, name);
if (dma) {
*n_bufs_ptr = req.count;
@@ -349,25 +365,25 @@ static int _m2m_encoder_init_buffers(
buf.length = 1;
buf.m.planes = &plane;
_E_LOG_DEBUG("Querying %s buffer=%u ...", name, *n_bufs_ptr);
_LOG_DEBUG("Querying %s buffer=%u ...", name, *n_bufs_ptr);
_E_XIOCTL(VIDIOC_QUERYBUF, &buf, "Can't query %s buffer=%u", name, *n_bufs_ptr);
_E_LOG_DEBUG("Mapping %s buffer=%u ...", name, *n_bufs_ptr);
_LOG_DEBUG("Mapping %s buffer=%u ...", name, *n_bufs_ptr);
if (((*bufs_ptr)[*n_bufs_ptr].data = mmap(
NULL, plane.length,
PROT_READ | PROT_WRITE, MAP_SHARED,
run->fd, plane.m.mem_offset
)) == MAP_FAILED) {
_E_LOG_PERROR("Can't map %s buffer=%u", name, *n_bufs_ptr);
_LOG_PERROR("Can't map %s buffer=%u", name, *n_bufs_ptr);
goto error;
}
assert((*bufs_ptr)[*n_bufs_ptr].data != NULL);
(*bufs_ptr)[*n_bufs_ptr].allocated = plane.length;
_E_LOG_DEBUG("Queuing %s buffer=%u ...", name, *n_bufs_ptr);
_LOG_DEBUG("Queuing %s buffer=%u ...", name, *n_bufs_ptr);
_E_XIOCTL(VIDIOC_QBUF, &buf, "Can't queue %s buffer=%u", name, *n_bufs_ptr);
}
_E_LOG_DEBUG("All %s buffers are ready", name);
_LOG_DEBUG("All %s buffers are ready", name);
return 0;
error: // Mostly for _E_XIOCTL
@@ -383,9 +399,9 @@ static void _m2m_encoder_cleanup(us_m2m_encoder_s *enc) {
say = true;
# define STOP_STREAM(x_name, x_type) { \
enum v4l2_buf_type m_type_var = x_type; \
_E_LOG_DEBUG("Stopping %s ...", x_name); \
_LOG_DEBUG("Stopping %s ...", x_name); \
if (us_xioctl(run->fd, VIDIOC_STREAMOFF, &m_type_var) < 0) { \
_E_LOG_PERROR("Can't stop %s", x_name); \
_LOG_PERROR("Can't stop %s", x_name); \
} \
}
STOP_STREAM("OUTPUT", V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
@@ -400,7 +416,7 @@ static void _m2m_encoder_cleanup(us_m2m_encoder_s *enc) {
us_m2m_buffer_s *m_buf = &run->x_target##_bufs[m_index]; \
if (m_buf->allocated > 0 && m_buf->data != NULL) { \
if (munmap(m_buf->data, m_buf->allocated) < 0) { \
_E_LOG_PERROR("Can't unmap %s buffer=%u", #x_name, m_index); \
_LOG_PERROR("Can't unmap %s buffer=%u", #x_name, m_index); \
} \
} \
} \
@@ -415,7 +431,7 @@ static void _m2m_encoder_cleanup(us_m2m_encoder_s *enc) {
if (run->fd >= 0) {
say = true;
if (close(run->fd) < 0) {
_E_LOG_PERROR("Can't close encoder device");
_LOG_PERROR("Can't close encoder device");
}
run->fd = -1;
}
@@ -424,7 +440,7 @@ static void _m2m_encoder_cleanup(us_m2m_encoder_s *enc) {
run->ready = false;
if (say) {
_E_LOG_INFO("Encoder closed");
_LOG_INFO("Encoder closed");
}
}
@@ -437,7 +453,7 @@ static int _m2m_encoder_compress_raw(us_m2m_encoder_s *enc, const us_frame_s *sr
struct v4l2_control ctl = {0};
ctl.id = V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME;
ctl.value = 1;
_E_LOG_DEBUG("Forcing keyframe ...")
_LOG_DEBUG("Forcing keyframe ...")
_E_XIOCTL(VIDIOC_S_CTRL, &ctl, "Can't force keyframe");
}
@@ -452,17 +468,17 @@ static int _m2m_encoder_compress_raw(us_m2m_encoder_s *enc, const us_frame_s *sr
input_buf.memory = V4L2_MEMORY_DMABUF;
input_buf.field = V4L2_FIELD_NONE;
input_plane.m.fd = src->dma_fd;
_E_LOG_DEBUG("Using INPUT-DMA buffer=%u", input_buf.index);
_LOG_DEBUG("Using INPUT-DMA buffer=%u", input_buf.index);
} else {
input_buf.memory = V4L2_MEMORY_MMAP;
_E_LOG_DEBUG("Grabbing INPUT buffer ...");
_LOG_DEBUG("Grabbing INPUT buffer ...");
_E_XIOCTL(VIDIOC_DQBUF, &input_buf, "Can't grab INPUT buffer");
if (input_buf.index >= run->n_input_bufs) {
_E_LOG_ERROR("V4L2 error: grabbed invalid INPUT: buffer=%u, n_bufs=%u",
_LOG_ERROR("V4L2 error: grabbed invalid INPUT: buffer=%u, n_bufs=%u",
input_buf.index, run->n_input_bufs);
goto error;
}
_E_LOG_DEBUG("Grabbed INPUT buffer=%u", input_buf.index);
_LOG_DEBUG("Grabbed INPUT buffer=%u", input_buf.index);
}
const u64 now_ts = us_get_now_monotonic_u64();
@@ -481,7 +497,7 @@ static int _m2m_encoder_compress_raw(us_m2m_encoder_s *enc, const us_frame_s *sr
const char *input_name = (run->p_dma ? "INPUT-DMA" : "INPUT");
_E_LOG_DEBUG("Sending%s %s buffer ...", (!run->p_dma ? " (releasing)" : ""), input_name);
_LOG_DEBUG("Sending%s %s buffer ...", (!run->p_dma ? " (releasing)" : ""), input_name);
_E_XIOCTL(VIDIOC_QBUF, &input_buf, "Can't send %s buffer", input_name);
// Для не-DMA отправка буфера по факту являтся освобождением этого буфера
@@ -493,20 +509,20 @@ static int _m2m_encoder_compress_raw(us_m2m_encoder_s *enc, const us_frame_s *sr
while (true) {
if (us_get_now_monotonic() > deadline_ts) {
_E_LOG_ERROR("Waiting for the encoder is too long");
_LOG_ERROR("Waiting for the encoder is too long");
goto error;
}
struct pollfd enc_poll = {run->fd, POLLIN, 0};
_E_LOG_DEBUG("Polling encoder ...");
_LOG_DEBUG("Polling encoder ...");
if (poll(&enc_poll, 1, 1000) < 0 && errno != EINTR) {
_E_LOG_PERROR("Can't poll encoder");
_LOG_PERROR("Can't poll encoder");
goto error;
}
if (enc_poll.revents & POLLIN) {
if (!input_released) {
_E_LOG_DEBUG("Releasing %s buffer=%u ...", input_name, input_buf.index);
_LOG_DEBUG("Releasing %s buffer=%u ...", input_name, input_buf.index);
_E_XIOCTL(VIDIOC_DQBUF, &input_buf, "Can't release %s buffer=%u",
input_name, input_buf.index);
input_released = true;
@@ -518,7 +534,7 @@ static int _m2m_encoder_compress_raw(us_m2m_encoder_s *enc, const us_frame_s *sr
output_buf.memory = V4L2_MEMORY_MMAP;
output_buf.length = 1;
output_buf.m.planes = &output_plane;
_E_LOG_DEBUG("Fetching OUTPUT buffer ...");
_LOG_DEBUG("Fetching OUTPUT buffer ...");
_E_XIOCTL(VIDIOC_DQBUF, &output_buf, "Can't fetch OUTPUT buffer");
bool done = false;
@@ -526,7 +542,7 @@ static int _m2m_encoder_compress_raw(us_m2m_encoder_s *enc, const us_frame_s *sr
// Енкодер первый раз может выдать буфер с мусором и нулевым таймстампом,
// так что нужно убедиться, что мы читаем выходной буфер, соответствующий
// входному (с тем же таймстампом).
_E_LOG_DEBUG("Need to retry OUTPUT buffer due timestamp mismatch");
_LOG_DEBUG("Need to retry OUTPUT buffer due timestamp mismatch");
} else {
us_frame_set_data(dest, run->output_bufs[output_buf.index].data, output_plane.bytesused);
dest->key = output_buf.flags & V4L2_BUF_FLAG_KEYFRAME;
@@ -534,7 +550,7 @@ static int _m2m_encoder_compress_raw(us_m2m_encoder_s *enc, const us_frame_s *sr
done = true;
}
_E_LOG_DEBUG("Releasing OUTPUT buffer=%u ...", output_buf.index);
_LOG_DEBUG("Releasing OUTPUT buffer=%u ...", output_buf.index);
_E_XIOCTL(VIDIOC_QBUF, &output_buf, "Can't release OUTPUT buffer=%u", output_buf.index);
if (done) {

View File

@@ -47,6 +47,7 @@ typedef struct {
bool ready;
int last_online;
ldf last_encode_ts;
} us_m2m_encoder_runtime_s;
typedef struct {

View File

@@ -270,6 +270,9 @@ void us_options_destroy(us_options_s *options) {
US_DELETE(options->jpeg_sink, us_memsink_destroy);
US_DELETE(options->raw_sink, us_memsink_destroy);
US_DELETE(options->h264_sink, us_memsink_destroy);
# ifdef WITH_V4P
US_DELETE(options->drm, us_drm_destroy);
# endif
for (unsigned index = 0; index < options->argc; ++index) {
free(options->argv_copy[index]);
@@ -463,7 +466,10 @@ int options_parse(us_options_s *options, us_capture_s *cap, us_encoder_s *enc, u
case _O_H264_M2M_DEVICE: OPT_SET(stream->h264_m2m_path, optarg);
# ifdef WITH_V4P
case _O_V4P: OPT_SET(stream->v4p, true);
case _O_V4P:
options->drm = us_drm_init();
stream->drm = options->drm;
break;
# endif
# ifdef WITH_GPIO
@@ -507,7 +513,7 @@ int options_parse(us_options_s *options, us_capture_s *cap, us_encoder_s *enc, u
# define ADD_SINK(x_label, x_prefix) { \
if (x_prefix##_name && x_prefix##_name[0] != '\0') { \
options->x_prefix = us_memsink_init( \
options->x_prefix = us_memsink_init_opened( \
x_label, \
x_prefix##_name, \
true, \

View File

@@ -40,6 +40,9 @@
#include "../libs/memsink.h"
#include "../libs/options.h"
#include "../libs/capture.h"
#ifdef WITH_V4P
# include "../libs/drm/drm.h"
#endif
#include "encoder.h"
#include "stream.h"
@@ -56,6 +59,9 @@ typedef struct {
us_memsink_s *jpeg_sink;
us_memsink_s *raw_sink;
us_memsink_s *h264_sink;
# ifdef WITH_V4P
us_drm_s *drm;
# endif
} us_options_s;

View File

@@ -24,6 +24,7 @@
#include <stdlib.h>
#include <stdatomic.h>
#include <limits.h>
#include <unistd.h>
#include <errno.h>
#include <assert.h>
@@ -31,6 +32,7 @@
#include <pthread.h>
#include "../libs/types.h"
#include "../libs/errors.h"
#include "../libs/tools.h"
#include "../libs/threading.h"
#include "../libs/process.h"
@@ -39,6 +41,8 @@
#include "../libs/frame.h"
#include "../libs/memsink.h"
#include "../libs/capture.h"
#include "../libs/unjpeg.h"
#include "../libs/fpsi.h"
#ifdef WITH_V4P
# include "../libs/drm/drm.h"
#endif
@@ -46,7 +50,7 @@
#include "blank.h"
#include "encoder.h"
#include "workers.h"
#include "h264.h"
#include "m2m.h"
#ifdef WITH_GPIO
# include "gpio/gpio.h"
#endif
@@ -68,12 +72,10 @@ typedef struct {
} _worker_context_s;
static void _stream_set_capture_state(us_stream_s *stream, uint width, uint height, bool online, uint captured_fps);
static void *_releaser_thread(void *v_ctx);
static void *_jpeg_thread(void *v_ctx);
static void *_h264_thread(void *v_ctx);
static void *_raw_thread(void *v_ctx);
static void *_h264_thread(void *v_ctx);
#ifdef WITH_V4P
static void *_drm_thread(void *v_ctx);
#endif
@@ -88,19 +90,28 @@ static void _stream_drm_ensure_no_signal(us_stream_s *stream);
#endif
static void _stream_expose_jpeg(us_stream_s *stream, const us_frame_s *frame);
static void _stream_expose_raw(us_stream_s *stream, const us_frame_s *frame);
static void _stream_encode_expose_h264(us_stream_s *stream, const us_frame_s *frame, bool force_key);
static void _stream_check_suicide(us_stream_s *stream);
us_stream_s *us_stream_init(us_capture_s *cap, us_encoder_s *enc) {
us_stream_http_s *http;
US_CALLOC(http, 1);
# ifdef WITH_V4P
http->drm_fpsi = us_fpsi_init("DRM", true);
# endif
http->h264_fpsi = us_fpsi_init("H264", true);
US_RING_INIT_WITH_ITEMS(http->jpeg_ring, 4, us_frame_init);
atomic_init(&http->has_clients, false);
atomic_init(&http->snapshot_requested, 0);
atomic_init(&http->last_request_ts, 0);
http->captured_fpsi = us_fpsi_init("STREAM-CAPTURED", true);
us_stream_runtime_s *run;
US_CALLOC(run, 1);
US_RING_INIT_WITH_ITEMS(run->http_jpeg_ring, 4, us_frame_init);
atomic_init(&run->http_has_clients, false);
atomic_init(&run->http_snapshot_requested, 0);
atomic_init(&run->http_last_request_ts, 0);
atomic_init(&run->http_capture_state, 0);
atomic_init(&run->stop, false);
run->blank = us_blank_init();
run->http = http;
us_stream_s *stream;
US_CALLOC(stream, 1);
@@ -112,13 +123,21 @@ us_stream_s *us_stream_init(us_capture_s *cap, us_encoder_s *enc) {
stream->run = run;
us_blank_draw(run->blank, "< NO SIGNAL >", cap->width, cap->height);
_stream_set_capture_state(stream, cap->width, cap->height, false, 0);
us_fpsi_meta_s meta = {0};
us_fpsi_frame_to_meta(run->blank->raw, &meta);
us_fpsi_update(http->captured_fpsi, false, &meta);
return stream;
}
void us_stream_destroy(us_stream_s *stream) {
us_fpsi_destroy(stream->run->http->captured_fpsi);
US_RING_DELETE_WITH_ITEMS(stream->run->http->jpeg_ring, us_frame_destroy);
us_fpsi_destroy(stream->run->http->h264_fpsi);
# ifdef WITH_V4P
us_fpsi_destroy(stream->run->http->drm_fpsi);
# endif
us_blank_destroy(stream->run->blank);
US_RING_DELETE_WITH_ITEMS(stream->run->http_jpeg_ring, us_frame_destroy);
free(stream->run->http);
free(stream->run);
free(stream);
}
@@ -127,23 +146,14 @@ void us_stream_loop(us_stream_s *stream) {
us_stream_runtime_s *const run = stream->run;
us_capture_s *const cap = stream->cap;
US_LOG_INFO("Using V4L2 device: %s", cap->path);
US_LOG_INFO("Using desired FPS: %u", cap->desired_fps);
atomic_store(&run->http_last_request_ts, us_get_now_monotonic());
atomic_store(&run->http->last_request_ts, us_get_now_monotonic());
if (stream->h264_sink != NULL) {
run->h264 = us_h264_stream_init(stream->h264_sink, stream->h264_m2m_path, stream->h264_bitrate, stream->h264_gop);
run->h264_enc = us_m2m_h264_encoder_init("H264", stream->h264_m2m_path, stream->h264_bitrate, stream->h264_gop);
run->h264_tmp_src = us_frame_init();
run->h264_dest = us_frame_init();
}
# ifdef WITH_V4P
if (stream->v4p) {
run->drm = us_drm_init();
run->drm_opened = -1;
US_LOG_INFO("Using passthrough: %s[%s]", run->drm->path, run->drm->port);
}
# endif
while (!_stream_init_loop(stream)) {
atomic_bool threads_stop;
atomic_init(&threads_stop, false);
@@ -162,84 +172,53 @@ void us_stream_loop(us_stream_s *stream) {
US_THREAD_CREATE(ctx->tid, _releaser_thread, ctx);
}
_worker_context_s jpeg_ctx = {
.queue = us_queue_init(cap->run->n_bufs),
.stream = stream,
.stop = &threads_stop,
};
US_THREAD_CREATE(jpeg_ctx.tid, _jpeg_thread, &jpeg_ctx);
_worker_context_s h264_ctx;
if (run->h264 != NULL) {
h264_ctx.queue = us_queue_init(cap->run->n_bufs);
h264_ctx.stream = stream;
h264_ctx.stop = &threads_stop;
US_THREAD_CREATE(h264_ctx.tid, _h264_thread, &h264_ctx);
}
_worker_context_s raw_ctx;
if (stream->raw_sink != NULL) {
raw_ctx.queue = us_queue_init(2);
raw_ctx.stream = stream;
raw_ctx.stop = &threads_stop;
US_THREAD_CREATE(raw_ctx.tid, _raw_thread, &raw_ctx);
}
# define CREATE_WORKER(x_cond, x_ctx, x_thread, x_capacity) \
_worker_context_s *x_ctx = NULL; \
if (x_cond) { \
US_CALLOC(x_ctx, 1); \
x_ctx->queue = us_queue_init(x_capacity); \
x_ctx->stream = stream; \
x_ctx->stop = &threads_stop; \
US_THREAD_CREATE(x_ctx->tid, (x_thread), x_ctx); \
}
CREATE_WORKER(true, jpeg_ctx, _jpeg_thread, cap->run->n_bufs);
CREATE_WORKER((stream->raw_sink != NULL), raw_ctx, _raw_thread, 2);
CREATE_WORKER((stream->h264_sink != NULL), h264_ctx, _h264_thread, cap->run->n_bufs);
# ifdef WITH_V4P
_worker_context_s drm_ctx;
if (stream->v4p) {
drm_ctx.queue = us_queue_init(cap->run->n_bufs);
drm_ctx.stream = stream;
drm_ctx.stop = &threads_stop;
US_THREAD_CREATE(drm_ctx.tid, _drm_thread, &drm_ctx); // cppcheck-suppress assertWithSideEffect
}
CREATE_WORKER((stream->drm != NULL), drm_ctx, _drm_thread, cap->run->n_bufs); // cppcheck-suppress assertWithSideEffect
# endif
uint captured_fps_accum = 0;
sll captured_fps_ts = 0;
uint captured_fps = 0;
# undef CREATE_WORKER
US_LOG_INFO("Capturing ...");
uint slowdown_count = 0;
while (!atomic_load(&run->stop) && !atomic_load(&threads_stop)) {
us_capture_hwbuf_s *hw;
switch (us_capture_grab_buffer(cap, &hw)) {
case -2: continue; // Broken frame
case -1: goto close; // Error
default: break; // Grabbed on >= 0
switch (us_capture_hwbuf_grab(cap, &hw)) {
case 0 ... INT_MAX: break; // Grabbed buffer number
case US_ERROR_NO_DATA: continue; // Broken frame
default: goto close; // Any error
}
const sll now_sec_ts = us_floor_ms(us_get_now_monotonic());
if (now_sec_ts != captured_fps_ts) {
captured_fps = captured_fps_accum;
captured_fps_accum = 0;
captured_fps_ts = now_sec_ts;
US_LOG_PERF_FPS("A new second has come; captured_fps=%u", captured_fps);
}
captured_fps_accum += 1;
us_fpsi_meta_s meta = {0};
us_fpsi_frame_to_meta(&hw->raw, &meta);
us_fpsi_update(run->http->captured_fpsi, true, &meta);
_stream_set_capture_state(stream, cap->run->width, cap->run->height, true, captured_fps);
# ifdef WITH_GPIO
us_gpio_set_stream_online(true);
# endif
us_capture_buffer_incref(hw); // JPEG
us_queue_put(jpeg_ctx.queue, hw, 0);
if (run->h264 != NULL) {
us_capture_buffer_incref(hw); // H264
us_queue_put(h264_ctx.queue, hw, 0);
}
if (stream->raw_sink != NULL) {
us_capture_buffer_incref(hw); // RAW
us_queue_put(raw_ctx.queue, hw, 0);
}
# define QUEUE_HW(x_ctx) if (x_ctx != NULL) { \
us_capture_hwbuf_incref(hw); \
us_queue_put(x_ctx->queue, hw, 0); \
}
QUEUE_HW(jpeg_ctx);
QUEUE_HW(raw_ctx);
QUEUE_HW(h264_ctx);
# ifdef WITH_V4P
if (stream->v4p) {
us_capture_buffer_incref(hw); // DRM
us_queue_put(drm_ctx.queue, hw, 0);
}
QUEUE_HW(drm_ctx);
# endif
# undef QUEUE_HW
us_queue_put(releasers[hw->buf.index].queue, hw, 0); // Plan to release
// Мы не обновляем здесь состояние синков, потому что это происходит внутри обслуживающих их потоков
@@ -256,25 +235,18 @@ void us_stream_loop(us_stream_s *stream) {
close:
atomic_store(&threads_stop, true);
# define DELETE_WORKER(x_ctx) if (x_ctx != NULL) { \
US_THREAD_JOIN(x_ctx->tid); \
us_queue_destroy(x_ctx->queue); \
free(x_ctx); \
}
# ifdef WITH_V4P
if (stream->v4p) {
US_THREAD_JOIN(drm_ctx.tid);
us_queue_destroy(drm_ctx.queue);
}
DELETE_WORKER(drm_ctx);
# endif
if (stream->raw_sink != NULL) {
US_THREAD_JOIN(raw_ctx.tid);
us_queue_destroy(raw_ctx.queue);
}
if (run->h264 != NULL) {
US_THREAD_JOIN(h264_ctx.tid);
us_queue_destroy(h264_ctx.queue);
}
US_THREAD_JOIN(jpeg_ctx.tid);
us_queue_destroy(jpeg_ctx.queue);
DELETE_WORKER(h264_ctx);
DELETE_WORKER(raw_ctx);
DELETE_WORKER(jpeg_ctx);
# undef DELETE_WORKER
for (uint index = 0; index < n_releasers; ++index) {
US_THREAD_JOIN(releasers[index].tid);
@@ -293,34 +265,15 @@ void us_stream_loop(us_stream_s *stream) {
}
}
# ifdef WITH_V4P
US_DELETE(run->drm, us_drm_destroy);
# endif
US_DELETE(run->h264, us_h264_stream_destroy);
US_DELETE(run->h264_enc, us_m2m_encoder_destroy);
US_DELETE(run->h264_tmp_src, us_frame_destroy);
US_DELETE(run->h264_dest, us_frame_destroy);
}
void us_stream_loop_break(us_stream_s *stream) {
atomic_store(&stream->run->stop, true);
}
void us_stream_get_capture_state(us_stream_s *stream, uint *width, uint *height, bool *online, uint *captured_fps) {
const u64 state = atomic_load(&stream->run->http_capture_state);
*width = state & 0xFFFF;
*height = (state >> 16) & 0xFFFF;
*captured_fps = (state >> 32) & 0xFFFF;
*online = (state >> 48) & 1;
}
void _stream_set_capture_state(us_stream_s *stream, uint width, uint height, bool online, uint captured_fps) {
const u64 state = (
(u64)(width & 0xFFFF)
| ((u64)(height & 0xFFFF) << 16)
| ((u64)(captured_fps & 0xFFFF) << 32)
| ((u64)(online ? 1 : 0) << 48)
);
atomic_store(&stream->run->http_capture_state, state);
}
static void *_releaser_thread(void *v_ctx) {
US_THREAD_SETTLE("str_rel")
_releaser_context_s *ctx = v_ctx;
@@ -339,7 +292,7 @@ static void *_releaser_thread(void *v_ctx) {
}
US_MUTEX_LOCK(*ctx->mutex);
const int released = us_capture_release_buffer(ctx->cap, hw);
const int released = us_capture_hwbuf_release(ctx->cap, hw);
US_MUTEX_UNLOCK(*ctx->mutex);
if (released < 0) {
goto done;
@@ -364,14 +317,14 @@ static void *_jpeg_thread(void *v_ctx) {
us_encoder_job_s *const ready_job = ready_wr->job;
if (ready_job->hw != NULL) {
us_capture_buffer_decref(ready_job->hw);
us_capture_hwbuf_decref(ready_job->hw);
ready_job->hw = NULL;
if (ready_wr->job_failed) {
// pass
} else if (ready_wr->job_timely) {
_stream_expose_jpeg(stream, ready_job->dest);
if (atomic_load(&stream->run->http_snapshot_requested) > 0) { // Process real snapshots
atomic_fetch_sub(&stream->run->http_snapshot_requested, 1);
if (atomic_load(&stream->run->http->snapshot_requested) > 0) { // Process real snapshots
atomic_fetch_sub(&stream->run->http->snapshot_requested, 1);
}
US_LOG_PERF("JPEG: ##### Encoded JPEG exposed; worker=%s, latency=%.3Lf",
ready_wr->name, us_get_now_monotonic() - ready_job->dest->grab_ts);
@@ -388,7 +341,7 @@ static void *_jpeg_thread(void *v_ctx) {
const bool update_required = (stream->jpeg_sink != NULL && us_memsink_server_check(stream->jpeg_sink, NULL));
if (!update_required && !_stream_has_jpeg_clients_cached(stream)) {
US_LOG_VERBOSE("JPEG: Passed encoding because nobody is watching");
us_capture_buffer_decref(hw);
us_capture_hwbuf_decref(hw);
continue;
}
@@ -397,7 +350,7 @@ static void *_jpeg_thread(void *v_ctx) {
fluency_passed += 1;
US_LOG_VERBOSE("JPEG: Passed %u frames for fluency: now=%.03Lf, grab_after=%.03Lf",
fluency_passed, now_ts, grab_after_ts);
us_capture_buffer_decref(hw);
us_capture_hwbuf_decref(hw);
continue;
}
fluency_passed = 0;
@@ -413,50 +366,6 @@ static void *_jpeg_thread(void *v_ctx) {
return NULL;
}
static void *_h264_thread(void *v_ctx) {
US_THREAD_SETTLE("str_h264");
_worker_context_s *ctx = v_ctx;
us_h264_stream_s *h264 = ctx->stream->run->h264;
ldf grab_after_ts = 0;
ldf last_encode_ts = us_get_now_monotonic();
while (!atomic_load(ctx->stop)) {
us_capture_hwbuf_s *hw = _get_latest_hw(ctx->queue);
if (hw == NULL) {
continue;
}
if (!us_memsink_server_check(h264->sink, NULL)) {
us_capture_buffer_decref(hw);
US_LOG_VERBOSE("H264: Passed encoding because nobody is watching");
continue;
}
if (hw->raw.grab_ts < grab_after_ts) {
us_capture_buffer_decref(hw);
US_LOG_VERBOSE("H264: Passed encoding for FPS limit: %u", h264->enc->run->fps_limit);
continue;
}
// Форсим кейфрейм, если от захвата давно не было фреймов
const ldf now_ts = us_get_now_monotonic();
const bool force_key = (last_encode_ts + 0.5 < now_ts);
us_h264_stream_process(h264, &hw->raw, force_key);
last_encode_ts = now_ts;
// M2M-енкодер увеличивает задержку на 100 милисекунд при 1080p, если скормить ему больше 30 FPS.
// Поэтому у нас есть два режима: 60 FPS для маленьких видео и 30 для 1920x1080(1200).
// Следующй фрейм захватывается не раньше, чем это требуется по FPS, минус небольшая
// погрешность (если захват неравномерный) - немного меньше 1/60, и примерно треть от 1/30.
const ldf frame_interval = (ldf)1 / h264->enc->run->fps_limit;
grab_after_ts = hw->raw.grab_ts + frame_interval - 0.01;
us_capture_buffer_decref(hw);
}
return NULL;
}
static void *_raw_thread(void *v_ctx) {
US_THREAD_SETTLE("str_raw");
_worker_context_s *ctx = v_ctx;
@@ -467,14 +376,51 @@ static void *_raw_thread(void *v_ctx) {
continue;
}
if (!us_memsink_server_check(ctx->stream->raw_sink, NULL)) {
us_capture_buffer_decref(hw);
if (us_memsink_server_check(ctx->stream->raw_sink, NULL)) {
us_memsink_server_put(ctx->stream->raw_sink, &hw->raw, false);
} else {
US_LOG_VERBOSE("RAW: Passed publishing because nobody is watching");
}
us_capture_hwbuf_decref(hw);
}
return NULL;
}
static void *_h264_thread(void *v_ctx) {
US_THREAD_SETTLE("str_h264");
_worker_context_s *ctx = v_ctx;
us_stream_s *stream = ctx->stream;
ldf grab_after_ts = 0;
while (!atomic_load(ctx->stop)) {
us_capture_hwbuf_s *hw = _get_latest_hw(ctx->queue);
if (hw == NULL) {
continue;
}
us_memsink_server_put(ctx->stream->raw_sink, &hw->raw, false);
us_capture_buffer_decref(hw);
if (!us_memsink_server_check(stream->h264_sink, NULL)) {
US_LOG_VERBOSE("H264: Passed encoding because nobody is watching");
goto decref;
}
if (hw->raw.grab_ts < grab_after_ts) {
US_LOG_DEBUG("H264: Passed encoding for FPS limit");
goto decref;
}
_stream_encode_expose_h264(ctx->stream, &hw->raw, false);
// M2M-енкодер увеличивает задержку на 100 милисекунд при 1080p, если скормить ему больше 30 FPS.
// Поэтому у нас есть два режима: 60 FPS для маленьких видео и 30 для 1920x1080(1200).
// Следующй фрейм захватывается не раньше, чем это требуется по FPS, минус небольшая
// погрешность (если захват неравномерный) - немного меньше 1/60, и примерно треть от 1/30.
const uint fps_limit = stream->run->h264_enc->run->fps_limit;
if (fps_limit > 0) {
const ldf frame_interval = (ldf)1 / fps_limit;
grab_after_ts = hw->raw.grab_ts + frame_interval - 0.01;
}
decref:
us_capture_hwbuf_decref(hw);
}
return NULL;
}
@@ -483,52 +429,57 @@ static void *_raw_thread(void *v_ctx) {
static void *_drm_thread(void *v_ctx) {
US_THREAD_SETTLE("str_drm");
_worker_context_s *ctx = v_ctx;
us_stream_runtime_s *run = ctx->stream->run;
us_stream_s *stream = ctx->stream;
// Close previously opened DRM for a stub
us_drm_close(run->drm);
run->drm_opened = -1;
us_drm_close(stream->drm);
us_capture_hwbuf_s *prev_hw = NULL;
while (!atomic_load(ctx->stop)) {
# define CHECK(x_arg) if ((x_arg) < 0) { goto close; }
# define SLOWDOWN { \
ldf m_next_ts = us_get_now_monotonic() + 1; \
const ldf m_next_ts = us_get_now_monotonic() + 1; \
while (!atomic_load(ctx->stop) && us_get_now_monotonic() < m_next_ts) { \
us_capture_hwbuf_s *m_pass_hw = _get_latest_hw(ctx->queue); \
if (m_pass_hw != NULL) { \
us_capture_buffer_decref(m_pass_hw); \
us_capture_hwbuf_decref(m_pass_hw); \
} \
} \
}
CHECK(run->drm_opened = us_drm_open(run->drm, ctx->stream->cap));
CHECK(us_drm_open(stream->drm, ctx->stream->cap));
while (!atomic_load(ctx->stop)) {
CHECK(us_drm_wait_for_vsync(run->drm));
US_DELETE(prev_hw, us_capture_buffer_decref);
CHECK(us_drm_wait_for_vsync(stream->drm));
US_DELETE(prev_hw, us_capture_hwbuf_decref);
us_capture_hwbuf_s *hw = _get_latest_hw(ctx->queue);
if (hw == NULL) {
continue;
}
if (run->drm_opened == 0) {
CHECK(us_drm_expose_dma(run->drm, hw));
if (stream->drm->run->opened == 0) {
CHECK(us_drm_expose_dma(stream->drm, hw));
prev_hw = hw;
us_fpsi_meta_s meta = {.online = true}; // Online means live video
us_fpsi_update(stream->run->http->drm_fpsi, true, &meta);
continue;
}
CHECK(us_drm_expose_stub(run->drm, run->drm_opened, ctx->stream->cap));
us_capture_buffer_decref(hw);
CHECK(us_drm_expose_stub(stream->drm, stream->drm->run->opened, ctx->stream->cap));
us_capture_hwbuf_decref(hw);
us_fpsi_meta_s meta = {.online = false};
us_fpsi_update(stream->run->http->drm_fpsi, true, &meta);
SLOWDOWN;
}
close:
us_drm_close(run->drm);
run->drm_opened = -1;
US_DELETE(prev_hw, us_capture_buffer_decref);
us_drm_close(stream->drm);
US_DELETE(prev_hw, us_capture_hwbuf_decref);
us_fpsi_meta_s meta = {.online = false};
us_fpsi_update(stream->run->http->drm_fpsi, false, &meta);
SLOWDOWN;
# undef SLOWDOWN
@@ -544,7 +495,7 @@ static us_capture_hwbuf_s *_get_latest_hw(us_queue_s *queue) {
return NULL;
}
while (!us_queue_is_empty(queue)) { // Берем только самый свежий кадр
us_capture_buffer_decref(hw);
us_capture_hwbuf_decref(hw);
assert(!us_queue_get(queue, (void**)&hw, 0));
}
return hw;
@@ -553,28 +504,27 @@ static us_capture_hwbuf_s *_get_latest_hw(us_queue_s *queue) {
static bool _stream_has_jpeg_clients_cached(us_stream_s *stream) {
const us_stream_runtime_s *const run = stream->run;
return (
atomic_load(&run->http_has_clients)
|| (atomic_load(&run->http_snapshot_requested) > 0)
atomic_load(&run->http->has_clients)
|| (atomic_load(&run->http->snapshot_requested) > 0)
|| (stream->jpeg_sink != NULL && atomic_load(&stream->jpeg_sink->has_clients))
);
}
static bool _stream_has_any_clients_cached(us_stream_s *stream) {
const us_stream_runtime_s *const run = stream->run;
return (
# ifdef WITH_V4P
stream->v4p ||
# endif
_stream_has_jpeg_clients_cached(stream)
|| (run->h264 != NULL && atomic_load(&run->h264->sink->has_clients))
|| (stream->h264_sink != NULL && atomic_load(&stream->h264_sink->has_clients))
|| (stream->raw_sink != NULL && atomic_load(&stream->raw_sink->has_clients))
# ifdef WITH_V4P
|| (stream->drm != NULL)
# endif
);
}
static int _stream_init_loop(us_stream_s *stream) {
us_stream_runtime_s *const run = stream->run;
bool waiting_reported = false;
int once = 0;
while (!atomic_load(&stream->run->stop)) {
# ifdef WITH_GPIO
us_gpio_set_stream_online(false);
@@ -582,40 +532,31 @@ static int _stream_init_loop(us_stream_s *stream) {
// Флаги has_clients у синков не обновляются сами по себе, поэтому обновим их
// на каждой итерации старта стрима. После старта этим будут заниматься воркеры.
if (stream->jpeg_sink != NULL) {
us_memsink_server_check(stream->jpeg_sink, NULL);
}
if (stream->run->h264 != NULL) {
us_memsink_server_check(stream->run->h264->sink, NULL);
}
if (stream->raw_sink != NULL) {
us_memsink_server_check(stream->raw_sink, NULL);
}
# define UPDATE_SINK(x_sink) if (x_sink != NULL) { us_memsink_server_check(x_sink, NULL); }
UPDATE_SINK(stream->jpeg_sink);
UPDATE_SINK(stream->raw_sink);
UPDATE_SINK(stream->h264_sink);
# undef UPDATE_SINK
_stream_check_suicide(stream);
stream->cap->dma_export = (
stream->enc->type == US_ENCODER_TYPE_M2M_VIDEO
|| stream->enc->type == US_ENCODER_TYPE_M2M_IMAGE
|| run->h264 != NULL
|| stream->h264_sink != NULL
# ifdef WITH_V4P
|| stream->drm != NULL
# endif
);
switch (us_capture_open(stream->cap)) {
case -2:
if (!waiting_reported) {
waiting_reported = true;
US_LOG_INFO("Waiting for the capture device ...");
}
# ifdef WITH_V4P
_stream_drm_ensure_no_signal(stream);
# endif
case 0: break;
case US_ERROR_NO_DEVICE:
case US_ERROR_NO_DATA:
US_ONCE({ US_LOG_INFO("Waiting for the capture device ..."); });
goto offline_and_retry;
case -1:
waiting_reported = false;
# ifdef WITH_V4P
_stream_drm_ensure_no_signal(stream);
# endif
default:
once = 0;
goto offline_and_retry;
default: break;
}
us_encoder_open(stream->enc, stream->cap);
return 0;
@@ -635,13 +576,17 @@ static int _stream_init_loop(us_stream_s *stream) {
}
us_blank_draw(run->blank, "< NO SIGNAL >", width, height);
_stream_set_capture_state(stream, width, height, false, 0);
us_fpsi_meta_s meta = {0};
us_fpsi_frame_to_meta(run->blank->raw, &meta);
us_fpsi_update(run->http->captured_fpsi, false, &meta);
_stream_expose_jpeg(stream, run->blank->jpeg);
if (run->h264 != NULL) {
us_h264_stream_process(run->h264, run->blank->raw, true);
}
_stream_expose_raw(stream, run->blank->raw);
_stream_encode_expose_h264(stream, run->blank->raw, true);
# ifdef WITH_V4P
_stream_drm_ensure_no_signal(stream);
# endif
}
usleep(100 * 1000);
}
@@ -651,33 +596,40 @@ static int _stream_init_loop(us_stream_s *stream) {
#ifdef WITH_V4P
static void _stream_drm_ensure_no_signal(us_stream_s *stream) {
us_stream_runtime_s *const run = stream->run;
if (!stream->v4p) {
if (stream->drm == NULL) {
return;
}
if (run->drm_opened <= 0) {
us_drm_close(run->drm);
run->drm_opened = us_drm_open(run->drm, NULL);
}
if (run->drm_opened > 0) {
if (us_drm_wait_for_vsync(run->drm) == 0) {
us_drm_expose_stub(run->drm, US_DRM_STUB_NO_SIGNAL, NULL);
const us_fpsi_meta_s meta = {.online = false};
if (stream->drm->run->opened <= 0) {
us_drm_close(stream->drm);
if (us_drm_open(stream->drm, NULL) < 0) {
goto close;
}
}
if (us_drm_ensure_no_signal(stream->drm) < 0) {
goto close;
}
us_fpsi_update(stream->run->http->drm_fpsi, true, &meta);
return;
close:
us_fpsi_update(stream->run->http->drm_fpsi, false, &meta);
us_drm_close(stream->drm);
}
#endif
static void _stream_expose_jpeg(us_stream_s *stream, const us_frame_s *frame) {
us_stream_runtime_s *const run = stream->run;
int ri;
while ((ri = us_ring_producer_acquire(run->http_jpeg_ring, 0)) < 0) {
while ((ri = us_ring_producer_acquire(run->http->jpeg_ring, 0)) < 0) {
if (atomic_load(&run->stop)) {
return;
}
}
us_frame_s *const dest = run->http_jpeg_ring->items[ri];
us_frame_s *const dest = run->http->jpeg_ring->items[ri];
us_frame_copy(frame, dest);
us_ring_producer_release(run->http_jpeg_ring, ri);
us_ring_producer_release(run->http->jpeg_ring, ri);
if (stream->jpeg_sink != NULL) {
us_memsink_server_put(stream->jpeg_sink, dest, NULL);
}
@@ -689,19 +641,46 @@ static void _stream_expose_raw(us_stream_s *stream, const us_frame_s *frame) {
}
}
static void _stream_encode_expose_h264(us_stream_s *stream, const us_frame_s *frame, bool force_key) {
if (stream->h264_sink == NULL) {
return;
}
us_stream_runtime_s *run = stream->run;
us_fpsi_meta_s meta = {.online = false};
if (us_is_jpeg(frame->format)) {
if (us_unjpeg(frame, run->h264_tmp_src, true) < 0) {
goto done;
}
frame = run->h264_tmp_src;
}
if (run->h264_key_requested) {
US_LOG_INFO("H264: Requested keyframe by a sink client");
run->h264_key_requested = false;
force_key = true;
}
if (!us_m2m_encoder_compress(run->h264_enc, frame, run->h264_dest, force_key)) {
meta.online = !us_memsink_server_put(stream->h264_sink, run->h264_dest, &run->h264_key_requested);
}
done:
us_fpsi_update(run->http->h264_fpsi, meta.online, &meta);
}
static void _stream_check_suicide(us_stream_s *stream) {
if (stream->exit_on_no_clients == 0) {
return;
}
us_stream_runtime_s *const run = stream->run;
const ldf now_ts = us_get_now_monotonic();
const ull http_last_request_ts = atomic_load(&run->http_last_request_ts); // Seconds
const ull http_last_request_ts = atomic_load(&run->http->last_request_ts); // Seconds
if (_stream_has_any_clients_cached(stream)) {
atomic_store(&run->http_last_request_ts, now_ts);
atomic_store(&run->http->last_request_ts, now_ts);
} else if (http_last_request_ts + stream->exit_on_no_clients < now_ts) {
US_LOG_INFO("No requests or HTTP/sink clients found in last %u seconds, exiting ...",
stream->exit_on_no_clients);
us_process_suicide();
atomic_store(&run->http_last_request_ts, now_ts);
atomic_store(&run->http->last_request_ts, now_ts);
}
}

View File

@@ -29,41 +29,52 @@
#include "../libs/types.h"
#include "../libs/queue.h"
#include "../libs/ring.h"
#include "../libs/frame.h"
#include "../libs/memsink.h"
#include "../libs/capture.h"
#include "../libs/fpsi.h"
#ifdef WITH_V4P
# include "../libs/drm/drm.h"
#endif
#include "blank.h"
#include "encoder.h"
#include "h264.h"
#include "m2m.h"
typedef struct {
us_h264_stream_s *h264;
# ifdef WITH_V4P
us_drm_s *drm;
int drm_opened;
atomic_bool drm_live;
us_fpsi_s *drm_fpsi;
# endif
us_ring_s *http_jpeg_ring;
atomic_bool http_has_clients;
atomic_uint http_snapshot_requested;
atomic_ullong http_last_request_ts; // Seconds
atomic_ullong http_capture_state; // Bits
atomic_bool h264_online;
us_fpsi_s *h264_fpsi;
us_blank_s *blank;
us_ring_s *jpeg_ring;
atomic_bool has_clients;
atomic_uint snapshot_requested;
atomic_ullong last_request_ts; // Seconds
us_fpsi_s *captured_fpsi;
} us_stream_http_s;
atomic_bool stop;
typedef struct {
us_stream_http_s *http;
us_m2m_encoder_s *h264_enc;
us_frame_s *h264_tmp_src;
us_frame_s *h264_dest;
bool h264_key_requested;
us_blank_s *blank;
atomic_bool stop;
} us_stream_runtime_s;
typedef struct {
us_capture_s *cap;
us_encoder_s *enc;
int last_as_blank;
bool slowdown;
uint error_delay;
uint exit_on_no_clients;
@@ -77,7 +88,7 @@ typedef struct {
char *h264_m2m_path;
# ifdef WITH_V4P
bool v4p;
us_drm_s *drm;
# endif
us_stream_runtime_s *run;
@@ -89,5 +100,3 @@ void us_stream_destroy(us_stream_s *stream);
void us_stream_loop(us_stream_s *stream);
void us_stream_loop_break(us_stream_s *stream);
void us_stream_get_capture_state(us_stream_s *stream, uint *width, uint *height, bool *online, uint *captured_fps);

View File

@@ -23,6 +23,7 @@
#include <stdio.h>
#include <stdlib.h>
#include <stdatomic.h>
#include <limits.h>
#include <string.h>
#include <unistd.h>
#include <getopt.h>
@@ -36,6 +37,7 @@
#include <sys/stat.h>
#include "../libs/types.h"
#include "../libs/errors.h"
#include "../libs/const.h"
#include "../libs/tools.h"
#include "../libs/logging.h"
@@ -172,43 +174,30 @@ static void _main_loop(void) {
cap->dma_required = true;
int once = 0;
ldf blank_at_ts = 0;
int drm_opened = -1;
while (!atomic_load(&_g_stop)) {
# define CHECK(x_arg) if ((x_arg) < 0) { goto close; }
if (atomic_load(&_g_ustreamer_online)) {
blank_at_ts = 0;
US_ONCE({ US_LOG_INFO("DRM: Online stream is active, pausing the service ..."); });
goto close;
if (drm->run->opened <= 0) {
CHECK(us_drm_open(drm, NULL));
}
if (drm_opened <= 0) {
blank_at_ts = 0;
CHECK(drm_opened = us_drm_open(drm, NULL));
if (atomic_load(&_g_ustreamer_online)) {
US_ONCE({ US_LOG_INFO("DRM: Online stream is active, pausing the service ..."); });
CHECK(us_drm_wait_for_vsync(drm));
CHECK(us_drm_expose_stub(drm, US_DRM_STUB_BUSY, NULL));
_slowdown();
continue;
}
assert(drm_opened > 0);
if (us_capture_open(cap) < 0) {
ldf now_ts = us_get_now_monotonic();
if (blank_at_ts == 0) {
blank_at_ts = now_ts + 5;
}
if (now_ts <= blank_at_ts) {
CHECK(us_drm_wait_for_vsync(drm));
CHECK(us_drm_expose_stub(drm, US_DRM_STUB_NO_SIGNAL, NULL));
} else {
US_ONCE({ US_LOG_INFO("DRM: Turning off the display by timeout ..."); });
CHECK(us_drm_dpms_power_off(drm));
}
CHECK(us_drm_ensure_no_signal(drm));
_slowdown();
continue;
}
once = 0;
blank_at_ts = 0;
us_drm_close(drm);
CHECK(drm_opened = us_drm_open(drm, cap));
CHECK(us_drm_open(drm, cap));
us_capture_hwbuf_s *prev_hw = NULL;
while (!atomic_load(&_g_stop)) {
@@ -219,34 +208,31 @@ static void _main_loop(void) {
CHECK(us_drm_wait_for_vsync(drm));
if (prev_hw != NULL) {
CHECK(us_capture_release_buffer(cap, prev_hw));
CHECK(us_capture_hwbuf_release(cap, prev_hw));
prev_hw = NULL;
}
us_capture_hwbuf_s *hw;
switch (us_capture_grab_buffer(cap, &hw)) {
case -2: continue; // Broken frame
case -1: goto close; // Any error
default: break; // Grabbed on >= 0
switch (us_capture_hwbuf_grab(cap, &hw)) {
case 0 ... INT_MAX: break; // Grabbed buffer number
case US_ERROR_NO_DATA: continue; // Broken frame
default: goto close; // Any error
}
if (drm_opened == 0) {
if (drm->run->opened == 0) {
CHECK(us_drm_expose_dma(drm, hw));
prev_hw = hw;
continue;
}
CHECK(us_drm_expose_stub(drm, drm_opened, cap));
CHECK(us_capture_release_buffer(cap, hw));
CHECK(us_drm_expose_stub(drm, drm->run->opened, cap));
CHECK(us_capture_hwbuf_release(cap, hw));
_slowdown();
}
close:
us_drm_close(drm);
drm_opened = -1;
us_capture_close(cap);
_slowdown();
# undef CHECK