Compare commits

...

69 Commits
v6.7 ... v6.15

Author SHA1 Message Date
Maxim Devaev
6f8e8205b3 Bump version: 6.14 → 6.15 2024-09-06 22:21:34 +03:00
Maxim Devaev
5f932d862b Small refactoring of #289 + manpage 2024-09-06 20:40:23 +03:00
zefir-o
590a73f9ec Add an option which allows handling truncated frames. (#289)
Extension of c96559e4ac.
Some cheap Chinese cameras produce frames which are detected as 'broken'. However, they
are later handled well.
Introduce an option which allows disabling the check on demand.
2024-09-06 19:32:48 +03:00
Maxim Devaev
79bbafdc98 Bump version: 6.13 → 6.14 2024-09-04 18:56:32 +03:00
Maxim Devaev
fcecc12229 Revert "refactoring"
This reverts commit 3e228c1fb8.
2024-09-04 18:34:41 +03:00
Maxim Devaev
f79a663839 added pkgconf to deps 2024-09-04 18:31:48 +03:00
Maxim Devaev
3e228c1fb8 refactoring 2024-09-04 15:49:55 +03:00
Maxim Devaev
53ec87b416 Issue #264: Properly checking of pkg-config 2024-08-17 05:40:03 +03:00
Maxim Devaev
de8cb85605 Bump version: 6.12 → 6.13 2024-08-16 07:07:54 +03:00
Maxim Devaev
000be92a0b lint fix 2024-08-16 07:04:21 +03:00
Maxim Devaev
f2779f7b44 check for pkg-config 2024-08-16 06:38:52 +03:00
yuri@FreeBSD
dcddfddf56 Fix crash on FreeBSD due to incorrect thr_self system call invocation (#285)
The correct signature is:
int thr_self(long *id);

It was called as thr_self() which caused memory corruption.
2024-08-16 06:38:07 +03:00
Randolf Richardson 張文道
793f24c48e Update README.md (#275)
Minor spelling correction
2024-05-29 12:59:48 +03:00
Maxim Devaev
25d87d5fa8 Bump version: 6.11 → 6.12 2024-05-16 00:13:24 +03:00
Maxim Devaev
e8a7fb32ac lint fixes 2024-05-16 00:10:53 +03:00
Maxim Devaev
9d5eb8bacb fixed edid path 2024-05-16 00:01:03 +03:00
Maxim Devaev
353e58d7ca fix 2024-05-16 00:00:10 +03:00
Fabrice Fontaine
6c24c9ea61 src/libs/types.h: include sys/types.h (#273)
Include sys/types.h to avoid the following uclibc build failure since
version 5.52 and
2d6716aa47:

In file included from libs/base64.h:25,
                 from libs/base64.c:23:
libs/types.h:30:9: error: unknown type name 'ssize_t'
   30 | typedef ssize_t sz;
      |         ^~~~~~~

Fixes:
 - http://autobuild.buildroot.org/results/24498049d7beb4afaaf9f9a0c2fc0bcd26a3ee04

Signed-off-by: Fabrice Fontaine <fontaine.fabrice@gmail.com>
2024-05-15 20:56:49 +03:00
Maxim Devaev
dfeefe5a1c Bump version: 6.10 → 6.11 2024-04-05 19:31:57 +03:00
Maxim Devaev
aae090ab4e list: clean next pointer on append 2024-04-05 19:29:13 +03:00
Maxim Devaev
18038799f0 reworked pool logic 2024-04-05 19:21:42 +03:00
Maxim Devaev
fab4c47f17 list: clean prev/next pointers on remove 2024-04-05 17:48:26 +03:00
Maxim Devaev
c40b3ee225 refactoring 2024-04-04 23:25:06 +03:00
Maxim Devaev
fca69db680 us_workers_pool_wait() without side effect 2024-04-04 23:21:34 +03:00
Maxim Devaev
0d974a5faf refactoring 2024-04-04 19:37:03 +03:00
Maxim Devaev
1ed39790ba use JCS_EXT_BGR on libjpeg-turbo 2024-04-04 15:20:16 +03:00
Maxim Devaev
75a193f997 syntax fix 2024-04-04 03:58:45 +03:00
Maxim Devaev
65c652e624 encoder: removed cpu_forced logic 2024-04-04 03:44:20 +03:00
Maxim Devaev
ae2f270f50 refactoring 2024-04-04 02:36:28 +03:00
Maxim Devaev
0a639eabca deprecated noop jpeg encoder 2024-04-03 20:23:35 +03:00
Maxim Devaev
9ec59143dd Bump version: 6.9 → 6.10 2024-04-01 22:03:40 +03:00
Maxim Devaev
e059a21ef9 refactoring 2024-04-01 21:40:51 +03:00
Maxim Devaev
074ce86f67 using fps_meta instead of flags 2024-04-01 00:12:42 +03:00
Maxim Devaev
b8b67de5cf mutexless fpsi at all 2024-03-31 17:33:51 +03:00
Maxim Devaev
5f3198e72f sort of fps reset 2024-03-30 21:37:13 +02:00
Maxim Devaev
3a3889d02c fpsi: mutexless mode 2024-03-30 19:34:43 +02:00
Maxim Devaev
88203f9c53 fix 2024-03-30 19:05:59 +02:00
Maxim Devaev
24aca349a3 we don't need us_fpsi_reset() anymore 2024-03-30 19:05:15 +02:00
Maxim Devaev
a9e0cb49e9 h264 and drm statistics in http 2024-03-30 17:48:15 +02:00
Maxim Devaev
4ec3f11935 refactoring 2024-03-30 16:10:46 +02:00
Maxim Devaev
14e9d9f7af fps -> fpsi, store frame meta 2024-03-30 15:35:14 +02:00
Maxim Devaev
580ca68291 US_FRAME_META_DECLARE 2024-03-30 13:13:17 +02:00
Maxim Devaev
37f3f093dc simplified list declaration 2024-03-30 13:07:20 +02:00
Maxim Devaev
70fa6548fe common fps counter 2024-03-30 12:15:59 +02:00
Maxim Devaev
f8a703f166 refactoring 2024-03-29 22:58:07 +02:00
Maxim Devaev
3f69dd785f fix 2024-03-29 15:41:54 +02:00
Maxim Devaev
8e6c374acf refactoring 2024-03-29 15:36:43 +02:00
Maxim Devaev
caf9ed7bfe refactoring 2024-03-29 03:34:35 +02:00
Maxim Devaev
94b1224456 fix 2024-03-29 02:24:36 +02:00
Maxim Devaev
c8201df720 don't rebuild python module without necessary 2024-03-29 01:15:02 +02:00
Maxim Devaev
e0f09f65a1 new macro US_ONCE_FOR() 2024-03-29 01:02:40 +02:00
Maxim Devaev
4e1f62bfac refactoring 2024-03-29 00:13:08 +02:00
Maxim Devaev
b0b881f199 fix 2024-03-28 18:38:01 +02:00
Maxim Devaev
a21f527bce common error constants 2024-03-28 17:17:22 +02:00
Maxim Devaev
d64077c2d5 Bump version: 6.8 → 6.9 2024-03-27 21:39:03 +02:00
Maxim Devaev
83f12baa61 refactoring 2024-03-27 19:27:28 +02:00
Maxim Devaev
b6fac2608d ustreamer-v4p: bring back busy message 2024-03-27 19:22:21 +02:00
Maxim Devaev
e6ebc12505 replaced comment 2024-03-27 02:14:36 +02:00
Maxim Devaev
8c92ab6f47 ustreamer: blank drm output by timeout 2024-03-26 22:20:08 +02:00
Maxim Devaev
7dc492d875 refactoring 2024-03-26 21:51:47 +02:00
Maxim Devaev
d43014346d Bump version: 6.7 → 6.8 2024-03-26 20:23:16 +02:00
Maxim Devaev
bcd447963c build fix 2024-03-26 20:22:10 +02:00
Maxim Devaev
eec6cfd0d4 lint fix 2024-03-26 20:10:06 +02:00
Maxim Devaev
f177300e69 ustreamer/drm: fixed assertion 2024-03-26 18:59:33 +02:00
Maxim Devaev
7015a26a63 Userspace workaround for the wrong TC358743 RGB bytes ordering
- https://github.com/raspberrypi/linux/issues/6068
2024-03-26 18:35:13 +02:00
Maxim Devaev
290282b6b6 drm: fixed big endian case for rgb/bgr 2024-03-26 18:05:51 +02:00
Maxim Devaev
a339ff5d06 v4p mode in ustreamer 2024-03-26 17:45:53 +02:00
Maxim Devaev
8d4e9a6ca0 renamed us_hw_buffer_s to us_capture_hwbuf_s 2024-03-26 01:54:01 +02:00
Maxim Devaev
f0f5fcd67f renamed us_device* to us_capture* 2024-03-26 01:25:04 +02:00
53 changed files with 1612 additions and 1323 deletions

View File

@@ -1,7 +1,7 @@
[bumpversion]
commit = True
tag = True
current_version = 6.7
current_version = 6.15
parse = (?P<major>\d+)\.(?P<minor>\d+)
serialize =
{major}.{minor}

View File

@@ -16,6 +16,12 @@ export
_LINTERS_IMAGE ?= ustreamer-linters
# =====
ifeq (__not_found__,$(shell which pkg-config 2>/dev/null || echo "__not_found__"))
$(error "No pkg-config found in $(PATH)")
endif
# =====
define optbool
$(filter $(shell echo $(1) | tr A-Z a-z), yes on 1)

View File

@@ -23,7 +23,7 @@
| Compatibility with mjpg-streamer's API | ✔ | :) |
Footnotes:
* ```1``` Long before µStreamer, I made a [patch](https://github.com/jacksonliam/mjpg-streamer/pull/164) to add DV-timings support to mjpg-streamer and to keep it from hanging up no device disconnection. Alas, the patch is far from perfect and I can't guarantee it will work every time - mjpg-streamer's source code is very complicated and its structure is hard to understand. With this in mind, along with needing multithreading and JPEG hardware acceleration in the future, I decided to make my own stream server from scratch instead of supporting legacy code.
* ```1``` Long before µStreamer, I made a [patch](https://github.com/jacksonliam/mjpg-streamer/pull/164) to add DV-timings support to mjpg-streamer and to keep it from hanging up on device disconnection. Alas, the patch is far from perfect and I can't guarantee it will work every time - mjpg-streamer's source code is very complicated and its structure is hard to understand. With this in mind, along with needing multithreading and JPEG hardware acceleration in the future, I decided to make my own stream server from scratch instead of supporting legacy code.
* ```2``` This feature allows to cut down outgoing traffic several-fold when streaming HDMI, but it increases CPU usage a little bit. The idea is that HDMI is a fully digital interface and each captured frame can be identical to the previous one byte-wise. There's no need to stream the same image over the net several times a second. With the `--drop-same-frames=20` option enabled, µStreamer will drop all the matching frames (with a limit of 20 in a row). Each new frame is matched with the previous one first by length, then using ```memcmp()```.

View File

@@ -32,6 +32,7 @@
#include <opus/opus.h>
#include "uslibs/types.h"
#include "uslibs/errors.h"
#include "uslibs/tools.h"
#include "uslibs/array.h"
#include "uslibs/ring.h"
@@ -185,12 +186,12 @@ int us_audio_get_encoded(us_audio_s *audio, u8 *data, uz *size, u64 *pts) {
}
const int ri = us_ring_consumer_acquire(audio->enc_ring, 0.1);
if (ri < 0) {
return -2;
return US_ERROR_NO_DATA;
}
const _enc_buffer_s *const buf = audio->enc_ring->items[ri];
if (*size < buf->used) {
us_ring_consumer_release(audio->enc_ring, ri);
return -3;
return US_ERROR_NO_DATA;
}
memcpy(data, buf->data, buf->used);
*size = buf->used;

View File

@@ -34,7 +34,7 @@
#include "rtp.h"
typedef struct us_janus_client_sx {
typedef struct {
janus_callbacks *gw;
janus_plugin_session *session;
atomic_bool transmit;
@@ -48,7 +48,7 @@ typedef struct us_janus_client_sx {
us_ring_s *video_ring;
us_ring_s *audio_ring;
US_LIST_STRUCT(struct us_janus_client_sx);
US_LIST_DECLARE;
} us_janus_client_s;

View File

@@ -27,6 +27,7 @@
#include <linux/videodev2.h>
#include "uslibs/types.h"
#include "uslibs/errors.h"
#include "uslibs/tools.h"
#include "uslibs/frame.h"
#include "uslibs/memsinksh.h"
@@ -54,7 +55,7 @@ int us_memsink_fd_wait_frame(int fd, us_memsink_shared_s *mem, u64 last_id) {
}
usleep(1000); // lock_polling
} while (now_ts < deadline_ts);
return -2;
return US_ERROR_NO_DATA;
}
int us_memsink_fd_get_frame(int fd, us_memsink_shared_s *mem, us_frame_s *frame, u64 *frame_id, bool key_required) {

View File

@@ -37,6 +37,7 @@
#include "uslibs/types.h"
#include "uslibs/const.h"
#include "uslibs/errors.h"
#include "uslibs/tools.h"
#include "uslibs/threading.h"
#include "uslibs/list.h"
@@ -178,7 +179,7 @@ static void *_video_sink_thread(void *arg) {
if (ri >= 0 && frame->key) {
atomic_store(&_g_key_required, false);
}
} else if (waited != -2) {
} else if (waited != US_ERROR_NO_DATA) {
goto close_memsink;
}
}

1
janus/src/uslibs/errors.h Symbolic link
View File

@@ -0,0 +1 @@
../../../src/libs/errors.h

View File

@@ -3,7 +3,7 @@ envlist = cppcheck, flake8, pylint, mypy, vulture, htmlhint
skipsdist = true
[testenv]
basepython = python3.11
basepython = python3.12
changedir = /src
[testenv:cppcheck]
@@ -13,6 +13,7 @@ commands = cppcheck \
--std=c17 \
--error-exitcode=1 \
--quiet \
--check-level=exhaustive \
--enable=warning,portability,performance,style \
--suppress=assignmentInAssert \
--suppress=variableScope \
@@ -25,7 +26,7 @@ commands = cppcheck \
allowlist_externals = bash
commands = bash -c 'flake8 --config=linters/flake8.ini tools/*.py' python/*.py
deps =
flake8==5.0.4
flake8
flake8-quotes
[testenv:pylint]
@@ -33,6 +34,7 @@ allowlist_externals = bash
commands = bash -c 'pylint --rcfile=linters/pylint.ini --output-format=colorized --reports=no tools/*.py python/*.py'
deps =
pylint
setuptools
[testenv:mypy]
allowlist_externals = bash

View File

@@ -1,6 +1,6 @@
.\" Manpage for ustreamer-dump.
.\" Open an issue or pull request to https://github.com/pikvm/ustreamer to correct errors or typos
.TH USTREAMER-DUMP 1 "version 6.7" "January 2021"
.TH USTREAMER-DUMP 1 "version 6.15" "January 2021"
.SH NAME
ustreamer-dump \- Dump uStreamer's memory sink to file

View File

@@ -1,6 +1,6 @@
.\" Manpage for ustreamer.
.\" Open an issue or pull request to https://github.com/pikvm/ustreamer to correct errors or typos
.TH USTREAMER 1 "version 6.7" "November 2020"
.TH USTREAMER 1 "version 6.15" "November 2020"
.SH NAME
ustreamer \- stream MJPEG video from any V4L2 device to the network
@@ -68,6 +68,9 @@ Desired FPS. Default: maximum possible.
.BR \-z\ \fIN ", " \-\-min\-frame\-size\ \fIN
Drop frames smaller then this limit. Useful if the device produces small\-sized garbage frames. Default: 128 bytes.
.TP
.BR \-T ", " \-\-allow\-truncated\-frames
Allows to handle truncated frames. Useful if the device produces incorrect but still acceptable frames. Default: disabled.
.TP
.BR \-n ", " \-\-persistent
Suppress repetitive signal source errors. Default: disabled.
.TP
@@ -96,8 +99,6 @@ HW ─ Use pre-encoded MJPEG frames directly from camera hardware.
M2M-VIDEO ─ GPU-accelerated MJPEG encoding.
M2M-IMAGE ─ GPU-accelerated JPEG encoding.
NOOP ─ Don't compress MJPEG stream (do nothing).
.TP
.BR \-g\ \fIWxH,... ", " \-\-glitched\-resolutions\ \fIWxH,...
It doesn't do anything. Still here for compatibility.

View File

@@ -3,14 +3,14 @@
pkgname=ustreamer
pkgver=6.7
pkgver=6.15
pkgrel=1
pkgdesc="Lightweight and fast MJPEG-HTTP streamer"
url="https://github.com/pikvm/ustreamer"
license=(GPL)
arch=(i686 x86_64 armv6h armv7h aarch64)
depends=(libjpeg libevent libbsd libgpiod systemd)
makedepends=(gcc make systemd)
makedepends=(gcc make pkgconf systemd)
source=(${pkgname}::"git+https://github.com/pikvm/ustreamer#commit=v${pkgver}")
md5sums=(SKIP)

View File

@@ -24,7 +24,7 @@ RUN apk add --no-cache \
WORKDIR /ustreamer
COPY --from=build /build/ustreamer/src/ustreamer.bin ustreamer
RUN wget https://raw.githubusercontent.com/pikvm/kvmd/master/configs/kvmd/edid/v3-hdmi.hex -O /edid.hex
RUN wget https://raw.githubusercontent.com/pikvm/kvmd/master/configs/kvmd/edid/v2.hex -O /edid.hex
COPY pkg/docker/entry.sh /
EXPOSE 8080

View File

@@ -6,7 +6,7 @@
include $(TOPDIR)/rules.mk
PKG_NAME:=ustreamer
PKG_VERSION:=6.7
PKG_VERSION:=6.15
PKG_RELEASE:=1
PKG_MAINTAINER:=Maxim Devaev <mdevaev@gmail.com>

View File

@@ -7,7 +7,8 @@ PY ?= python3
# =====
all:
all: root
root: $(shell find src -type f,l)
$(info == PY_BUILD ustreamer-*.so)
rm -rf root
$(ECHO) $(PY) -m build --skip-dependency-check --no-isolation

View File

@@ -17,7 +17,7 @@ def _find_sources(suffix: str) -> list[str]:
if __name__ == "__main__":
setup(
name="ustreamer",
version="6.7",
version="6.15",
description="uStreamer tools",
author="Maxim Devaev",
author_email="mdevaev@gmail.com",

1
python/src/uslibs/errors.h Symbolic link
View File

@@ -0,0 +1 @@
../../../src/libs/errors.h

View File

@@ -14,6 +14,7 @@
#include <Python.h>
#include "uslibs/types.h"
#include "uslibs/errors.h"
#include "uslibs/tools.h"
#include "uslibs/frame.h"
#include "uslibs/memsinksh.h"
@@ -175,9 +176,9 @@ static int _wait_frame(_MemsinkObject *self) {
if (PyErr_CheckSignals() < 0) {
return -1;
}
} while (now_ts < deadline_ts);
return -2;
return US_ERROR_NO_DATA;
}
static PyObject *_MemsinkObject_wait_frame(_MemsinkObject *self, PyObject *args, PyObject *kwargs) {
@@ -194,7 +195,7 @@ static PyObject *_MemsinkObject_wait_frame(_MemsinkObject *self, PyObject *args,
switch (_wait_frame(self)) {
case 0: break;
case -2: Py_RETURN_NONE;
case US_ERROR_NO_DATA: Py_RETURN_NONE;
default: return NULL;
}

View File

@@ -12,11 +12,11 @@ _DUMP = ustreamer-dump.bin
_V4P = ustreamer-v4p.bin
_CFLAGS = -MD -c -std=c17 -Wall -Wextra -D_GNU_SOURCE $(CFLAGS)
_LDFLAGS = $(LDFLAGS)
_COMMON_LIBS = -lm -ljpeg -pthread -lrt -latomic
_USTR_LDFLAGS = $(LDFLAGS) -lm -ljpeg -pthread -lrt -latomic -levent -levent_pthreads
_DUMP_LDFLAGS = $(LDFLAGS) -lm -ljpeg -pthread -lrt -latomic
_V4P_LDFLAGS = $(LDFLAGS) -lm -ljpeg -pthread -lrt -latomic
_USTR_LIBS = $(_COMMON_LIBS) -levent -levent_pthreads
_USTR_SRCS = $(shell ls \
libs/*.c \
ustreamer/*.c \
@@ -27,15 +27,14 @@ _USTR_SRCS = $(shell ls \
ustreamer/*.c \
)
_DUMP_LIBS = $(_COMMON_LIBS)
_DUMP_SRCS = $(shell ls \
libs/*.c \
dump/*.c \
)
_V4P_LIBS = $(_COMMON_LIBS)
_V4P_SRCS = $(shell ls \
libs/*.c \
libs/drm/*.c \
v4p/*.c \
)
@@ -52,16 +51,16 @@ endef
ifneq ($(call optbool,$(WITH_GPIO)),)
_USTR_LIBS += -lgpiod
override _CFLAGS += -DWITH_GPIO $(shell pkg-config --atleast-version=2 libgpiod 2> /dev/null && echo -DHAVE_GPIOD2)
_USTR_SRCS += $(shell ls ustreamer/gpio/*.c)
override _USTR_LDFLAGS += -lgpiod
override _USTR_SRCS += $(shell ls ustreamer/gpio/*.c)
endif
ifneq ($(call optbool,$(WITH_SYSTEMD)),)
_USTR_LIBS += -lsystemd
override _CFLAGS += -DWITH_SYSTEMD
_USTR_SRCS += $(shell ls ustreamer/http/systemd/*.c)
override _USTR_LDFLAGS += -lsystemd
override _USTR_SRCS += $(shell ls ustreamer/http/systemd/*.c)
endif
@@ -73,10 +72,10 @@ endif
WITH_SETPROCTITLE ?= 1
ifneq ($(call optbool,$(WITH_SETPROCTITLE)),)
ifeq ($(shell uname -s | tr A-Z a-z),linux)
_USTR_LIBS += -lbsd
endif
override _CFLAGS += -DWITH_SETPROCTITLE
ifeq ($(shell uname -s | tr A-Z a-z),linux)
override _USTR_LDFLAGS += -lbsd
endif
endif
@@ -84,8 +83,10 @@ WITH_V4P ?= 0
ifneq ($(call optbool,$(WITH_V4P)),)
override _TARGETS += $(_V4P)
override _OBJS += $(_V4P_SRCS:%.c=$(_BUILD)/%.o)
override _CFLAGS += $(shell pkg-config --cflags libdrm)
_V4P_LDFLAGS = $(shell pkg-config --libs libdrm)
override _CFLAGS += -DWITH_V4P $(shell pkg-config --cflags libdrm)
override _V4P_LDFLAGS += $(shell pkg-config --libs libdrm)
override _USTR_SRCS += $(shell ls libs/drm/*.c)
override _USTR_LDFLAGS += $(shell pkg-config --libs libdrm)
endif
@@ -108,17 +109,17 @@ install-strip: install
$(_USTR): $(_USTR_SRCS:%.c=$(_BUILD)/%.o)
$(info == LD $@)
$(ECHO) $(CC) $^ -o $@ $(_LDFLAGS) $(_USTR_LIBS)
$(ECHO) $(CC) $^ -o $@ $(_USTR_LDFLAGS)
$(_DUMP): $(_DUMP_SRCS:%.c=$(_BUILD)/%.o)
$(info == LD $@)
$(ECHO) $(CC) $^ -o $@ $(_LDFLAGS) $(_DUMP_LIBS)
$(ECHO) $(CC) $^ -o $@ $(_DUMP_LDFLAGS)
$(_V4P): $(_V4P_SRCS:%.c=$(_BUILD)/%.o)
$(info == LD $@)
$(ECHO) $(CC) $^ -o $@ $(_LDFLAGS) $(_V4P_LDFLAGS) $(_V4P_LIBS)
$(ECHO) $(CC) $^ -o $@ $(_V4P_LDFLAGS)
$(_BUILD)/%.o: %.c

View File

@@ -31,10 +31,12 @@
#include <assert.h>
#include "../libs/const.h"
#include "../libs/errors.h"
#include "../libs/tools.h"
#include "../libs/logging.h"
#include "../libs/frame.h"
#include "../libs/memsink.h"
#include "../libs/fpsi.h"
#include "../libs/signal.h"
#include "../libs/options.h"
@@ -109,9 +111,9 @@ int main(int argc, char *argv[]) {
US_LOGGING_INIT;
US_THREAD_RENAME("main");
char *sink_name = NULL;
const char *sink_name = NULL;
unsigned sink_timeout = 1;
char *output_path = NULL;
const char *output_path = NULL;
bool output_json = false;
long long count = 0;
long double interval = 0;
@@ -220,26 +222,22 @@ static int _dump_sink(
const useconds_t interval_us = interval * 1000000;
us_frame_s *frame = us_frame_init();
us_fpsi_s *fpsi = us_fpsi_init("SINK", false);
us_memsink_s *sink = NULL;
if ((sink = us_memsink_init("input", sink_name, false, 0, false, 0, sink_timeout)) == NULL) {
if ((sink = us_memsink_init_opened("input", sink_name, false, 0, false, 0, sink_timeout)) == NULL) {
goto error;
}
unsigned fps = 0;
unsigned fps_accum = 0;
long long fps_second = 0;
long double last_ts = 0;
while (!_g_stop) {
bool key_requested;
const int error = us_memsink_client_get(sink, frame, &key_requested, key_required);
if (error == 0) {
const int got = us_memsink_client_get(sink, frame, &key_requested, key_required);
if (got == 0) {
key_required = false;
const long double now = us_get_now_monotonic();
const long long now_second = us_floor_ms(now);
char fourcc_str[8];
US_LOG_VERBOSE("Frame: %s - %ux%u -- online=%d, key=%d, kr=%d, gop=%u, latency=%.3Lf, backlog=%.3Lf, size=%zu",
@@ -253,13 +251,7 @@ static int _dump_sink(
US_LOG_DEBUG(" stride=%u, grab_ts=%.3Lf, encode_begin_ts=%.3Lf, encode_end_ts=%.3Lf",
frame->stride, frame->grab_ts, frame->encode_begin_ts, frame->encode_end_ts);
if (now_second != fps_second) {
fps = fps_accum;
fps_accum = 0;
fps_second = now_second;
US_LOG_PERF_FPS("A new second has come; captured_fps=%u", fps);
}
fps_accum += 1;
us_fpsi_update(fpsi, true, NULL);
if (ctx->v_output != NULL) {
ctx->write(ctx->v_output, frame);
@@ -275,7 +267,7 @@ static int _dump_sink(
if (interval_us > 0) {
usleep(interval_us);
}
} else if (error == -2) {
} else if (got == US_ERROR_NO_DATA) {
usleep(1000);
} else {
goto error;
@@ -286,6 +278,7 @@ static int _dump_sink(
error:
US_DELETE(sink, us_memsink_destroy);
us_fpsi_destroy(fpsi);
us_frame_destroy(frame);
US_LOG_INFO("Bye-bye");
return retval;

File diff suppressed because it is too large Load Diff

View File

@@ -49,7 +49,7 @@ typedef struct {
int dma_fd;
bool grabbed;
atomic_int refs;
} us_hw_buffer_s;
} us_capture_hwbuf_s;
typedef struct {
int fd;
@@ -62,13 +62,13 @@ typedef struct {
uint jpeg_quality;
uz raw_size;
uint n_bufs;
us_hw_buffer_s *hw_bufs;
us_capture_hwbuf_s *bufs;
bool dma;
enum v4l2_buf_type capture_type;
bool capture_mplane;
bool streamon;
int open_error_reported;
} us_device_runtime_s;
int open_error_once;
} us_capture_runtime_s;
typedef enum {
CTL_MODE_NONE = 0,
@@ -104,6 +104,8 @@ typedef struct {
uint width;
uint height;
uint format;
bool format_swap_rgb;
uint jpeg_quality;
v4l2_std_id standard;
enum v4l2_memory io_method;
@@ -113,25 +115,26 @@ typedef struct {
bool dma_required;
uint desired_fps;
uz min_frame_size;
bool allow_truncated_frames;
bool persistent;
uint timeout;
us_controls_s ctl;
us_device_runtime_s *run;
} us_device_s;
us_capture_runtime_s *run;
} us_capture_s;
us_device_s *us_device_init(void);
void us_device_destroy(us_device_s *dev);
us_capture_s *us_capture_init(void);
void us_capture_destroy(us_capture_s *cap);
int us_device_parse_format(const char *str);
int us_device_parse_standard(const char *str);
int us_device_parse_io_method(const char *str);
int us_capture_parse_format(const char *str);
int us_capture_parse_standard(const char *str);
int us_capture_parse_io_method(const char *str);
int us_device_open(us_device_s *dev);
void us_device_close(us_device_s *dev);
int us_capture_open(us_capture_s *cap);
void us_capture_close(us_capture_s *cap);
int us_device_grab_buffer(us_device_s *dev, us_hw_buffer_s **hw);
int us_device_release_buffer(us_device_s *dev, us_hw_buffer_s *hw);
int us_capture_hwbuf_grab(us_capture_s *cap, us_capture_hwbuf_s **hw);
int us_capture_hwbuf_release(const us_capture_s *cap, us_capture_hwbuf_s *hw);
void us_device_buffer_incref(us_hw_buffer_s *hw);
void us_device_buffer_decref(us_hw_buffer_s *hw);
void us_capture_hwbuf_incref(us_capture_hwbuf_s *hw);
void us_capture_hwbuf_decref(us_capture_hwbuf_s *hw);

View File

@@ -26,7 +26,7 @@
#define US_VERSION_MAJOR 6
#define US_VERSION_MINOR 7
#define US_VERSION_MINOR 15
#define US_MAKE_VERSION2(_major, _minor) #_major "." #_minor
#define US_MAKE_VERSION1(_major, _minor) US_MAKE_VERSION2(_major, _minor)

View File

@@ -37,17 +37,19 @@
#include <drm_fourcc.h>
#include <libdrm/drm.h>
#include "../libs/types.h"
#include "../libs/tools.h"
#include "../libs/logging.h"
#include "../libs/frame.h"
#include "../libs/frametext.h"
#include "../types.h"
#include "../errors.h"
#include "../tools.h"
#include "../logging.h"
#include "../frame.h"
#include "../frametext.h"
#include "../capture.h"
static void _drm_vsync_callback(int fd, uint n_frame, uint sec, uint usec, void *v_buf);
static int _drm_check_status(us_drm_s *drm);
static void _drm_ensure_dpms_power(us_drm_s *drm, bool on);
static int _drm_init_buffers(us_drm_s *drm, const us_device_s *dev);
static int _drm_init_buffers(us_drm_s *drm, const us_capture_s *cap);
static int _drm_find_sink(us_drm_s *drm, uint width, uint height, float hz);
static drmModeModeInfo *_find_best_mode(drmModeConnector *conn, uint width, uint height, float hz);
@@ -57,11 +59,11 @@ static const char *_connector_type_to_string(u32 type);
static float _get_refresh_rate(const drmModeModeInfo *mode);
#define _D_LOG_ERROR(x_msg, ...) US_LOG_ERROR("DRM: " x_msg, ##__VA_ARGS__)
#define _D_LOG_PERROR(x_msg, ...) US_LOG_PERROR("DRM: " x_msg, ##__VA_ARGS__)
#define _D_LOG_INFO(x_msg, ...) US_LOG_INFO("DRM: " x_msg, ##__VA_ARGS__)
#define _D_LOG_VERBOSE(x_msg, ...) US_LOG_VERBOSE("DRM: " x_msg, ##__VA_ARGS__)
#define _D_LOG_DEBUG(x_msg, ...) US_LOG_DEBUG("DRM: " x_msg, ##__VA_ARGS__)
#define _LOG_ERROR(x_msg, ...) US_LOG_ERROR("DRM: " x_msg, ##__VA_ARGS__)
#define _LOG_PERROR(x_msg, ...) US_LOG_PERROR("DRM: " x_msg, ##__VA_ARGS__)
#define _LOG_INFO(x_msg, ...) US_LOG_INFO("DRM: " x_msg, ##__VA_ARGS__)
#define _LOG_VERBOSE(x_msg, ...) US_LOG_VERBOSE("DRM: " x_msg, ##__VA_ARGS__)
#define _LOG_DEBUG(x_msg, ...) US_LOG_DEBUG("DRM: " x_msg, ##__VA_ARGS__)
us_drm_s *us_drm_init(void) {
@@ -70,6 +72,7 @@ us_drm_s *us_drm_init(void) {
run->fd = -1;
run->status_fd = -1;
run->dpms_state = -1;
run->opened = -1;
run->has_vsync = true;
run->exposing_dma_fd = -1;
run->ft = us_frametext_init();
@@ -78,8 +81,9 @@ us_drm_s *us_drm_init(void) {
US_CALLOC(drm, 1);
// drm->path = "/dev/dri/card0";
drm->path = "/dev/dri/by-path/platform-gpu-card";
drm->port = "HDMI-A-1";
drm->port = "HDMI-A-2"; // OUT2 on PiKVM V4 Plus
drm->timeout = 5;
drm->blank_after = 5;
drm->run = run;
return drm;
}
@@ -90,44 +94,45 @@ void us_drm_destroy(us_drm_s *drm) {
US_DELETE(drm, free); // cppcheck-suppress uselessAssignmentPtrArg
}
int us_drm_open(us_drm_s *drm, const us_device_s *dev) {
int us_drm_open(us_drm_s *drm, const us_capture_s *cap) {
us_drm_runtime_s *const run = drm->run;
assert(run->fd < 0);
switch (_drm_check_status(drm)) {
case 0: break;
case -2: goto unplugged;
case US_ERROR_NO_DEVICE: goto unplugged;
default: goto error;
}
_D_LOG_INFO("Configuring DRM device for %s ...", (dev == NULL ? "STUB" : "DMA"));
_LOG_INFO("Using passthrough: %s[%s]", drm->path, drm->port);
_LOG_INFO("Configuring DRM device for %s ...", (cap == NULL ? "STUB" : "DMA"));
if ((run->fd = open(drm->path, O_RDWR | O_CLOEXEC | O_NONBLOCK)) < 0) {
_D_LOG_PERROR("Can't open DRM device");
_LOG_PERROR("Can't open DRM device");
goto error;
}
_D_LOG_DEBUG("DRM device fd=%d opened", run->fd);
_LOG_DEBUG("DRM device fd=%d opened", run->fd);
int stub = 0; // Open the real device with DMA
if (dev == NULL) {
if (cap == NULL) {
stub = US_DRM_STUB_USER;
} else if (dev->run->format != V4L2_PIX_FMT_RGB24) {
} else if (cap->run->format != V4L2_PIX_FMT_RGB24 && cap->run->format != V4L2_PIX_FMT_BGR24) {
stub = US_DRM_STUB_BAD_FORMAT;
char fourcc_str[8];
us_fourcc_to_string(dev->run->format, fourcc_str, 8);
_D_LOG_ERROR("Input format %s is not supported, forcing to STUB ...", fourcc_str);
us_fourcc_to_string(cap->run->format, fourcc_str, 8);
_LOG_ERROR("Input format %s is not supported, forcing to STUB ...", fourcc_str);
}
# define CHECK_CAP(x_cap) { \
_D_LOG_DEBUG("Checking %s ...", #x_cap); \
_LOG_DEBUG("Checking %s ...", #x_cap); \
u64 m_check; \
if (drmGetCap(run->fd, x_cap, &m_check) < 0) { \
_D_LOG_PERROR("Can't check " #x_cap); \
_LOG_PERROR("Can't check " #x_cap); \
goto error; \
} \
if (!m_check) { \
_D_LOG_ERROR(#x_cap " is not supported"); \
_LOG_ERROR(#x_cap " is not supported"); \
goto error; \
} \
}
@@ -137,48 +142,49 @@ int us_drm_open(us_drm_s *drm, const us_device_s *dev) {
}
# undef CHECK_CAP
const uint width = (stub > 0 ? 0 : dev->run->width);
const uint height = (stub > 0 ? 0 : dev->run->height);
const uint hz = (stub > 0 ? 0 : dev->run->hz);
const uint width = (stub > 0 ? 0 : cap->run->width);
const uint height = (stub > 0 ? 0 : cap->run->height);
const uint hz = (stub > 0 ? 0 : cap->run->hz);
switch (_drm_find_sink(drm, width, height, hz)) {
case 0: break;
case -2: goto unplugged;
case US_ERROR_NO_DEVICE: goto unplugged;
default: goto error;
}
if ((stub == 0) && (width != run->mode.hdisplay || height < run->mode.vdisplay)) {
// We'll try to show something instead of nothing if height != vdisplay
stub = US_DRM_STUB_BAD_RESOLUTION;
_D_LOG_ERROR("There is no appropriate modes for the capture, forcing to STUB ...");
_LOG_ERROR("There is no appropriate modes for the capture, forcing to STUB ...");
}
if (_drm_init_buffers(drm, (stub > 0 ? NULL : dev)) < 0) {
if (_drm_init_buffers(drm, (stub > 0 ? NULL : cap)) < 0) {
goto error;
}
run->saved_crtc = drmModeGetCrtc(run->fd, run->crtc_id);
_D_LOG_DEBUG("Setting up CRTC ...");
_LOG_DEBUG("Setting up CRTC ...");
if (drmModeSetCrtc(run->fd, run->crtc_id, run->bufs[0].id, 0, 0, &run->conn_id, 1, &run->mode) < 0) {
_D_LOG_PERROR("Can't set CRTC");
_LOG_PERROR("Can't set CRTC");
goto error;
}
run->opened_for_stub = (stub > 0);
_LOG_INFO("Opened for %s ...", (stub > 0 ? "STUB" : "DMA"));
run->exposing_dma_fd = -1;
run->unplugged_reported = false;
_D_LOG_INFO("Opened for %s ...", (run->opened_for_stub ? "STUB" : "DMA"));
return stub;
run->blank_at_ts = 0;
run->opened = stub;
run->once = 0;
return run->opened;
error:
us_drm_close(drm);
return -1;
return run->opened; // -1 after us_drm_close()
unplugged:
if (!run->unplugged_reported) {
_D_LOG_ERROR("Display is not plugged");
run->unplugged_reported = true;
}
US_ONCE_FOR(run->once, __LINE__, {
_LOG_ERROR("Display is not plugged");
});
us_drm_close(drm);
return -2;
run->opened = US_ERROR_NO_DEVICE;
return run->opened;
}
void us_drm_close(us_drm_s *drm) {
@@ -193,33 +199,33 @@ void us_drm_close(us_drm_s *drm) {
}
if (run->saved_crtc != NULL) {
_D_LOG_DEBUG("Restoring CRTC ...");
_LOG_DEBUG("Restoring CRTC ...");
if (drmModeSetCrtc(run->fd,
run->saved_crtc->crtc_id, run->saved_crtc->buffer_id,
run->saved_crtc->x, run->saved_crtc->y,
&run->conn_id, 1, &run->saved_crtc->mode
) < 0 && errno != ENOENT) {
_D_LOG_PERROR("Can't restore CRTC");
_LOG_PERROR("Can't restore CRTC");
}
drmModeFreeCrtc(run->saved_crtc);
run->saved_crtc = NULL;
}
if (run->bufs != NULL) {
_D_LOG_DEBUG("Releasing buffers ...");
_LOG_DEBUG("Releasing buffers ...");
for (uint n_buf = 0; n_buf < run->n_bufs; ++n_buf) {
us_drm_buffer_s *const buf = &run->bufs[n_buf];
if (buf->fb_added && drmModeRmFB(run->fd, buf->id) < 0) {
_D_LOG_PERROR("Can't remove buffer=%u", n_buf);
_LOG_PERROR("Can't remove buffer=%u", n_buf);
}
if (buf->dumb_created) {
struct drm_mode_destroy_dumb destroy = {.handle = buf->handle};
if (drmIoctl(run->fd, DRM_IOCTL_MODE_DESTROY_DUMB, &destroy) < 0) {
_D_LOG_PERROR("Can't destroy dumb buffer=%u", n_buf);
_LOG_PERROR("Can't destroy dumb buffer=%u", n_buf);
}
}
if (buf->data != NULL && munmap(buf->data, buf->allocated)) {
_D_LOG_PERROR("Can't unmap buffer=%u", n_buf);
_LOG_PERROR("Can't unmap buffer=%u", n_buf);
}
}
US_DELETE(run->bufs, free);
@@ -232,19 +238,48 @@ void us_drm_close(us_drm_s *drm) {
run->crtc_id = 0;
run->dpms_state = -1;
run->opened = -1;
run->has_vsync = true;
run->stub_n_buf = 0;
if (say) {
_D_LOG_INFO("Closed");
_LOG_INFO("Closed");
}
}
int us_drm_ensure_no_signal(us_drm_s *drm) {
us_drm_runtime_s *const run = drm->run;
assert(run->fd >= 0);
assert(run->opened > 0);
const ldf now_ts = us_get_now_monotonic();
if (run->blank_at_ts == 0) {
run->blank_at_ts = now_ts + drm->blank_after;
}
const ldf saved_ts = run->blank_at_ts; // us_drm*() rewrites it to 0
int retval;
if (now_ts <= run->blank_at_ts) {
retval = us_drm_wait_for_vsync(drm);
if (retval == 0) {
retval = us_drm_expose_stub(drm, US_DRM_STUB_NO_SIGNAL, NULL);
}
} else {
US_ONCE_FOR(run->once, __LINE__, {
_LOG_INFO("Turning off the display by timeout ...");
});
retval = us_drm_dpms_power_off(drm);
}
run->blank_at_ts = saved_ts;
return retval;
}
int us_drm_dpms_power_off(us_drm_s *drm) {
assert(drm->run->fd >= 0);
switch (_drm_check_status(drm)) {
case 0: break;
case -2: return 0; // Unplugged, nice
case US_ERROR_NO_DEVICE: return 0; // Unplugged, nice
// Во время переключения DPMS монитор моргает один раз состоянием disconnected,
// а потом почему-то снова оказывается connected. Так что просто считаем,
// что отсоединенный монитор на этом этапе - это нормально.
@@ -258,10 +293,11 @@ int us_drm_wait_for_vsync(us_drm_s *drm) {
us_drm_runtime_s *const run = drm->run;
assert(run->fd >= 0);
run->blank_at_ts = 0;
switch (_drm_check_status(drm)) {
case 0: break;
case -2: return -2;
case US_ERROR_NO_DEVICE: return US_ERROR_NO_DEVICE;
default: return -1;
}
_drm_ensure_dpms_power(drm, true);
@@ -275,13 +311,13 @@ int us_drm_wait_for_vsync(us_drm_s *drm) {
FD_ZERO(&fds);
FD_SET(run->fd, &fds);
_D_LOG_DEBUG("Calling select() for VSync ...");
_LOG_DEBUG("Calling select() for VSync ...");
const int result = select(run->fd + 1, &fds, NULL, NULL, &timeout);
if (result < 0) {
_D_LOG_PERROR("Can't select(%d) device for VSync", run->fd);
_LOG_PERROR("Can't select(%d) device for VSync", run->fd);
return -1;
} else if (result == 0) {
_D_LOG_ERROR("Device timeout while waiting VSync");
_LOG_ERROR("Device timeout while waiting VSync");
return -1;
}
@@ -289,9 +325,9 @@ int us_drm_wait_for_vsync(us_drm_s *drm) {
.version = DRM_EVENT_CONTEXT_VERSION,
.page_flip_handler = _drm_vsync_callback,
};
_D_LOG_DEBUG("Handling DRM event (maybe VSync) ...");
_LOG_DEBUG("Handling DRM event (maybe VSync) ...");
if (drmHandleEvent(run->fd, &ctx) < 0) {
_D_LOG_PERROR("Can't handle DRM event");
_LOG_PERROR("Can't handle DRM event");
return -1;
}
return 0;
@@ -305,18 +341,19 @@ static void _drm_vsync_callback(int fd, uint n_frame, uint sec, uint usec, void
us_drm_buffer_s *const buf = v_buf;
*buf->ctx.has_vsync = true;
*buf->ctx.exposing_dma_fd = -1;
_D_LOG_DEBUG("Got VSync signal");
_LOG_DEBUG("Got VSync signal");
}
int us_drm_expose_stub(us_drm_s *drm, us_drm_stub_e stub, const us_device_s *dev) {
int us_drm_expose_stub(us_drm_s *drm, us_drm_stub_e stub, const us_capture_s *cap) {
us_drm_runtime_s *const run = drm->run;
assert(run->fd >= 0);
assert(run->opened_for_stub);
assert(run->opened > 0);
run->blank_at_ts = 0;
switch (_drm_check_status(drm)) {
case 0: break;
case -2: return -2;
case US_ERROR_NO_DEVICE: return US_ERROR_NO_DEVICE;
default: return -1;
}
_drm_ensure_dpms_power(drm, true);
@@ -324,24 +361,19 @@ int us_drm_expose_stub(us_drm_s *drm, us_drm_stub_e stub, const us_device_s *dev
# define DRAW_MSG(x_msg) us_frametext_draw(run->ft, (x_msg), run->mode.hdisplay, run->mode.vdisplay)
switch (stub) {
case US_DRM_STUB_BAD_RESOLUTION: {
assert(dev != NULL);
assert(cap != NULL);
char msg[1024];
US_SNPRINTF(msg, 1023,
"=== PiKVM ==="
"\n \n< UNSUPPORTED RESOLUTION >"
"\n \n< %ux%up%.02f >"
"\n \nby this display",
dev->run->width, dev->run->height, dev->run->hz);
cap->run->width, cap->run->height, cap->run->hz);
DRAW_MSG(msg);
break;
};
case US_DRM_STUB_BAD_FORMAT:
DRAW_MSG(
"=== PiKVM ==="
"\n \n< UNSUPPORTED CAPTURE FORMAT >"
"\n \nIt shouldn't happen ever."
"\n \nPlease check the logs and report a bug:"
"\n \n- https://github.com/pikvm/pikvm -");
DRAW_MSG("=== PiKVM ===\n \n< UNSUPPORTED CAPTURE FORMAT >");
break;
case US_DRM_STUB_NO_SIGNAL:
DRAW_MSG("=== PiKVM ===\n \n< NO SIGNAL >");
@@ -359,48 +391,49 @@ int us_drm_expose_stub(us_drm_s *drm, us_drm_stub_e stub, const us_device_s *dev
run->has_vsync = false;
_D_LOG_DEBUG("Copying STUB frame ...")
_LOG_DEBUG("Copying STUB frame ...")
memcpy(buf->data, run->ft->frame->data, US_MIN(run->ft->frame->used, buf->allocated));
_D_LOG_DEBUG("Exposing STUB framebuffer n_buf=%u ...", run->stub_n_buf);
_LOG_DEBUG("Exposing STUB framebuffer n_buf=%u ...", run->stub_n_buf);
const int retval = drmModePageFlip(
run->fd, run->crtc_id, buf->id,
DRM_MODE_PAGE_FLIP_EVENT | DRM_MODE_PAGE_FLIP_ASYNC,
buf);
if (retval < 0) {
_D_LOG_PERROR("Can't expose STUB framebuffer n_buf=%u ...", run->stub_n_buf);
_LOG_PERROR("Can't expose STUB framebuffer n_buf=%u ...", run->stub_n_buf);
}
_D_LOG_DEBUG("Exposed STUB framebuffer n_buf=%u", run->stub_n_buf);
_LOG_DEBUG("Exposed STUB framebuffer n_buf=%u", run->stub_n_buf);
run->stub_n_buf = (run->stub_n_buf + 1) % run->n_bufs;
return retval;
}
int us_drm_expose_dma(us_drm_s *drm, const us_hw_buffer_s *hw) {
int us_drm_expose_dma(us_drm_s *drm, const us_capture_hwbuf_s *hw) {
us_drm_runtime_s *const run = drm->run;
us_drm_buffer_s *const buf = &run->bufs[hw->buf.index];
assert(run->fd >= 0);
assert(!run->opened_for_stub);
assert(run->opened == 0);
run->blank_at_ts = 0;
switch (_drm_check_status(drm)) {
case 0: break;
case -2: return -2;
case US_ERROR_NO_DEVICE: return US_ERROR_NO_DEVICE;
default: return -1;
}
_drm_ensure_dpms_power(drm, true);
run->has_vsync = false;
_D_LOG_DEBUG("Exposing DMA framebuffer n_buf=%u ...", hw->buf.index);
_LOG_DEBUG("Exposing DMA framebuffer n_buf=%u ...", hw->buf.index);
const int retval = drmModePageFlip(
run->fd, run->crtc_id, buf->id,
DRM_MODE_PAGE_FLIP_EVENT | DRM_MODE_PAGE_FLIP_ASYNC,
buf);
if (retval < 0) {
_D_LOG_PERROR("Can't expose DMA framebuffer n_buf=%u ...", run->stub_n_buf);
_LOG_PERROR("Can't expose DMA framebuffer n_buf=%u ...", run->stub_n_buf);
}
_D_LOG_DEBUG("Exposed DMA framebuffer n_buf=%u", run->stub_n_buf);
_LOG_DEBUG("Exposed DMA framebuffer n_buf=%u", run->stub_n_buf);
run->exposing_dma_fd = hw->dma_fd;
return retval;
}
@@ -409,36 +442,36 @@ static int _drm_check_status(us_drm_s *drm) {
us_drm_runtime_s *run = drm->run;
if (run->status_fd < 0) {
_D_LOG_DEBUG("Trying to find status file ...");
_LOG_DEBUG("Trying to find status file ...");
struct stat st;
if (stat(drm->path, &st) < 0) {
_D_LOG_PERROR("Can't stat() DRM device");
_LOG_PERROR("Can't stat() DRM device");
goto error;
}
const uint mi = minor(st.st_rdev);
_D_LOG_DEBUG("DRM device minor(st_rdev)=%u", mi);
_LOG_DEBUG("DRM device minor(st_rdev)=%u", mi);
char path[128];
US_SNPRINTF(path, 127, "/sys/class/drm/card%u-%s/status", mi, drm->port);
_D_LOG_DEBUG("Opening status file %s ...", path);
_LOG_DEBUG("Opening status file %s ...", path);
if ((run->status_fd = open(path, O_RDONLY | O_CLOEXEC)) < 0) {
_D_LOG_PERROR("Can't open status file: %s", path);
_LOG_PERROR("Can't open status file: %s", path);
goto error;
}
_D_LOG_DEBUG("Status file fd=%d opened", run->status_fd);
_LOG_DEBUG("Status file fd=%d opened", run->status_fd);
}
char status_ch;
if (read(run->status_fd, &status_ch, 1) != 1) {
_D_LOG_PERROR("Can't read status file");
_LOG_PERROR("Can't read status file");
goto error;
}
if (lseek(run->status_fd, 0, SEEK_SET) != 0) {
_D_LOG_PERROR("Can't rewind status file");
_LOG_PERROR("Can't rewind status file");
goto error;
}
_D_LOG_DEBUG("Current display status: %c", status_ch);
return (status_ch == 'd' ? -2 : 0);
_LOG_DEBUG("Current display status: %c", status_ch);
return (status_ch == 'd' ? US_ERROR_NO_DEVICE : 0);
error:
US_CLOSE_FD(run->status_fd);
@@ -448,24 +481,26 @@ error:
static void _drm_ensure_dpms_power(us_drm_s *drm, bool on) {
us_drm_runtime_s *const run = drm->run;
if (run->dpms_id > 0 && run->dpms_state != (int)on) {
_D_LOG_INFO("Changing DPMS power mode: %d -> %u ...", run->dpms_state, on);
_LOG_INFO("Changing DPMS power mode: %d -> %u ...", run->dpms_state, on);
if (drmModeConnectorSetProperty(
run->fd, run->conn_id, run->dpms_id,
(on ? DRM_MODE_DPMS_ON : DRM_MODE_DPMS_OFF)
) < 0) {
_D_LOG_PERROR("Can't set DPMS power=%u (ignored)", on);
_LOG_PERROR("Can't set DPMS power=%u (ignored)", on);
}
}
run->dpms_state = (int)on;
}
static int _drm_init_buffers(us_drm_s *drm, const us_device_s *dev) {
static int _drm_init_buffers(us_drm_s *drm, const us_capture_s *cap) {
us_drm_runtime_s *const run = drm->run;
const uint n_bufs = (dev == NULL ? 4 : dev->run->n_bufs);
const char *name = (dev == NULL ? "STUB" : "DMA");
const uint n_bufs = (cap == NULL ? 4 : cap->run->n_bufs);
const char *name = (cap == NULL ? "STUB" : "DMA");
_D_LOG_DEBUG("Initializing %u %s buffers ...", n_bufs, name);
_LOG_DEBUG("Initializing %u %s buffers ...", n_bufs, name);
uint format = DRM_FORMAT_RGB888;
US_CALLOC(run->bufs, n_bufs);
for (run->n_bufs = 0; run->n_bufs < n_bufs; ++run->n_bufs) {
@@ -479,14 +514,14 @@ static int _drm_init_buffers(us_drm_s *drm, const us_device_s *dev) {
u32 strides[4] = {0};
u32 offsets[4] = {0};
if (dev == NULL) {
if (cap == NULL) {
struct drm_mode_create_dumb create = {
.width = run->mode.hdisplay,
.height = run->mode.vdisplay,
.bpp = 24,
};
if (drmIoctl(run->fd, DRM_IOCTL_MODE_CREATE_DUMB, &create) < 0) {
_D_LOG_PERROR("Can't create %s buffer=%u", name, n_buf);
_LOG_PERROR("Can't create %s buffer=%u", name, n_buf);
return -1;
}
buf->handle = create.handle;
@@ -494,7 +529,7 @@ static int _drm_init_buffers(us_drm_s *drm, const us_device_s *dev) {
struct drm_mode_map_dumb map = {.handle = create.handle};
if (drmIoctl(run->fd, DRM_IOCTL_MODE_MAP_DUMB, &map) < 0) {
_D_LOG_PERROR("Can't prepare dumb buffer=%u to mapping", n_buf);
_LOG_PERROR("Can't prepare dumb buffer=%u to mapping", n_buf);
return -1;
}
if ((buf->data = mmap(
@@ -502,7 +537,7 @@ static int _drm_init_buffers(us_drm_s *drm, const us_device_s *dev) {
PROT_READ | PROT_WRITE, MAP_SHARED,
run->fd, map.offset
)) == MAP_FAILED) {
_D_LOG_PERROR("Can't map buffer=%u", n_buf);
_LOG_PERROR("Can't map buffer=%u", n_buf);
return -1;
}
memset(buf->data, 0, create.size);
@@ -512,20 +547,25 @@ static int _drm_init_buffers(us_drm_s *drm, const us_device_s *dev) {
strides[0] = create.pitch;
} else {
if (drmPrimeFDToHandle(run->fd, dev->run->hw_bufs[n_buf].dma_fd, &buf->handle) < 0) {
_D_LOG_PERROR("Can't import DMA buffer=%u from capture device", n_buf);
if (drmPrimeFDToHandle(run->fd, cap->run->bufs[n_buf].dma_fd, &buf->handle) < 0) {
_LOG_PERROR("Can't import DMA buffer=%u from capture device", n_buf);
return -1;
}
handles[0] = buf->handle;
strides[0] = dev->run->stride;
strides[0] = cap->run->stride;
switch (cap->run->format) {
case V4L2_PIX_FMT_RGB24: format = (DRM_FORMAT_BIG_ENDIAN ? DRM_FORMAT_BGR888 : DRM_FORMAT_RGB888); break;
case V4L2_PIX_FMT_BGR24: format = (DRM_FORMAT_BIG_ENDIAN ? DRM_FORMAT_RGB888 : DRM_FORMAT_BGR888); break;
}
}
if (drmModeAddFB2(
run->fd,
run->mode.hdisplay, run->mode.vdisplay, DRM_FORMAT_RGB888,
run->mode.hdisplay, run->mode.vdisplay, format,
handles, strides, offsets, &buf->id, 0
)) {
_D_LOG_PERROR("Can't setup buffer=%u", n_buf);
_LOG_PERROR("Can't setup buffer=%u", n_buf);
return -1;
}
buf->fb_added = true;
@@ -538,22 +578,22 @@ static int _drm_find_sink(us_drm_s *drm, uint width, uint height, float hz) {
run->crtc_id = 0;
_D_LOG_DEBUG("Trying to find the appropriate sink ...");
_LOG_DEBUG("Trying to find the appropriate sink ...");
drmModeRes *res = drmModeGetResources(run->fd);
if (res == NULL) {
_D_LOG_PERROR("Can't get resources info");
_LOG_PERROR("Can't get resources info");
goto done;
}
if (res->count_connectors <= 0) {
_D_LOG_ERROR("Can't find any connectors");
_LOG_ERROR("Can't find any connectors");
goto done;
}
for (int ci = 0; ci < res->count_connectors; ++ci) {
drmModeConnector *conn = drmModeGetConnector(run->fd, res->connectors[ci]);
if (conn == NULL) {
_D_LOG_PERROR("Can't get connector index=%d", ci);
_LOG_PERROR("Can't get connector index=%d", ci);
goto done;
}
@@ -565,37 +605,37 @@ static int _drm_find_sink(us_drm_s *drm, uint width, uint height, float hz) {
drmModeFreeConnector(conn);
continue;
}
_D_LOG_INFO("Using connector %s: conn_type=%d, conn_type_id=%d",
_LOG_INFO("Using connector %s: conn_type=%d, conn_type_id=%d",
drm->port, conn->connector_type, conn->connector_type_id);
if (conn->connection != DRM_MODE_CONNECTED) {
_D_LOG_ERROR("Connector for port %s has !DRM_MODE_CONNECTED", drm->port);
_LOG_ERROR("Connector for port %s has !DRM_MODE_CONNECTED", drm->port);
drmModeFreeConnector(conn);
goto done;
}
drmModeModeInfo *best;
const drmModeModeInfo *best;
if ((best = _find_best_mode(conn, width, height, hz)) == NULL) {
_D_LOG_ERROR("Can't find any appropriate display modes");
_LOG_ERROR("Can't find any appropriate display modes");
drmModeFreeConnector(conn);
goto unplugged;
}
_D_LOG_INFO("Using best mode: %ux%up%.02f",
_LOG_INFO("Using best mode: %ux%up%.02f",
best->hdisplay, best->vdisplay, _get_refresh_rate(best));
if ((run->dpms_id = _find_dpms(run->fd, conn)) > 0) {
_D_LOG_INFO("Using DPMS: id=%u", run->dpms_id);
_LOG_INFO("Using DPMS: id=%u", run->dpms_id);
} else {
_D_LOG_INFO("Using DPMS: None");
_LOG_INFO("Using DPMS: None");
}
u32 taken_crtcs = 0; // Unused here
if ((run->crtc_id = _find_crtc(run->fd, res, conn, &taken_crtcs)) == 0) {
_D_LOG_ERROR("Can't find CRTC");
_LOG_ERROR("Can't find CRTC");
drmModeFreeConnector(conn);
goto done;
}
_D_LOG_INFO("Using CRTC: id=%u", run->crtc_id);
_LOG_INFO("Using CRTC: id=%u", run->crtc_id);
run->conn_id = conn->connector_id;
memcpy(&run->mode, best, sizeof(drmModeModeInfo));
@@ -610,7 +650,7 @@ done:
unplugged:
drmModeFreeResources(res);
return -2;
return US_ERROR_NO_DEVICE;
}
static drmModeModeInfo *_find_best_mode(drmModeConnector *conn, uint width, uint height, float hz) {

View File

@@ -25,10 +25,10 @@
#include <xf86drmMode.h>
#include "../libs/types.h"
#include "../libs/frame.h"
#include "../libs/frametext.h"
#include "../libs/device.h"
#include "../types.h"
#include "../frame.h"
#include "../frametext.h"
#include "../capture.h"
typedef enum {
@@ -63,11 +63,14 @@ typedef struct {
uint n_bufs;
drmModeCrtc *saved_crtc;
int dpms_state;
bool opened_for_stub;
int opened;
bool has_vsync;
int exposing_dma_fd;
uint stub_n_buf;
bool unplugged_reported;
ldf blank_at_ts;
int once;
us_frametext_s *ft;
} us_drm_runtime_s;
@@ -75,6 +78,7 @@ typedef struct {
char *path;
char *port;
uint timeout;
uint blank_after;
us_drm_runtime_s *run;
} us_drm_s;
@@ -83,10 +87,11 @@ typedef struct {
us_drm_s *us_drm_init(void);
void us_drm_destroy(us_drm_s *drm);
int us_drm_open(us_drm_s *drm, const us_device_s *dev);
int us_drm_open(us_drm_s *drm, const us_capture_s *cap);
void us_drm_close(us_drm_s *drm);
int us_drm_dpms_power_off(us_drm_s *drm);
int us_drm_wait_for_vsync(us_drm_s *drm);
int us_drm_expose_stub(us_drm_s *drm, us_drm_stub_e stub, const us_device_s *dev);
int us_drm_expose_dma(us_drm_s *drm, const us_hw_buffer_s *hw);
int us_drm_expose_stub(us_drm_s *drm, us_drm_stub_e stub, const us_capture_s *cap);
int us_drm_expose_dma(us_drm_s *drm, const us_capture_hwbuf_s *hw);
int us_drm_ensure_no_signal(us_drm_s *drm);

View File

@@ -20,59 +20,8 @@
*****************************************************************************/
#include "h264.h"
#pragma once
#include <stdatomic.h>
#include "../libs/types.h"
#include "../libs/tools.h"
#include "../libs/logging.h"
#include "../libs/frame.h"
#include "../libs/memsink.h"
#include "../libs/unjpeg.h"
#include "m2m.h"
us_h264_stream_s *us_h264_stream_init(us_memsink_s *sink, const char *path, uint bitrate, uint gop) {
us_h264_stream_s *h264;
US_CALLOC(h264, 1);
h264->sink = sink;
h264->tmp_src = us_frame_init();
h264->dest = us_frame_init();
atomic_init(&h264->online, false);
h264->enc = us_m2m_h264_encoder_init("H264", path, bitrate, gop);
return h264;
}
void us_h264_stream_destroy(us_h264_stream_s *h264) {
us_m2m_encoder_destroy(h264->enc);
us_frame_destroy(h264->dest);
us_frame_destroy(h264->tmp_src);
free(h264);
}
void us_h264_stream_process(us_h264_stream_s *h264, const us_frame_s *frame, bool force_key) {
if (us_is_jpeg(frame->format)) {
const ldf now_ts = us_get_now_monotonic();
US_LOG_DEBUG("H264: Input frame is JPEG; decoding ...");
if (us_unjpeg(frame, h264->tmp_src, true) < 0) {
atomic_store(&h264->online, false);
return;
}
frame = h264->tmp_src;
US_LOG_VERBOSE("H264: JPEG decoded; time=%.3Lf", us_get_now_monotonic() - now_ts);
}
if (h264->key_requested) {
US_LOG_INFO("H264: Requested keyframe by a sink client");
h264->key_requested = false;
force_key = true;
}
bool online = false;
if (!us_m2m_encoder_compress(h264->enc, frame, h264->dest, force_key)) {
online = !us_memsink_server_put(h264->sink, h264->dest, &h264->key_requested);
}
atomic_store(&h264->online, online);
}
#define US_ERROR_COMMON -1
#define US_ERROR_NO_DEVICE -2
#define US_ERROR_NO_DATA -3

112
src/libs/fpsi.c Normal file
View File

@@ -0,0 +1,112 @@
/*****************************************************************************
# #
# uStreamer - Lightweight and fast MJPEG-HTTP streamer. #
# #
# Copyright (C) 2018-2024 Maxim Devaev <mdevaev@gmail.com> #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <https://www.gnu.org/licenses/>. #
# #
*****************************************************************************/
#include "fpsi.h"
#include <stdatomic.h>
#include <pthread.h>
#include "types.h"
#include "tools.h"
#include "threading.h"
#include "logging.h"
#include "frame.h"
us_fpsi_s *us_fpsi_init(const char *name, bool with_meta) {
us_fpsi_s *fpsi;
US_CALLOC(fpsi, 1);
fpsi->name = us_strdup(name);
fpsi->with_meta = with_meta;
atomic_init(&fpsi->state_sec_ts, 0);
atomic_init(&fpsi->state, 0);
return fpsi;
}
void us_fpsi_destroy(us_fpsi_s *fpsi) {
free(fpsi->name);
free(fpsi);
}
void us_fpsi_frame_to_meta(const us_frame_s *frame, us_fpsi_meta_s *meta) {
meta->width = frame->width;
meta->height = frame->height;
meta->online = frame->online;
}
void us_fpsi_update(us_fpsi_s *fpsi, bool bump, const us_fpsi_meta_s *meta) {
if (meta != NULL) {
assert(fpsi->with_meta);
} else {
assert(!fpsi->with_meta);
}
const sll now_sec_ts = us_floor_ms(us_get_now_monotonic());
if (atomic_load(&fpsi->state_sec_ts) != now_sec_ts) {
US_LOG_PERF_FPS("FPS: %s: %u", fpsi->name, fpsi->accum);
// Fast mutex-less store method
ull state = (ull)fpsi->accum & 0xFFFF;
if (fpsi->with_meta) {
assert(meta != NULL);
state |= (ull)(meta->width & 0xFFFF) << 16;
state |= (ull)(meta->height & 0xFFFF) << 32;
state |= (ull)(meta->online ? 1 : 0) << 48;
}
atomic_store(&fpsi->state, state); // Сначала инфа
atomic_store(&fpsi->state_sec_ts, now_sec_ts); // Потом время, это важно
fpsi->accum = 0;
}
if (bump) {
++fpsi->accum;
}
}
uint us_fpsi_get(us_fpsi_s *fpsi, us_fpsi_meta_s *meta) {
if (meta != NULL) {
assert(fpsi->with_meta);
} else {
assert(!fpsi->with_meta);
}
// Между чтением инфы и времени может быть гонка,
// но это неважно. Если время свежее, до данные тоже
// будут свежмими, обратный случай не так важен.
const sll now_sec_ts = us_floor_ms(us_get_now_monotonic());
const sll state_sec_ts = atomic_load(&fpsi->state_sec_ts); // Сначала время
const ull state = atomic_load(&fpsi->state); // Потом инфа
uint current = state & 0xFFFF;
if (fpsi->with_meta) {
assert(meta != NULL);
meta->width = (state >> 16) & 0xFFFF;
meta->height = (state >> 32) & 0xFFFF;
meta->online = (state >> 48) & 1;
}
if (state_sec_ts != now_sec_ts && (state_sec_ts + 1) != now_sec_ts) {
// Только текущая или прошлая секунда
current = 0;
}
return current;
}

View File

@@ -24,23 +24,28 @@
#include <stdatomic.h>
#include "../libs/types.h"
#include "../libs/frame.h"
#include "../libs/memsink.h"
#include "m2m.h"
#include "types.h"
#include "frame.h"
typedef struct {
us_memsink_s *sink;
bool key_requested;
us_frame_s *tmp_src;
us_frame_s *dest;
us_m2m_encoder_s *enc;
atomic_bool online;
} us_h264_stream_s;
uint width;
uint height;
bool online;
} us_fpsi_meta_s;
typedef struct {
char *name;
bool with_meta;
uint accum;
atomic_llong state_sec_ts;
atomic_ullong state;
} us_fpsi_s;
us_h264_stream_s *us_h264_stream_init(us_memsink_s *sink, const char *path, uint bitrate, uint gop);
void us_h264_stream_destroy(us_h264_stream_s *h264);
void us_h264_stream_process(us_h264_stream_s *h264, const us_frame_s *frame, bool force_key);
us_fpsi_s *us_fpsi_init(const char *name, bool with_meta);
void us_fpsi_destroy(us_fpsi_s *fpsi);
void us_fpsi_frame_to_meta(const us_frame_s *frame, us_fpsi_meta_s *meta);
void us_fpsi_update(us_fpsi_s *fpsi, bool bump, const us_fpsi_meta_s *meta);
uint us_fpsi_get(us_fpsi_s *fpsi, us_fpsi_meta_s *meta);

View File

@@ -26,54 +26,58 @@
#include "tools.h"
#define US_FRAME_META_DECLARE \
uint width; \
uint height; \
uint format; \
uint stride; \
/* Stride is a bytesperline in V4L2 */ \
/* https://www.kernel.org/doc/html/v4.14/media/uapi/v4l/pixfmt-v4l2.html */ \
/* https://medium.com/@oleg.shipitko/what-does-stride-mean-in-image-processing-bba158a72bcd */ \
bool online; \
bool key; \
uint gop; \
\
ldf grab_ts; \
ldf encode_begin_ts; \
ldf encode_end_ts;
typedef struct {
u8 *data;
uz used;
uz allocated;
int dma_fd;
uint width;
uint height;
uint format;
uint stride;
// Stride is a bytesperline in V4L2
// https://www.kernel.org/doc/html/v4.14/media/uapi/v4l/pixfmt-v4l2.html
// https://medium.com/@oleg.shipitko/what-does-stride-mean-in-image-processing-bba158a72bcd
bool online;
bool key;
uint gop;
ldf grab_ts;
ldf encode_begin_ts;
ldf encode_end_ts;
US_FRAME_META_DECLARE;
} us_frame_s;
#define US_FRAME_COPY_META(x_src, x_dest) { \
x_dest->width = x_src->width; \
x_dest->height = x_src->height; \
x_dest->format = x_src->format; \
x_dest->stride = x_src->stride; \
x_dest->online = x_src->online; \
x_dest->key = x_src->key; \
x_dest->gop = x_src->gop; \
(x_dest)->width = (x_src)->width; \
(x_dest)->height = (x_src)->height; \
(x_dest)->format = (x_src)->format; \
(x_dest)->stride = (x_src)->stride; \
(x_dest)->online = (x_src)->online; \
(x_dest)->key = (x_src)->key; \
(x_dest)->gop = (x_src)->gop; \
\
x_dest->grab_ts = x_src->grab_ts; \
x_dest->encode_begin_ts = x_src->encode_begin_ts; \
x_dest->encode_end_ts = x_src->encode_end_ts; \
(x_dest)->grab_ts = (x_src)->grab_ts; \
(x_dest)->encode_begin_ts = (x_src)->encode_begin_ts; \
(x_dest)->encode_end_ts = (x_src)->encode_end_ts; \
}
#define US_FRAME_COMPARE_GEOMETRY(x_a, x_b) ( \
/* Compare the used size and significant meta (no timings) */ \
x_a->used == x_b->used \
(x_a)->used == (x_b)->used \
\
&& x_a->width == x_b->width \
&& x_a->height == x_b->height \
&& x_a->format == x_b->format \
&& x_a->stride == x_b->stride \
&& x_a->online == x_b->online \
&& x_a->key == x_b->key \
&& x_a->gop == x_b->gop \
&& (x_a)->width == (x_b)->width \
&& (x_a)->height == (x_b)->height \
&& (x_a)->format == (x_b)->format \
&& (x_a)->stride == (x_b)->stride \
&& (x_a)->online == (x_b)->online \
&& (x_a)->key == (x_b)->key \
&& (x_a)->gop == (x_b)->gop \
)

View File

@@ -25,9 +25,9 @@
#include <assert.h>
#define US_LIST_STRUCT(...) \
__VA_ARGS__ *prev; \
__VA_ARGS__ *next;
#define US_LIST_DECLARE \
void *prev; \
void *next;
#define US_LIST_ITERATE(x_first, x_item, ...) { \
for (__typeof__(x_first) x_item = x_first; x_item;) { \
@@ -42,10 +42,11 @@
x_first = x_item; \
} else { \
__typeof__(x_first) m_last = x_first; \
for (; m_last->next; m_last = m_last->next); \
for (; m_last->next != NULL; m_last = m_last->next); \
x_item->prev = m_last; \
m_last->next = x_item; \
} \
x_item->next = NULL; \
}
#define US_LIST_APPEND_C(x_first, x_item, x_count) { \
@@ -57,11 +58,15 @@
if (x_item->prev == NULL) { \
x_first = x_item->next; \
} else { \
x_item->prev->next = x_item->next; \
__typeof__(x_first) m_prev = x_item->prev; \
m_prev->next = x_item->next; \
} \
if (x_item->next != NULL) { \
x_item->next->prev = x_item->prev; \
__typeof__(x_first) m_next = x_item->next; \
m_next->prev = x_item->prev; \
} \
x_item->prev = NULL; \
x_item->next = NULL; \
}
#define US_LIST_REMOVE_C(x_first, x_item, x_count) { \

View File

@@ -33,13 +33,14 @@
#include <sys/mman.h>
#include "types.h"
#include "errors.h"
#include "tools.h"
#include "logging.h"
#include "frame.h"
#include "memsinksh.h"
us_memsink_s *us_memsink_init(
us_memsink_s *us_memsink_init_opened(
const char *name, const char *obj, bool server,
mode_t mode, bool rm, uint client_ttl, uint timeout) {
@@ -168,7 +169,7 @@ int us_memsink_server_put(us_memsink_s *sink, const us_frame_s *frame, bool *key
if (frame->used > sink->data_size) {
US_LOG_ERROR("%s-sink: Can't put frame: is too big (%zu > %zu)",
sink->name, frame->used, sink->data_size);
return 0; // -2
return 0;
}
if (us_flock_timedwait_monotonic(sink->fd, 1) == 0) {
@@ -213,7 +214,7 @@ int us_memsink_client_get(us_memsink_s *sink, us_frame_s *frame, bool *key_reque
if (us_flock_timedwait_monotonic(sink->fd, sink->timeout) < 0) {
if (errno == EWOULDBLOCK) {
return -2;
return US_ERROR_NO_DATA;
}
US_LOG_PERROR("%s-sink: Can't lock memory", sink->name);
return -1;
@@ -222,7 +223,7 @@ int us_memsink_client_get(us_memsink_s *sink, us_frame_s *frame, bool *key_reque
int retval = 0;
if (sink->mem->magic != US_MEMSINK_MAGIC) {
retval = -2; // Not updated
retval = US_ERROR_NO_DATA; // Not updated
goto done;
}
if (sink->mem->version != US_MEMSINK_VERSION) {
@@ -236,7 +237,7 @@ int us_memsink_client_get(us_memsink_s *sink, us_frame_s *frame, bool *key_reque
sink->mem->last_client_ts = us_get_now_monotonic();
if (sink->mem->id == sink->last_readed_id) {
retval = -2; // Not updated
retval = US_ERROR_NO_DATA; // Not updated
goto done;
}

View File

@@ -50,7 +50,7 @@ typedef struct {
} us_memsink_s;
us_memsink_s *us_memsink_init(
us_memsink_s *us_memsink_init_opened(
const char *name, const char *obj, bool server,
mode_t mode, bool rm, uint client_ttl, uint timeout);

View File

@@ -23,33 +23,23 @@
#pragma once
#include "types.h"
#include "frame.h"
#define US_MEMSINK_MAGIC ((u64)0xCAFEBABECAFEBABE)
#define US_MEMSINK_VERSION ((u32)6)
#define US_MEMSINK_VERSION ((u32)7)
typedef struct {
u64 magic;
u32 version;
u64 id;
uz used;
uint width;
uint height;
uint format;
uint stride;
bool online;
bool key;
uint gop;
ldf grab_ts;
ldf encode_begin_ts;
ldf encode_end_ts;
ldf last_client_ts;
bool key_requested;
US_FRAME_META_DECLARE;
} us_memsink_shared_s;

View File

@@ -58,7 +58,7 @@ int us_tc358743_xioctl_get_audio_hz(int fd, uint *audio_hz) {
US_MEMSET_ZERO(ctl);
ctl.id = TC358743_CID_AUDIO_SAMPLING_RATE;
if (us_xioctl(fd, VIDIOC_G_CTRL, &ctl) < 0) {
return -2;
return -1;
}
*audio_hz = ctl.value;
return 0;

View File

@@ -113,7 +113,9 @@ INLINE void us_thread_get_name(char *name) { // Always required for logging
#if defined(__linux__)
const pid_t tid = syscall(SYS_gettid);
#elif defined(__FreeBSD__)
const pid_t tid = syscall(SYS_thr_self);
long id;
assert(!syscall(SYS_thr_self, &id));
const pid_t tid = id;
#elif defined(__OpenBSD__)
const pid_t tid = syscall(SYS_getthrid);
#elif defined(__NetBSD__)

View File

@@ -72,14 +72,16 @@
(m_a > m_b ? m_a : m_b); \
})
#define US_ONCE(...) { \
const int m_reported = __LINE__; \
if (m_reported != once) { \
#define US_ONCE_FOR(x_once, x_value, ...) { \
const int m_reported = (x_value); \
if (m_reported != (x_once)) { \
__VA_ARGS__; \
once = m_reported; \
(x_once) = m_reported; \
} \
}
#define US_ONCE(...) US_ONCE_FOR(once, __LINE__, ##__VA_ARGS__)
INLINE char *us_strdup(const char *str) {
char *const new = strdup(str);

View File

@@ -25,6 +25,8 @@
#include <stdbool.h>
#include <stdint.h>
#include <sys/types.h>
typedef long long sll;
typedef ssize_t sz;

View File

@@ -22,6 +22,26 @@
#include "encoder.h"
#include <stdlib.h>
#include <strings.h>
#include <assert.h>
#include <pthread.h>
#include "../libs/types.h"
#include "../libs/tools.h"
#include "../libs/array.h"
#include "../libs/threading.h"
#include "../libs/logging.h"
#include "../libs/frame.h"
#include "../libs/capture.h"
#include "workers.h"
#include "m2m.h"
#include "encoders/cpu/encoder.h"
#include "encoders/hw/encoder.h"
static const struct {
const char *name;
@@ -34,7 +54,7 @@ static const struct {
{"M2M-MJPEG", US_ENCODER_TYPE_M2M_VIDEO},
{"M2M-JPEG", US_ENCODER_TYPE_M2M_IMAGE},
{"OMX", US_ENCODER_TYPE_M2M_IMAGE},
{"NOOP", US_ENCODER_TYPE_NOOP},
{"NOOP", US_ENCODER_TYPE_CPU},
};
@@ -43,9 +63,6 @@ static void _worker_job_destroy(void *v_job);
static bool _worker_run_job(us_worker_s *wr);
#define _ER(x_next) enc->run->x_next
us_encoder_s *us_encoder_init(void) {
us_encoder_runtime_s *run;
US_CALLOC(run, 1);
@@ -62,14 +79,15 @@ us_encoder_s *us_encoder_init(void) {
}
void us_encoder_destroy(us_encoder_s *enc) {
if (_ER(m2ms) != NULL) {
for (unsigned index = 0; index < _ER(n_m2ms); ++index) {
US_DELETE(_ER(m2ms[index]), us_m2m_encoder_destroy)
us_encoder_runtime_s *const run = enc->run;
if (run->m2ms != NULL) {
for (uint index = 0; index < run->n_m2ms; ++index) {
US_DELETE(run->m2ms[index], us_m2m_encoder_destroy);
}
free(_ER(m2ms));
free(run->m2ms);
}
US_MUTEX_DESTROY(_ER(mutex));
free(enc->run);
US_MUTEX_DESTROY(run->mutex);
free(run);
free(enc);
}
@@ -91,86 +109,70 @@ const char *us_encoder_type_to_string(us_encoder_type_e type) {
return _ENCODER_TYPES[0].name;
}
void us_encoder_open(us_encoder_s *enc, us_device_s *dev) {
assert(enc->run->pool == NULL);
void us_encoder_open(us_encoder_s *enc, us_capture_s *cap) {
us_encoder_runtime_s *const run = enc->run;
us_capture_runtime_s *const cr = cap->run;
# define DR(x_next) dev->run->x_next
assert(run->pool == NULL);
us_encoder_type_e type = (_ER(cpu_forced) ? US_ENCODER_TYPE_CPU : enc->type);
unsigned quality = dev->jpeg_quality;
unsigned n_workers = US_MIN(enc->n_workers, DR(n_bufs));
bool cpu_forced = false;
us_encoder_type_e type = enc->type;
uint quality = cap->jpeg_quality;
uint n_workers = US_MIN(enc->n_workers, cr->n_bufs);
if (us_is_jpeg(DR(format)) && type != US_ENCODER_TYPE_HW) {
if (us_is_jpeg(cr->format) && type != US_ENCODER_TYPE_HW) {
US_LOG_INFO("Switching to HW encoder: the input is (M)JPEG ...");
type = US_ENCODER_TYPE_HW;
}
if (type == US_ENCODER_TYPE_HW) {
if (!us_is_jpeg(DR(format))) {
if (us_is_jpeg(cr->format)) {
quality = cr->jpeg_quality;
n_workers = 1;
} else {
US_LOG_INFO("Switching to CPU encoder: the input format is not (M)JPEG ...");
goto use_cpu;
type = US_ENCODER_TYPE_CPU;
quality = cap->jpeg_quality;
}
quality = DR(jpeg_quality);
n_workers = 1;
} else if (type == US_ENCODER_TYPE_M2M_VIDEO || type == US_ENCODER_TYPE_M2M_IMAGE) {
US_LOG_DEBUG("Preparing M2M-%s encoder ...", (type == US_ENCODER_TYPE_M2M_VIDEO ? "VIDEO" : "IMAGE"));
if (_ER(m2ms) == NULL) {
US_CALLOC(_ER(m2ms), n_workers);
if (run->m2ms == NULL) {
US_CALLOC(run->m2ms, n_workers);
}
for (; _ER(n_m2ms) < n_workers; ++_ER(n_m2ms)) {
for (; run->n_m2ms < n_workers; ++run->n_m2ms) {
// Начинаем с нуля и доинициализируем на следующих заходах при необходимости
char name[32];
US_SNPRINTF(name, 31, "JPEG-%u", _ER(n_m2ms));
US_SNPRINTF(name, 31, "JPEG-%u", run->n_m2ms);
if (type == US_ENCODER_TYPE_M2M_VIDEO) {
_ER(m2ms[_ER(n_m2ms)]) = us_m2m_mjpeg_encoder_init(name, enc->m2m_path, quality);
run->m2ms[run->n_m2ms] = us_m2m_mjpeg_encoder_init(name, enc->m2m_path, quality);
} else {
_ER(m2ms[_ER(n_m2ms)]) = us_m2m_jpeg_encoder_init(name, enc->m2m_path, quality);
run->m2ms[run->n_m2ms] = us_m2m_jpeg_encoder_init(name, enc->m2m_path, quality);
}
}
} else if (type == US_ENCODER_TYPE_NOOP) {
n_workers = 1;
quality = 0;
}
goto ok;
if (quality == 0) {
US_LOG_INFO("Using JPEG quality: encoder default");
} else {
US_LOG_INFO("Using JPEG quality: %u%%", quality);
}
use_cpu:
type = US_ENCODER_TYPE_CPU;
quality = dev->jpeg_quality;
US_MUTEX_LOCK(run->mutex);
run->type = type;
run->quality = quality;
US_MUTEX_UNLOCK(run->mutex);
ok:
if (type == US_ENCODER_TYPE_NOOP) {
US_LOG_INFO("Using JPEG NOOP encoder");
} else if (quality == 0) {
US_LOG_INFO("Using JPEG quality: encoder default");
} else {
US_LOG_INFO("Using JPEG quality: %u%%", quality);
}
const ldf desired_interval = (
cap->desired_fps > 0 && (cap->desired_fps < cap->run->hw_fps || cap->run->hw_fps == 0)
? (ldf)1 / cap->desired_fps
: 0
);
US_MUTEX_LOCK(_ER(mutex));
_ER(type) = type;
_ER(quality) = quality;
if (cpu_forced) {
_ER(cpu_forced) = true;
}
US_MUTEX_UNLOCK(_ER(mutex));
const long double desired_interval = (
dev->desired_fps > 0 && (dev->desired_fps < dev->run->hw_fps || dev->run->hw_fps == 0)
? (long double)1 / dev->desired_fps
: 0
);
enc->run->pool = us_workers_pool_init(
"JPEG", "jw", n_workers, desired_interval,
_worker_job_init, (void*)enc,
_worker_job_destroy,
_worker_run_job);
# undef DR
enc->run->pool = us_workers_pool_init(
"JPEG", "jw", n_workers, desired_interval,
_worker_job_init, (void*)enc,
_worker_job_destroy,
_worker_run_job);
}
void us_encoder_close(us_encoder_s *enc) {
@@ -178,11 +180,12 @@ void us_encoder_close(us_encoder_s *enc) {
US_DELETE(enc->run->pool, us_workers_pool_destroy);
}
void us_encoder_get_runtime_params(us_encoder_s *enc, us_encoder_type_e *type, unsigned *quality) {
US_MUTEX_LOCK(_ER(mutex));
*type = _ER(type);
*quality = _ER(quality);
US_MUTEX_UNLOCK(_ER(mutex));
void us_encoder_get_runtime_params(us_encoder_s *enc, us_encoder_type_e *type, uint *quality) {
us_encoder_runtime_s *const run = enc->run;
US_MUTEX_LOCK(run->mutex);
*type = run->type;
*quality = run->quality;
US_MUTEX_UNLOCK(run->mutex);
}
static void *_worker_job_init(void *v_enc) {
@@ -200,35 +203,28 @@ static void _worker_job_destroy(void *v_job) {
}
static bool _worker_run_job(us_worker_s *wr) {
us_encoder_job_s *job = wr->job;
us_encoder_s *enc = job->enc; // Just for _ER()
const us_frame_s *src = &job->hw->raw;
us_frame_s *dest = job->dest;
us_encoder_job_s *const job = wr->job;
us_encoder_runtime_s *const run = job->enc->run;
const us_frame_s *const src = &job->hw->raw;
us_frame_s *const dest = job->dest;
if (_ER(type) == US_ENCODER_TYPE_CPU) {
if (run->type == US_ENCODER_TYPE_CPU) {
US_LOG_VERBOSE("Compressing JPEG using CPU: worker=%s, buffer=%u",
wr->name, job->hw->buf.index);
us_cpu_encoder_compress(src, dest, _ER(quality));
us_cpu_encoder_compress(src, dest, run->quality);
} else if (_ER(type) == US_ENCODER_TYPE_HW) {
} else if (run->type == US_ENCODER_TYPE_HW) {
US_LOG_VERBOSE("Compressing JPEG using HW (just copying): worker=%s, buffer=%u",
wr->name, job->hw->buf.index);
us_hw_encoder_compress(src, dest);
} else if (_ER(type) == US_ENCODER_TYPE_M2M_VIDEO || _ER(type) == US_ENCODER_TYPE_M2M_IMAGE) {
} else if (run->type == US_ENCODER_TYPE_M2M_VIDEO || run->type == US_ENCODER_TYPE_M2M_IMAGE) {
US_LOG_VERBOSE("Compressing JPEG using M2M-%s: worker=%s, buffer=%u",
(_ER(type) == US_ENCODER_TYPE_M2M_VIDEO ? "VIDEO" : "IMAGE"), wr->name, job->hw->buf.index);
if (us_m2m_encoder_compress(_ER(m2ms[wr->number]), src, dest, false) < 0) {
(run->type == US_ENCODER_TYPE_M2M_VIDEO ? "VIDEO" : "IMAGE"), wr->name, job->hw->buf.index);
if (us_m2m_encoder_compress(run->m2ms[wr->number], src, dest, false) < 0) {
goto error;
}
} else if (_ER(type) == US_ENCODER_TYPE_NOOP) {
US_LOG_VERBOSE("Compressing JPEG using NOOP (do nothing): worker=%s, buffer=%u",
wr->name, job->hw->buf.index);
us_frame_encoding_begin(src, dest, V4L2_PIX_FMT_JPEG);
usleep(5000); // Просто чтобы работала логика desired_fps
dest->encode_end_ts = us_get_now_monotonic(); // us_frame_encoding_end()
} else {
assert(0 && "Unknown encoder type");
}
@@ -238,14 +234,9 @@ static bool _worker_run_job(us_worker_s *wr) {
job->dest->encode_end_ts - job->dest->encode_begin_ts,
wr->name,
job->hw->buf.index);
return true;
error:
US_LOG_ERROR("Compression failed: worker=%s, buffer=%u", wr->name, job->hw->buf.index);
US_LOG_ERROR("Error while compressing buffer, falling back to CPU");
US_MUTEX_LOCK(_ER(mutex));
_ER(cpu_forced) = true;
US_MUTEX_UNLOCK(_ER(mutex));
return false;
error:
US_LOG_ERROR("Compression failed: worker=%s, buffer=%u", wr->name, job->hw->buf.index);
return false;
}

View File

@@ -22,45 +22,32 @@
#pragma once
#include <stdlib.h>
#include <stdbool.h>
#include <strings.h>
#include <assert.h>
#include <pthread.h>
#include <linux/videodev2.h>
#include "../libs/tools.h"
#include "../libs/array.h"
#include "../libs/threading.h"
#include "../libs/logging.h"
#include "../libs/types.h"
#include "../libs/frame.h"
#include "../libs/device.h"
#include "../libs/capture.h"
#include "workers.h"
#include "m2m.h"
#include "encoders/cpu/encoder.h"
#include "encoders/hw/encoder.h"
#define ENCODER_TYPES_STR "CPU, HW, M2M-VIDEO, M2M-IMAGE"
#define ENCODER_TYPES_STR "CPU, HW, M2M-VIDEO, M2M-IMAGE, NOOP"
typedef enum {
US_ENCODER_TYPE_CPU,
US_ENCODER_TYPE_HW,
US_ENCODER_TYPE_M2M_VIDEO,
US_ENCODER_TYPE_M2M_IMAGE,
US_ENCODER_TYPE_NOOP,
} us_encoder_type_e;
typedef struct {
us_encoder_type_e type;
unsigned quality;
bool cpu_forced;
uint quality;
pthread_mutex_t mutex;
unsigned n_m2ms;
uint n_m2ms;
us_m2m_encoder_s **m2ms;
us_workers_pool_s *pool;
@@ -68,16 +55,16 @@ typedef struct {
typedef struct {
us_encoder_type_e type;
unsigned n_workers;
uint n_workers;
char *m2m_path;
us_encoder_runtime_s *run;
} us_encoder_s;
typedef struct {
us_encoder_s *enc;
us_hw_buffer_s *hw;
us_frame_s *dest;
us_encoder_s *enc;
us_capture_hwbuf_s *hw;
us_frame_s *dest;
} us_encoder_job_s;
@@ -87,7 +74,7 @@ void us_encoder_destroy(us_encoder_s *enc);
int us_encoder_parse_type(const char *str);
const char *us_encoder_type_to_string(us_encoder_type_e type);
void us_encoder_open(us_encoder_s *enc, us_device_s *dev);
void us_encoder_open(us_encoder_s *enc, us_capture_s *cap);
void us_encoder_close(us_encoder_s *enc);
void us_encoder_get_runtime_params(us_encoder_s *enc, us_encoder_type_e *type, unsigned *quality);
void us_encoder_get_runtime_params(us_encoder_s *enc, us_encoder_type_e *type, uint *quality);

View File

@@ -40,7 +40,10 @@ static void _jpeg_set_dest_frame(j_compress_ptr jpeg, us_frame_s *frame);
static void _jpeg_write_scanlines_yuv(struct jpeg_compress_struct *jpeg, const us_frame_s *frame);
static void _jpeg_write_scanlines_rgb565(struct jpeg_compress_struct *jpeg, const us_frame_s *frame);
static void _jpeg_write_scanlines_rgb24(struct jpeg_compress_struct *jpeg, const us_frame_s *frame);
#ifndef JCS_EXTENSIONS
#warning JCS_EXT_BGR is not supported, please use libjpeg-turbo
static void _jpeg_write_scanlines_bgr24(struct jpeg_compress_struct *jpeg, const us_frame_s *frame);
#endif
static void _jpeg_init_destination(j_compress_ptr jpeg);
static boolean _jpeg_empty_output_buffer(j_compress_ptr jpeg);
@@ -67,6 +70,9 @@ void us_cpu_encoder_compress(const us_frame_s *src, us_frame_s *dest, unsigned q
case V4L2_PIX_FMT_YUYV:
case V4L2_PIX_FMT_YVYU:
case V4L2_PIX_FMT_UYVY: jpeg.in_color_space = JCS_YCbCr; break;
# ifdef JCS_EXTENSIONS
case V4L2_PIX_FMT_BGR24: jpeg.in_color_space = JCS_EXT_BGR; break;
# endif
default: jpeg.in_color_space = JCS_RGB; break;
}
@@ -82,7 +88,13 @@ void us_cpu_encoder_compress(const us_frame_s *src, us_frame_s *dest, unsigned q
case V4L2_PIX_FMT_UYVY: _jpeg_write_scanlines_yuv(&jpeg, src); break;
case V4L2_PIX_FMT_RGB565: _jpeg_write_scanlines_rgb565(&jpeg, src); break;
case V4L2_PIX_FMT_RGB24: _jpeg_write_scanlines_rgb24(&jpeg, src); break;
case V4L2_PIX_FMT_BGR24: _jpeg_write_scanlines_bgr24(&jpeg, src); break;
case V4L2_PIX_FMT_BGR24:
# ifdef JCS_EXTENSIONS
_jpeg_write_scanlines_rgb24(&jpeg, src); // Use native JCS_EXT_BGR
# else
_jpeg_write_scanlines_bgr24(&jpeg, src);
# endif
break;
default: assert(0 && "Unsupported input format for CPU encoder"); return;
}
@@ -196,6 +208,7 @@ static void _jpeg_write_scanlines_rgb24(struct jpeg_compress_struct *jpeg, const
}
}
#ifndef JCS_EXTENSIONS
static void _jpeg_write_scanlines_bgr24(struct jpeg_compress_struct *jpeg, const us_frame_s *frame) {
uint8_t *line_buf;
US_CALLOC(line_buf, frame->width * 3);
@@ -222,6 +235,7 @@ static void _jpeg_write_scanlines_bgr24(struct jpeg_compress_struct *jpeg, const
free(line_buf);
}
#endif
#define JPEG_OUTPUT_BUFFER_SIZE ((size_t)4096)

View File

@@ -40,7 +40,7 @@ char *us_simplify_request_path(const char *str) {
char pre1; // The one before
char pre2; // The one before that
char *simplified;
char *start;
const char *start;
char *out;
char *slash;

View File

@@ -102,11 +102,11 @@ static const char *_http_get_header(struct evhttp_request *request, const char *
static char *_http_get_client_hostport(struct evhttp_request *request);
#define _S_LOG_ERROR(x_msg, ...) US_LOG_ERROR("HTTP: " x_msg, ##__VA_ARGS__)
#define _S_LOG_PERROR(x_msg, ...) US_LOG_PERROR("HTTP: " x_msg, ##__VA_ARGS__)
#define _S_LOG_INFO(x_msg, ...) US_LOG_INFO("HTTP: " x_msg, ##__VA_ARGS__)
#define _S_LOG_VERBOSE(x_msg, ...) US_LOG_VERBOSE("HTTP: " x_msg, ##__VA_ARGS__)
#define _S_LOG_DEBUG(x_msg, ...) US_LOG_DEBUG("HTTP: " x_msg, ##__VA_ARGS__)
#define _LOG_ERROR(x_msg, ...) US_LOG_ERROR("HTTP: " x_msg, ##__VA_ARGS__)
#define _LOG_PERROR(x_msg, ...) US_LOG_PERROR("HTTP: " x_msg, ##__VA_ARGS__)
#define _LOG_INFO(x_msg, ...) US_LOG_INFO("HTTP: " x_msg, ##__VA_ARGS__)
#define _LOG_VERBOSE(x_msg, ...) US_LOG_VERBOSE("HTTP: " x_msg, ##__VA_ARGS__)
#define _LOG_DEBUG(x_msg, ...) US_LOG_DEBUG("HTTP: " x_msg, ##__VA_ARGS__)
#define _A_EVBUFFER_NEW(x_buf) assert((x_buf = evbuffer_new()) != NULL)
#define _A_EVBUFFER_ADD(x_buf, x_data, x_size) assert(!evbuffer_add(x_buf, x_data, x_size))
@@ -120,6 +120,7 @@ us_server_s *us_server_init(us_stream_s *stream) {
us_server_exposed_s *exposed;
US_CALLOC(exposed, 1);
exposed->frame = us_frame_init();
exposed->queued_fpsi = us_fpsi_init("MJPEG-QUEUED", false);
us_server_runtime_s *run;
US_CALLOC(run, 1);
@@ -168,6 +169,7 @@ void us_server_destroy(us_server_s *server) {
});
US_LIST_ITERATE(run->stream_clients, client, { // cppcheck-suppress constStatement
us_fpsi_destroy(client->fpsi);
free(client->key);
free(client->hostport);
free(client);
@@ -175,6 +177,7 @@ void us_server_destroy(us_server_s *server) {
US_DELETE(run->auth_token, free);
us_fpsi_destroy(run->exposed->queued_fpsi);
us_frame_destroy(run->exposed->frame);
free(run->exposed);
free(server->run);
@@ -188,7 +191,7 @@ int us_server_listen(us_server_s *server) {
{
if (server->static_path[0] != '\0') {
_S_LOG_INFO("Enabling the file server: %s", server->static_path);
_LOG_INFO("Enabling the file server: %s", server->static_path);
evhttp_set_gencb(run->http, _http_callback_static, (void*)server);
} else {
assert(!evhttp_set_cb(run->http, "/", _http_callback_root, (void*)server));
@@ -205,8 +208,8 @@ int us_server_listen(us_server_s *server) {
{
struct timeval interval = {0};
if (stream->dev->desired_fps > 0) {
interval.tv_usec = 1000000 / (stream->dev->desired_fps * 2);
if (stream->cap->desired_fps > 0) {
interval.tv_usec = 1000000 / (stream->cap->desired_fps * 2);
} else {
interval.tv_usec = 16000; // ~60fps
}
@@ -227,11 +230,11 @@ int us_server_listen(us_server_s *server) {
US_ASPRINTF(run->auth_token, "Basic %s", encoded_token);
free(encoded_token);
_S_LOG_INFO("Using HTTP basic auth");
_LOG_INFO("Using HTTP basic auth");
}
if (server->unix_path[0] != '\0') {
_S_LOG_DEBUG("Binding server to UNIX socket '%s' ...", server->unix_path);
_LOG_DEBUG("Binding server to UNIX socket '%s' ...", server->unix_path);
if ((run->ext_fd = us_evhttp_bind_unix(
run->http,
server->unix_path,
@@ -240,33 +243,33 @@ int us_server_listen(us_server_s *server) {
) {
return -1;
}
_S_LOG_INFO("Listening HTTP on UNIX socket '%s'", server->unix_path);
_LOG_INFO("Listening HTTP on UNIX socket '%s'", server->unix_path);
# ifdef WITH_SYSTEMD
} else if (server->systemd) {
_S_LOG_DEBUG("Binding HTTP to systemd socket ...");
_LOG_DEBUG("Binding HTTP to systemd socket ...");
if ((run->ext_fd = us_evhttp_bind_systemd(run->http)) < 0) {
return -1;
}
_S_LOG_INFO("Listening systemd socket ...");
_LOG_INFO("Listening systemd socket ...");
# endif
} else {
_S_LOG_DEBUG("Binding HTTP to [%s]:%u ...", server->host, server->port);
_LOG_DEBUG("Binding HTTP to [%s]:%u ...", server->host, server->port);
if (evhttp_bind_socket(run->http, server->host, server->port) < 0) {
_S_LOG_PERROR("Can't bind HTTP on [%s]:%u", server->host, server->port)
_LOG_PERROR("Can't bind HTTP on [%s]:%u", server->host, server->port)
return -1;
}
_S_LOG_INFO("Listening HTTP on [%s]:%u", server->host, server->port);
_LOG_INFO("Listening HTTP on [%s]:%u", server->host, server->port);
}
return 0;
}
void us_server_loop(us_server_s *server) {
_S_LOG_INFO("Starting eventloop ...");
_LOG_INFO("Starting eventloop ...");
event_base_dispatch(server->run->base);
_S_LOG_INFO("Eventloop stopped");
_LOG_INFO("Eventloop stopped");
}
void us_server_loop_break(us_server_s *server) {
@@ -276,7 +279,7 @@ void us_server_loop_break(us_server_s *server) {
static int _http_preprocess_request(struct evhttp_request *request, us_server_s *server) {
const us_server_runtime_s *const run = server->run;
atomic_store(&server->stream->run->http_last_request_ts, us_get_now_monotonic());
atomic_store(&server->stream->run->http->last_request_ts, us_get_now_monotonic());
if (server->allow_origin[0] != '\0') {
const char *const cors_headers = _http_get_header(request, "Access-Control-Request-Headers");
@@ -407,18 +410,18 @@ static void _http_callback_static(struct evhttp_request *request, void *v_server
}
if ((fd = open(static_path, O_RDONLY)) < 0) {
_S_LOG_PERROR("Can't open found static file %s", static_path);
_LOG_PERROR("Can't open found static file %s", static_path);
goto not_found;
}
{
struct stat st;
if (fstat(fd, &st) < 0) {
_S_LOG_PERROR("Can't stat() found static file %s", static_path);
_LOG_PERROR("Can't stat() found static file %s", static_path);
goto not_found;
}
if (st.st_size > 0 && evbuffer_add_file(buf, fd, 0, st.st_size) < 0) {
_S_LOG_ERROR("Can't serve static file %s", static_path);
_LOG_ERROR("Can't serve static file %s", static_path);
goto not_found;
}
@@ -473,12 +476,27 @@ static void _http_callback_state(struct evhttp_request *request, void *v_server)
enc_quality
);
if (stream->run->h264 != NULL) {
# ifdef WITH_V4P
if (stream->drm != NULL) {
us_fpsi_meta_s meta;
const uint fps = us_fpsi_get(stream->run->http->drm_fpsi, &meta);
_A_EVBUFFER_ADD_PRINTF(buf,
" \"h264\": {\"bitrate\": %u, \"gop\": %u, \"online\": %s},",
" \"drm\": {\"live\": %s, \"fps\": %u},",
us_bool_to_string(meta.online),
fps
);
}
# endif
if (stream->h264_sink != NULL) {
us_fpsi_meta_s meta;
const uint fps = us_fpsi_get(stream->run->http->h264_fpsi, &meta);
_A_EVBUFFER_ADD_PRINTF(buf,
" \"h264\": {\"bitrate\": %u, \"gop\": %u, \"online\": %s, \"fps\": %u},",
stream->h264_bitrate,
stream->h264_gop,
us_bool_to_string(atomic_load(&stream->run->h264->online))
us_bool_to_string(meta.online),
fps
);
}
@@ -500,21 +518,18 @@ static void _http_callback_state(struct evhttp_request *request, void *v_server)
_A_EVBUFFER_ADD_PRINTF(buf, "},");
}
uint width;
uint height;
bool online;
uint captured_fps;
us_stream_get_capture_state(stream, &width, &height, &online, &captured_fps);
us_fpsi_meta_s captured_meta;
const uint captured_fps = us_fpsi_get(stream->run->http->captured_fpsi, &captured_meta);
_A_EVBUFFER_ADD_PRINTF(buf,
" \"source\": {\"resolution\": {\"width\": %u, \"height\": %u},"
" \"online\": %s, \"desired_fps\": %u, \"captured_fps\": %u},"
" \"stream\": {\"queued_fps\": %u, \"clients\": %u, \"clients_stat\": {",
(server->fake_width ? server->fake_width : width),
(server->fake_height ? server->fake_height : height),
us_bool_to_string(online),
stream->dev->desired_fps,
(server->fake_width ? server->fake_width : captured_meta.width),
(server->fake_height ? server->fake_height : captured_meta.height),
us_bool_to_string(captured_meta.online),
stream->cap->desired_fps,
captured_fps,
ex->queued_fps,
us_fpsi_get(ex->queued_fpsi, NULL),
run->stream_clients_count
);
@@ -523,7 +538,7 @@ static void _http_callback_state(struct evhttp_request *request, void *v_server)
"\"%" PRIx64 "\": {\"fps\": %u, \"extra_headers\": %s, \"advance_headers\": %s,"
" \"dual_final_frames\": %s, \"zero_data\": %s, \"key\": \"%s\"}%s",
client->id,
client->fps,
us_fpsi_get(client->fpsi, NULL),
us_bool_to_string(client->extra_headers),
us_bool_to_string(client->advance_headers),
us_bool_to_string(client->dual_final_frames),
@@ -551,7 +566,7 @@ static void _http_callback_snapshot(struct evhttp_request *request, void *v_serv
client->request = request;
client->request_ts = us_get_now_monotonic();
atomic_fetch_add(&server->stream->run->http_snapshot_requested, 1);
atomic_fetch_add(&server->stream->run->http->snapshot_requested, 1);
US_LIST_APPEND(server->run->snapshot_clients, client);
}
@@ -590,26 +605,33 @@ static void _http_callback_stream(struct evhttp_request *request, void *v_server
client->hostport = _http_get_client_hostport(request);
client->id = us_get_now_id();
{
char *name;
US_ASPRINTF(name, "MJPEG-CLIENT-%" PRIx64, client->id);
client->fpsi = us_fpsi_init(name, false);
free(name);
}
US_LIST_APPEND_C(run->stream_clients, client, run->stream_clients_count);
if (run->stream_clients_count == 1) {
atomic_store(&server->stream->run->http_has_clients, true);
atomic_store(&server->stream->run->http->has_clients, true);
# ifdef WITH_GPIO
us_gpio_set_has_http_clients(true);
# endif
}
_S_LOG_INFO("NEW client (now=%u): %s, id=%" PRIx64,
_LOG_INFO("NEW client (now=%u): %s, id=%" PRIx64,
run->stream_clients_count, client->hostport, client->id);
struct bufferevent *const buf_event = evhttp_connection_get_bufferevent(conn);
if (server->tcp_nodelay && run->ext_fd >= 0) {
_S_LOG_DEBUG("Setting up TCP_NODELAY to the client %s ...", client->hostport);
_LOG_DEBUG("Setting up TCP_NODELAY to the client %s ...", client->hostport);
const evutil_socket_t fd = bufferevent_getfd(buf_event);
assert(fd >= 0);
int on = 1;
if (setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, (void*)&on, sizeof(on)) != 0) {
_S_LOG_PERROR("Can't set TCP_NODELAY to the client %s", client->hostport);
_LOG_PERROR("Can't set TCP_NODELAY to the client %s", client->hostport);
}
}
bufferevent_setcb(buf_event, NULL, NULL, _http_callback_stream_error, (void*)client);
@@ -626,15 +648,7 @@ static void _http_callback_stream_write(struct bufferevent *buf_event, void *v_c
us_server_s *const server = client->server;
us_server_exposed_s *const ex = server->run->exposed;
const ldf now_ts = us_get_now_monotonic();
const sll now_sec_ts = us_floor_ms(now_ts);
if (now_sec_ts != client->fps_ts) {
client->fps = client->fps_accum;
client->fps_accum = 0;
client->fps_ts = now_sec_ts;
}
client->fps_accum += 1;
us_fpsi_update(client->fpsi, true, NULL);
struct evbuffer *buf;
_A_EVBUFFER_NEW(buf);
@@ -716,6 +730,7 @@ static void _http_callback_stream_write(struct bufferevent *buf_event, void *v_c
us_get_now_real(),
(client->extra_headers ? "" : RN)
);
const ldf now_ts = us_get_now_monotonic();
if (client->extra_headers) {
_A_EVBUFFER_ADD_PRINTF(buf,
"X-UStreamer-Online: %s" RN
@@ -736,7 +751,7 @@ static void _http_callback_stream_write(struct bufferevent *buf_event, void *v_c
ex->dropped,
ex->frame->width,
ex->frame->height,
client->fps,
us_fpsi_get(client->fpsi, NULL),
ex->frame->grab_ts,
ex->frame->encode_begin_ts,
ex->frame->encode_end_ts,
@@ -779,20 +794,21 @@ static void _http_callback_stream_error(struct bufferevent *buf_event, short wha
US_LIST_REMOVE_C(run->stream_clients, client, run->stream_clients_count);
if (run->stream_clients_count == 0) {
atomic_store(&server->stream->run->http_has_clients, false);
atomic_store(&server->stream->run->http->has_clients, false);
# ifdef WITH_GPIO
us_gpio_set_has_http_clients(false);
# endif
}
char *const reason = us_bufferevent_format_reason(what);
_S_LOG_INFO("DEL client (now=%u): %s, id=%" PRIx64 ", %s",
_LOG_INFO("DEL client (now=%u): %s, id=%" PRIx64 ", %s",
run->stream_clients_count, client->hostport, client->id, reason);
free(reason);
struct evhttp_connection *conn = evhttp_request_get_connection(client->request);
US_DELETE(conn, evhttp_connection_free);
us_fpsi_destroy(client->fpsi);
free(client->key);
free(client->hostport);
free(client);
@@ -802,8 +818,8 @@ static void _http_send_stream(us_server_s *server, bool stream_updated, bool fra
us_server_runtime_s *const run = server->run;
us_server_exposed_s *const ex = run->exposed;
bool has_clients = false;
bool queued = false;
bool has_clients = true;
US_LIST_ITERATE(run->stream_clients, client, { // cppcheck-suppress constStatement
struct evhttp_connection *const conn = evhttp_request_get_connection(client->request);
@@ -833,23 +849,14 @@ static void _http_send_stream(us_server_s *server, bool stream_updated, bool fra
} else if (stream_updated) { // Для dual
client->updated_prev = false;
}
has_clients = true;
}
});
if (queued) {
static uint queued_fps_accum = 0;
static sll queued_fps_ts = 0;
const sll now_sec_ts = us_floor_ms(us_get_now_monotonic());
if (now_sec_ts != queued_fps_ts) {
ex->queued_fps = queued_fps_accum;
queued_fps_accum = 0;
queued_fps_ts = now_sec_ts;
}
queued_fps_accum += 1;
us_fpsi_update(ex->queued_fpsi, true, NULL);
} else if (!has_clients) {
ex->queued_fps = 0;
us_fpsi_update(ex->queued_fpsi, false, NULL);
}
}
@@ -866,24 +873,22 @@ static void _http_send_snapshot(us_server_s *server) {
US_SNPRINTF(header_buf, 255, "%u", x_value); \
_A_ADD_HEADER(request, x_key, header_buf); \
}
uint width;
uint height;
uint captured_fps; // Unused
bool online;
us_stream_get_capture_state(server->stream, &width, &height, &online, &captured_fps);
us_fpsi_meta_s captured_meta;
us_fpsi_get(server->stream->run->http->captured_fpsi, &captured_meta);
US_LIST_ITERATE(server->run->snapshot_clients, client, { // cppcheck-suppress constStatement
struct evhttp_request *request = client->request;
const bool has_fresh_snapshot = (atomic_load(&server->stream->run->http_snapshot_requested) == 0);
const bool has_fresh_snapshot = (atomic_load(&server->stream->run->http->snapshot_requested) == 0);
const bool timed_out = (client->request_ts + US_MAX((uint)1, server->stream->error_delay * 3) < us_get_now_monotonic());
if (has_fresh_snapshot || timed_out) {
us_frame_s *frame = ex->frame;
if (!online) {
if (!captured_meta.online) {
if (blank == NULL) {
blank = us_blank_init();
us_blank_draw(blank, "< NO SIGNAL >", width, height);
us_blank_draw(blank, "< NO SIGNAL >", captured_meta.width, captured_meta.height);
}
frame = blank->jpeg;
}
@@ -930,7 +935,7 @@ static void _http_refresher(int fd, short what, void *v_server) {
us_server_s *server = v_server;
us_server_exposed_s *ex = server->run->exposed;
us_ring_s *const ring = server->stream->run->http_jpeg_ring;
us_ring_s *const ring = server->stream->run->http->jpeg_ring;
bool stream_updated = false;
bool frame_updated = false;
@@ -942,7 +947,7 @@ static void _http_refresher(int fd, short what, void *v_server) {
stream_updated = true;
us_ring_consumer_release(ring, ri);
} else if (ex->expose_end_ts + 1 < us_get_now_monotonic()) {
_S_LOG_DEBUG("Repeating exposed ...");
_LOG_DEBUG("Repeating exposed ...");
ex->expose_begin_ts = us_get_now_monotonic();
ex->expose_cmp_ts = ex->expose_begin_ts;
ex->expose_end_ts = ex->expose_begin_ts;
@@ -972,7 +977,7 @@ static void _http_refresher(int fd, short what, void *v_server) {
static bool _expose_frame(us_server_s *server, const us_frame_s *frame) {
us_server_exposed_s *const ex = server->run->exposed;
_S_LOG_DEBUG("Updating exposed frame (online=%d) ...", frame->online);
_LOG_DEBUG("Updating exposed frame (online=%d) ...", frame->online);
ex->expose_begin_ts = us_get_now_monotonic();
if (server->drop_same_frames && frame->online) {
@@ -984,13 +989,13 @@ static bool _expose_frame(us_server_s *server, const us_frame_s *frame) {
) {
ex->expose_cmp_ts = us_get_now_monotonic();
ex->expose_end_ts = ex->expose_cmp_ts;
_S_LOG_VERBOSE("Dropped same frame number %u; cmp_time=%.06Lf",
_LOG_VERBOSE("Dropped same frame number %u; cmp_time=%.06Lf",
ex->dropped, (ex->expose_cmp_ts - ex->expose_begin_ts));
ex->dropped += 1;
return false; // Not updated
} else {
ex->expose_cmp_ts = us_get_now_monotonic();
_S_LOG_VERBOSE("Passed same frame check (need_drop=%d, maybe_same=%d); cmp_time=%.06Lf",
_LOG_VERBOSE("Passed same frame check (need_drop=%d, maybe_same=%d); cmp_time=%.06Lf",
need_drop, maybe_same, (ex->expose_cmp_ts - ex->expose_begin_ts));
}
}
@@ -1007,7 +1012,7 @@ static bool _expose_frame(us_server_s *server, const us_frame_s *frame) {
ex->expose_cmp_ts = ex->expose_begin_ts;
ex->expose_end_ts = us_get_now_monotonic();
_S_LOG_VERBOSE("Exposed frame: online=%d, exp_time=%.06Lf",
_LOG_VERBOSE("Exposed frame: online=%d, exp_time=%.06Lf",
ex->frame->online, (ex->expose_end_ts - ex->expose_begin_ts));
return true; // Updated
}

View File

@@ -31,11 +31,12 @@
#include "../../libs/types.h"
#include "../../libs/frame.h"
#include "../../libs/list.h"
#include "../../libs/fpsi.h"
#include "../encoder.h"
#include "../stream.h"
typedef struct us_stream_client_sx {
typedef struct {
struct us_server_sx *server;
struct evhttp_request *request;
@@ -50,25 +51,23 @@ typedef struct us_stream_client_sx {
bool need_initial;
bool need_first_frame;
bool updated_prev;
uint fps_accum;
sll fps_ts;
uint fps;
US_LIST_STRUCT(struct us_stream_client_sx);
us_fpsi_s *fpsi;
US_LIST_DECLARE;
} us_stream_client_s;
typedef struct us_snapshot_client_sx {
typedef struct {
struct us_server_sx *server;
struct evhttp_request *request;
ldf request_ts;
US_LIST_STRUCT(struct us_snapshot_client_sx);
US_LIST_DECLARE;
} us_snapshot_client_s;
typedef struct {
us_frame_s *frame;
uint captured_fps;
uint queued_fps;
us_fpsi_s *queued_fpsi;
uint dropped;
ldf expose_begin_ts;
ldf expose_cmp_ts;

View File

@@ -56,11 +56,11 @@ static void _m2m_encoder_cleanup(us_m2m_encoder_s *enc);
static int _m2m_encoder_compress_raw(us_m2m_encoder_s *enc, const us_frame_s *src, us_frame_s *dest, bool force_key);
#define _E_LOG_ERROR(x_msg, ...) US_LOG_ERROR("%s: " x_msg, enc->name, ##__VA_ARGS__)
#define _E_LOG_PERROR(x_msg, ...) US_LOG_PERROR("%s: " x_msg, enc->name, ##__VA_ARGS__)
#define _E_LOG_INFO(x_msg, ...) US_LOG_INFO("%s: " x_msg, enc->name, ##__VA_ARGS__)
#define _E_LOG_VERBOSE(x_msg, ...) US_LOG_VERBOSE("%s: " x_msg, enc->name, ##__VA_ARGS__)
#define _E_LOG_DEBUG(x_msg, ...) US_LOG_DEBUG("%s: " x_msg, enc->name, ##__VA_ARGS__)
#define _LOG_ERROR(x_msg, ...) US_LOG_ERROR("%s: " x_msg, enc->name, ##__VA_ARGS__)
#define _LOG_PERROR(x_msg, ...) US_LOG_PERROR("%s: " x_msg, enc->name, ##__VA_ARGS__)
#define _LOG_INFO(x_msg, ...) US_LOG_INFO("%s: " x_msg, enc->name, ##__VA_ARGS__)
#define _LOG_VERBOSE(x_msg, ...) US_LOG_VERBOSE("%s: " x_msg, enc->name, ##__VA_ARGS__)
#define _LOG_DEBUG(x_msg, ...) US_LOG_DEBUG("%s: " x_msg, enc->name, ##__VA_ARGS__)
us_m2m_encoder_s *us_m2m_h264_encoder_init(const char *name, const char *path, uint bitrate, uint gop) {
@@ -85,7 +85,7 @@ us_m2m_encoder_s *us_m2m_jpeg_encoder_init(const char *name, const char *path, u
}
void us_m2m_encoder_destroy(us_m2m_encoder_s *enc) {
_E_LOG_INFO("Destroying encoder ...");
_LOG_INFO("Destroying encoder ...");
_m2m_encoder_cleanup(enc);
free(enc->path);
free(enc->name);
@@ -95,29 +95,45 @@ void us_m2m_encoder_destroy(us_m2m_encoder_s *enc) {
int us_m2m_encoder_compress(us_m2m_encoder_s *enc, const us_frame_s *src, us_frame_s *dest, bool force_key) {
us_m2m_encoder_runtime_s *const run = enc->run;
us_frame_encoding_begin(src, dest, (enc->output_format == V4L2_PIX_FMT_MJPEG ? V4L2_PIX_FMT_JPEG : enc->output_format));
uint dest_format = enc->output_format;
switch (enc->output_format) {
case V4L2_PIX_FMT_JPEG:
force_key = false;
// fall through
case V4L2_PIX_FMT_MJPEG:
dest_format = V4L2_PIX_FMT_JPEG;
break;
case V4L2_PIX_FMT_H264:
force_key = (
force_key
|| run->last_online != src->online
|| run->last_encode_ts + 0.5 < us_get_now_monotonic()
);
break;
}
us_frame_encoding_begin(src, dest, dest_format);
_m2m_encoder_ensure(enc, src);
if (!run->ready) { // Already prepared but failed
return -1;
}
force_key = (enc->output_format == V4L2_PIX_FMT_H264 && (force_key || run->last_online != src->online));
_E_LOG_DEBUG("Compressing new frame; force_key=%d ...", force_key);
_LOG_DEBUG("Compressing new frame; force_key=%d ...", force_key);
if (_m2m_encoder_compress_raw(enc, src, dest, force_key) < 0) {
_m2m_encoder_cleanup(enc);
_E_LOG_ERROR("Encoder destroyed due an error (compress)");
_LOG_ERROR("Encoder destroyed due an error (compress)");
return -1;
}
us_frame_encoding_end(dest);
_E_LOG_VERBOSE("Compressed new frame: size=%zu, time=%0.3Lf, force_key=%d",
_LOG_VERBOSE("Compressed new frame: size=%zu, time=%0.3Lf, force_key=%d",
dest->used, dest->encode_end_ts - dest->encode_begin_ts, force_key);
run->last_online = src->online;
run->last_encode_ts = dest->encode_end_ts;
return 0;
}
@@ -151,7 +167,7 @@ static us_m2m_encoder_s *_m2m_encoder_init(
#define _E_XIOCTL(x_request, x_value, x_msg, ...) { \
if (us_xioctl(run->fd, x_request, x_value) < 0) { \
_E_LOG_PERROR(x_msg, ##__VA_ARGS__); \
_LOG_PERROR(x_msg, ##__VA_ARGS__); \
goto error; \
} \
}
@@ -170,9 +186,9 @@ static void _m2m_encoder_ensure(us_m2m_encoder_s *enc, const us_frame_s *frame)
return; // Configured already
}
_E_LOG_INFO("Configuring encoder: DMA=%d ...", dma);
_LOG_INFO("Configuring encoder: DMA=%d ...", dma);
_E_LOG_DEBUG("Encoder changes: width=%u->%u, height=%u->%u, input_format=%u->%u, stride=%u->%u, dma=%u->%u",
_LOG_DEBUG("Encoder changes: width=%u->%u, height=%u->%u, input_format=%u->%u, stride=%u->%u, dma=%u->%u",
run->p_width, frame->width,
run->p_height, frame->height,
run->p_input_format, frame->format,
@@ -187,18 +203,18 @@ static void _m2m_encoder_ensure(us_m2m_encoder_s *enc, const us_frame_s *frame)
run->p_stride = frame->stride;
run->p_dma = dma;
_E_LOG_DEBUG("Opening encoder device ...");
_LOG_DEBUG("Opening encoder device ...");
if ((run->fd = open(enc->path, O_RDWR)) < 0) {
_E_LOG_PERROR("Can't open encoder device");
_LOG_PERROR("Can't open encoder device");
goto error;
}
_E_LOG_DEBUG("Encoder device fd=%d opened", run->fd);
_LOG_DEBUG("Encoder device fd=%d opened", run->fd);
# define SET_OPTION(x_cid, x_value) { \
struct v4l2_control m_ctl = {0}; \
m_ctl.id = x_cid; \
m_ctl.value = x_value; \
_E_LOG_DEBUG("Configuring option " #x_cid " ..."); \
_LOG_DEBUG("Configuring option " #x_cid " ..."); \
_E_XIOCTL(VIDIOC_S_CTRL, &m_ctl, "Can't set option " #x_cid); \
}
if (enc->output_format == V4L2_PIX_FMT_H264) {
@@ -227,10 +243,10 @@ static void _m2m_encoder_ensure(us_m2m_encoder_s *enc, const us_frame_s *frame)
fmt.fmt.pix_mp.height = run->p_height;
fmt.fmt.pix_mp.pixelformat = run->p_input_format;
fmt.fmt.pix_mp.field = V4L2_FIELD_ANY;
fmt.fmt.pix_mp.colorspace = V4L2_COLORSPACE_JPEG; // libcamera currently has no means to request the right colour space
fmt.fmt.pix_mp.colorspace = V4L2_COLORSPACE_JPEG; // FIXME: Wrong colors
fmt.fmt.pix_mp.num_planes = 1;
// fmt.fmt.pix_mp.plane_fmt[0].bytesperline = run->p_stride;
_E_LOG_DEBUG("Configuring INPUT format ...");
_LOG_DEBUG("Configuring INPUT format ...");
_E_XIOCTL(VIDIOC_S_FMT, &fmt, "Can't set INPUT format");
}
@@ -249,13 +265,13 @@ static void _m2m_encoder_ensure(us_m2m_encoder_s *enc, const us_frame_s *frame)
// https://github.com/raspberrypi/linux/pull/5232
fmt.fmt.pix_mp.plane_fmt[0].sizeimage = (1024 + 512) << 10; // 1.5Mb
}
_E_LOG_DEBUG("Configuring OUTPUT format ...");
_LOG_DEBUG("Configuring OUTPUT format ...");
_E_XIOCTL(VIDIOC_S_FMT, &fmt, "Can't set OUTPUT format");
if (fmt.fmt.pix_mp.pixelformat != enc->output_format) {
char fourcc_str[8];
_E_LOG_ERROR("The OUTPUT format can't be configured as %s",
_LOG_ERROR("The OUTPUT format can't be configured as %s",
us_fourcc_to_string(enc->output_format, fourcc_str, 8));
_E_LOG_ERROR("In case of Raspberry Pi, try to append 'start_x=1' to /boot/config.txt");
_LOG_ERROR("In case of Raspberry Pi, try to append 'start_x=1' to /boot/config.txt");
goto error;
}
}
@@ -277,7 +293,7 @@ static void _m2m_encoder_ensure(us_m2m_encoder_s *enc, const us_frame_s *frame)
setfps.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
setfps.parm.output.timeperframe.numerator = 1;
setfps.parm.output.timeperframe.denominator = run->fps_limit;
_E_LOG_DEBUG("Configuring INPUT FPS ...");
_LOG_DEBUG("Configuring INPUT FPS ...");
_E_XIOCTL(VIDIOC_S_PARM, &setfps, "Can't set INPUT FPS");
}
@@ -296,21 +312,21 @@ static void _m2m_encoder_ensure(us_m2m_encoder_s *enc, const us_frame_s *frame)
{
enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
_E_LOG_DEBUG("Starting INPUT ...");
_LOG_DEBUG("Starting INPUT ...");
_E_XIOCTL(VIDIOC_STREAMON, &type, "Can't start INPUT");
type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
_E_LOG_DEBUG("Starting OUTPUT ...");
_LOG_DEBUG("Starting OUTPUT ...");
_E_XIOCTL(VIDIOC_STREAMON, &type, "Can't start OUTPUT");
}
run->ready = true;
_E_LOG_INFO("Encoder is ready");
_LOG_INFO("Encoder is ready");
return;
error:
_m2m_encoder_cleanup(enc);
_E_LOG_ERROR("Encoder destroyed due an error (prepare)");
_LOG_ERROR("Encoder destroyed due an error (prepare)");
}
static int _m2m_encoder_init_buffers(
@@ -319,20 +335,20 @@ static int _m2m_encoder_init_buffers(
us_m2m_encoder_runtime_s *const run = enc->run;
_E_LOG_DEBUG("Initializing %s buffers ...", name);
_LOG_DEBUG("Initializing %s buffers ...", name);
struct v4l2_requestbuffers req = {0};
req.count = 1;
req.type = type;
req.memory = (dma ? V4L2_MEMORY_DMABUF : V4L2_MEMORY_MMAP);
_E_LOG_DEBUG("Requesting %u %s buffers ...", req.count, name);
_LOG_DEBUG("Requesting %u %s buffers ...", req.count, name);
_E_XIOCTL(VIDIOC_REQBUFS, &req, "Can't request %s buffers", name);
if (req.count < 1) {
_E_LOG_ERROR("Insufficient %s buffer memory: %u", name, req.count);
_LOG_ERROR("Insufficient %s buffer memory: %u", name, req.count);
goto error;
}
_E_LOG_DEBUG("Got %u %s buffers", req.count, name);
_LOG_DEBUG("Got %u %s buffers", req.count, name);
if (dma) {
*n_bufs_ptr = req.count;
@@ -349,25 +365,25 @@ static int _m2m_encoder_init_buffers(
buf.length = 1;
buf.m.planes = &plane;
_E_LOG_DEBUG("Querying %s buffer=%u ...", name, *n_bufs_ptr);
_LOG_DEBUG("Querying %s buffer=%u ...", name, *n_bufs_ptr);
_E_XIOCTL(VIDIOC_QUERYBUF, &buf, "Can't query %s buffer=%u", name, *n_bufs_ptr);
_E_LOG_DEBUG("Mapping %s buffer=%u ...", name, *n_bufs_ptr);
_LOG_DEBUG("Mapping %s buffer=%u ...", name, *n_bufs_ptr);
if (((*bufs_ptr)[*n_bufs_ptr].data = mmap(
NULL, plane.length,
PROT_READ | PROT_WRITE, MAP_SHARED,
run->fd, plane.m.mem_offset
)) == MAP_FAILED) {
_E_LOG_PERROR("Can't map %s buffer=%u", name, *n_bufs_ptr);
_LOG_PERROR("Can't map %s buffer=%u", name, *n_bufs_ptr);
goto error;
}
assert((*bufs_ptr)[*n_bufs_ptr].data != NULL);
(*bufs_ptr)[*n_bufs_ptr].allocated = plane.length;
_E_LOG_DEBUG("Queuing %s buffer=%u ...", name, *n_bufs_ptr);
_LOG_DEBUG("Queuing %s buffer=%u ...", name, *n_bufs_ptr);
_E_XIOCTL(VIDIOC_QBUF, &buf, "Can't queue %s buffer=%u", name, *n_bufs_ptr);
}
_E_LOG_DEBUG("All %s buffers are ready", name);
_LOG_DEBUG("All %s buffers are ready", name);
return 0;
error: // Mostly for _E_XIOCTL
@@ -383,9 +399,9 @@ static void _m2m_encoder_cleanup(us_m2m_encoder_s *enc) {
say = true;
# define STOP_STREAM(x_name, x_type) { \
enum v4l2_buf_type m_type_var = x_type; \
_E_LOG_DEBUG("Stopping %s ...", x_name); \
_LOG_DEBUG("Stopping %s ...", x_name); \
if (us_xioctl(run->fd, VIDIOC_STREAMOFF, &m_type_var) < 0) { \
_E_LOG_PERROR("Can't stop %s", x_name); \
_LOG_PERROR("Can't stop %s", x_name); \
} \
}
STOP_STREAM("OUTPUT", V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
@@ -400,7 +416,7 @@ static void _m2m_encoder_cleanup(us_m2m_encoder_s *enc) {
us_m2m_buffer_s *m_buf = &run->x_target##_bufs[m_index]; \
if (m_buf->allocated > 0 && m_buf->data != NULL) { \
if (munmap(m_buf->data, m_buf->allocated) < 0) { \
_E_LOG_PERROR("Can't unmap %s buffer=%u", #x_name, m_index); \
_LOG_PERROR("Can't unmap %s buffer=%u", #x_name, m_index); \
} \
} \
} \
@@ -415,7 +431,7 @@ static void _m2m_encoder_cleanup(us_m2m_encoder_s *enc) {
if (run->fd >= 0) {
say = true;
if (close(run->fd) < 0) {
_E_LOG_PERROR("Can't close encoder device");
_LOG_PERROR("Can't close encoder device");
}
run->fd = -1;
}
@@ -424,7 +440,7 @@ static void _m2m_encoder_cleanup(us_m2m_encoder_s *enc) {
run->ready = false;
if (say) {
_E_LOG_INFO("Encoder closed");
_LOG_INFO("Encoder closed");
}
}
@@ -437,7 +453,7 @@ static int _m2m_encoder_compress_raw(us_m2m_encoder_s *enc, const us_frame_s *sr
struct v4l2_control ctl = {0};
ctl.id = V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME;
ctl.value = 1;
_E_LOG_DEBUG("Forcing keyframe ...")
_LOG_DEBUG("Forcing keyframe ...")
_E_XIOCTL(VIDIOC_S_CTRL, &ctl, "Can't force keyframe");
}
@@ -452,17 +468,17 @@ static int _m2m_encoder_compress_raw(us_m2m_encoder_s *enc, const us_frame_s *sr
input_buf.memory = V4L2_MEMORY_DMABUF;
input_buf.field = V4L2_FIELD_NONE;
input_plane.m.fd = src->dma_fd;
_E_LOG_DEBUG("Using INPUT-DMA buffer=%u", input_buf.index);
_LOG_DEBUG("Using INPUT-DMA buffer=%u", input_buf.index);
} else {
input_buf.memory = V4L2_MEMORY_MMAP;
_E_LOG_DEBUG("Grabbing INPUT buffer ...");
_LOG_DEBUG("Grabbing INPUT buffer ...");
_E_XIOCTL(VIDIOC_DQBUF, &input_buf, "Can't grab INPUT buffer");
if (input_buf.index >= run->n_input_bufs) {
_E_LOG_ERROR("V4L2 error: grabbed invalid INPUT: buffer=%u, n_bufs=%u",
_LOG_ERROR("V4L2 error: grabbed invalid INPUT: buffer=%u, n_bufs=%u",
input_buf.index, run->n_input_bufs);
goto error;
}
_E_LOG_DEBUG("Grabbed INPUT buffer=%u", input_buf.index);
_LOG_DEBUG("Grabbed INPUT buffer=%u", input_buf.index);
}
const u64 now_ts = us_get_now_monotonic_u64();
@@ -481,7 +497,7 @@ static int _m2m_encoder_compress_raw(us_m2m_encoder_s *enc, const us_frame_s *sr
const char *input_name = (run->p_dma ? "INPUT-DMA" : "INPUT");
_E_LOG_DEBUG("Sending%s %s buffer ...", (!run->p_dma ? " (releasing)" : ""), input_name);
_LOG_DEBUG("Sending%s %s buffer ...", (!run->p_dma ? " (releasing)" : ""), input_name);
_E_XIOCTL(VIDIOC_QBUF, &input_buf, "Can't send %s buffer", input_name);
// Для не-DMA отправка буфера по факту являтся освобождением этого буфера
@@ -493,20 +509,20 @@ static int _m2m_encoder_compress_raw(us_m2m_encoder_s *enc, const us_frame_s *sr
while (true) {
if (us_get_now_monotonic() > deadline_ts) {
_E_LOG_ERROR("Waiting for the encoder is too long");
_LOG_ERROR("Waiting for the encoder is too long");
goto error;
}
struct pollfd enc_poll = {run->fd, POLLIN, 0};
_E_LOG_DEBUG("Polling encoder ...");
_LOG_DEBUG("Polling encoder ...");
if (poll(&enc_poll, 1, 1000) < 0 && errno != EINTR) {
_E_LOG_PERROR("Can't poll encoder");
_LOG_PERROR("Can't poll encoder");
goto error;
}
if (enc_poll.revents & POLLIN) {
if (!input_released) {
_E_LOG_DEBUG("Releasing %s buffer=%u ...", input_name, input_buf.index);
_LOG_DEBUG("Releasing %s buffer=%u ...", input_name, input_buf.index);
_E_XIOCTL(VIDIOC_DQBUF, &input_buf, "Can't release %s buffer=%u",
input_name, input_buf.index);
input_released = true;
@@ -518,7 +534,7 @@ static int _m2m_encoder_compress_raw(us_m2m_encoder_s *enc, const us_frame_s *sr
output_buf.memory = V4L2_MEMORY_MMAP;
output_buf.length = 1;
output_buf.m.planes = &output_plane;
_E_LOG_DEBUG("Fetching OUTPUT buffer ...");
_LOG_DEBUG("Fetching OUTPUT buffer ...");
_E_XIOCTL(VIDIOC_DQBUF, &output_buf, "Can't fetch OUTPUT buffer");
bool done = false;
@@ -526,7 +542,7 @@ static int _m2m_encoder_compress_raw(us_m2m_encoder_s *enc, const us_frame_s *sr
// Енкодер первый раз может выдать буфер с мусором и нулевым таймстампом,
// так что нужно убедиться, что мы читаем выходной буфер, соответствующий
// входному (с тем же таймстампом).
_E_LOG_DEBUG("Need to retry OUTPUT buffer due timestamp mismatch");
_LOG_DEBUG("Need to retry OUTPUT buffer due timestamp mismatch");
} else {
us_frame_set_data(dest, run->output_bufs[output_buf.index].data, output_plane.bytesused);
dest->key = output_buf.flags & V4L2_BUF_FLAG_KEYFRAME;
@@ -534,7 +550,7 @@ static int _m2m_encoder_compress_raw(us_m2m_encoder_s *enc, const us_frame_s *sr
done = true;
}
_E_LOG_DEBUG("Releasing OUTPUT buffer=%u ...", output_buf.index);
_LOG_DEBUG("Releasing OUTPUT buffer=%u ...", output_buf.index);
_E_XIOCTL(VIDIOC_QBUF, &output_buf, "Can't release OUTPUT buffer=%u", output_buf.index);
if (done) {

View File

@@ -47,6 +47,7 @@ typedef struct {
bool ready;
int last_online;
ldf last_encode_ts;
} us_m2m_encoder_runtime_s;
typedef struct {

View File

@@ -28,7 +28,7 @@
#include "../libs/tools.h"
#include "../libs/threading.h"
#include "../libs/logging.h"
#include "../libs/device.h"
#include "../libs/capture.h"
#include "../libs/signal.h"
#include "options.h"
@@ -84,12 +84,12 @@ int main(int argc, char *argv[]) {
US_THREAD_RENAME("main");
us_options_s *options = us_options_init(argc, argv);
us_device_s *dev = us_device_init();
us_capture_s *cap = us_capture_init();
us_encoder_s *enc = us_encoder_init();
_g_stream = us_stream_init(dev, enc);
_g_stream = us_stream_init(cap, enc);
_g_server = us_server_init(_g_stream);
if ((exit_code = options_parse(options, dev, enc, _g_stream, _g_server)) == 0) {
if ((exit_code = options_parse(options, cap, enc, _g_stream, _g_server)) == 0) {
# ifdef WITH_GPIO
us_gpio_init();
# endif
@@ -118,7 +118,7 @@ int main(int argc, char *argv[]) {
us_server_destroy(_g_server);
us_stream_destroy(_g_stream);
us_encoder_destroy(enc);
us_device_destroy(dev);
us_capture_destroy(cap);
us_options_destroy(options);
if (exit_code == 0) {

View File

@@ -32,6 +32,7 @@ enum _US_OPT_VALUES {
_O_IO_METHOD = 'I',
_O_DESIRED_FPS = 'f',
_O_MIN_FRAME_SIZE = 'z',
_O_ALLOW_TRUNCATED_FRAMES = 'T',
_O_PERSISTENT = 'n',
_O_DV_TIMINGS = 't',
_O_BUFFERS = 'b',
@@ -61,6 +62,7 @@ enum _US_OPT_VALUES {
_O_DEVICE_TIMEOUT = 10000,
_O_DEVICE_ERROR_DELAY,
_O_FORMAT_SWAP_RGB,
_O_M2M_DEVICE,
_O_IMAGE_DEFAULT,
@@ -100,6 +102,10 @@ enum _US_OPT_VALUES {
_O_H264_M2M_DEVICE,
# undef ADD_SINK
# ifdef WITH_V4P
_O_V4P,
# endif
# ifdef WITH_GPIO
_O_GPIO_DEVICE,
_O_GPIO_CONSUMER_PREFIX,
@@ -132,10 +138,12 @@ static const struct option _LONG_OPTS[] = {
{"input", required_argument, NULL, _O_INPUT},
{"resolution", required_argument, NULL, _O_RESOLUTION},
{"format", required_argument, NULL, _O_FORMAT},
{"format-swap-rgb", required_argument, NULL, _O_FORMAT_SWAP_RGB},
{"tv-standard", required_argument, NULL, _O_TV_STANDARD},
{"io-method", required_argument, NULL, _O_IO_METHOD},
{"desired-fps", required_argument, NULL, _O_DESIRED_FPS},
{"min-frame-size", required_argument, NULL, _O_MIN_FRAME_SIZE},
{"allow-truncated-frames", no_argument, NULL, _O_ALLOW_TRUNCATED_FRAMES},
{"persistent", no_argument, NULL, _O_PERSISTENT},
{"dv-timings", no_argument, NULL, _O_DV_TIMINGS},
{"buffers", required_argument, NULL, _O_BUFFERS},
@@ -204,6 +212,10 @@ static const struct option _LONG_OPTS[] = {
{"sink-client-ttl", required_argument, NULL, _O_JPEG_SINK_CLIENT_TTL},
{"sink-timeout", required_argument, NULL, _O_JPEG_SINK_TIMEOUT},
# ifdef WITH_V4P
{"v4p", no_argument, NULL, _O_V4P},
# endif
# ifdef WITH_GPIO
{"gpio-device", required_argument, NULL, _O_GPIO_DEVICE},
{"gpio-consumer-prefix", required_argument, NULL, _O_GPIO_CONSUMER_PREFIX},
@@ -240,7 +252,7 @@ static int _parse_resolution(const char *str, unsigned *width, unsigned *height,
static int _check_instance_id(const char *str);
static void _features(void);
static void _help(FILE *fp, const us_device_s *dev, const us_encoder_s *enc, const us_stream_s *stream, const us_server_s *server);
static void _help(FILE *fp, const us_capture_s *cap, const us_encoder_s *enc, const us_stream_s *stream, const us_server_s *server);
us_options_s *us_options_init(unsigned argc, char *argv[]) {
@@ -260,6 +272,9 @@ void us_options_destroy(us_options_s *options) {
US_DELETE(options->jpeg_sink, us_memsink_destroy);
US_DELETE(options->raw_sink, us_memsink_destroy);
US_DELETE(options->h264_sink, us_memsink_destroy);
# ifdef WITH_V4P
US_DELETE(options->drm, us_drm_destroy);
# endif
for (unsigned index = 0; index < options->argc; ++index) {
free(options->argv_copy[index]);
@@ -270,7 +285,7 @@ void us_options_destroy(us_options_s *options) {
}
int options_parse(us_options_s *options, us_device_s *dev, us_encoder_s *enc, us_stream_s *stream, us_server_s *server) {
int options_parse(us_options_s *options, us_capture_s *cap, us_encoder_s *enc, us_stream_s *stream, us_server_s *server) {
# define OPT_SET(x_dest, x_value) { \
x_dest = x_value; \
break; \
@@ -314,15 +329,15 @@ int options_parse(us_options_s *options, us_device_s *dev, us_encoder_s *enc, us
}
# define OPT_CTL_DEFAULT_NOBREAK(x_dest) { \
dev->ctl.x_dest.mode = CTL_MODE_DEFAULT; \
cap->ctl.x_dest.mode = CTL_MODE_DEFAULT; \
}
# define OPT_CTL_MANUAL(x_dest) { \
if (!strcasecmp(optarg, "default")) { \
OPT_CTL_DEFAULT_NOBREAK(x_dest); \
} else { \
dev->ctl.x_dest.mode = CTL_MODE_VALUE; \
OPT_NUMBER("--"#x_dest, dev->ctl.x_dest.value, INT_MIN, INT_MAX, 0); \
cap->ctl.x_dest.mode = CTL_MODE_VALUE; \
OPT_NUMBER("--"#x_dest, cap->ctl.x_dest.value, INT_MIN, INT_MAX, 0); \
} \
break; \
}
@@ -331,16 +346,16 @@ int options_parse(us_options_s *options, us_device_s *dev, us_encoder_s *enc, us
if (!strcasecmp(optarg, "default")) { \
OPT_CTL_DEFAULT_NOBREAK(x_dest); \
} else if (!strcasecmp(optarg, "auto")) { \
dev->ctl.x_dest.mode = CTL_MODE_AUTO; \
cap->ctl.x_dest.mode = CTL_MODE_AUTO; \
} else { \
dev->ctl.x_dest.mode = CTL_MODE_VALUE; \
OPT_NUMBER("--"#x_dest, dev->ctl.x_dest.value, INT_MIN, INT_MAX, 0); \
cap->ctl.x_dest.mode = CTL_MODE_VALUE; \
OPT_NUMBER("--"#x_dest, cap->ctl.x_dest.value, INT_MIN, INT_MAX, 0); \
} \
break; \
}
# define ADD_SINK(x_prefix) \
char *x_prefix##_name = NULL; \
const char *x_prefix##_name = NULL; \
mode_t x_prefix##_mode = 0660; \
bool x_prefix##_rm = false; \
unsigned x_prefix##_client_ttl = 10; \
@@ -351,7 +366,7 @@ int options_parse(us_options_s *options, us_device_s *dev, us_encoder_s *enc, us
# undef ADD_SINK
# ifdef WITH_SETPROCTITLE
char *process_name_prefix = NULL;
const char *process_name_prefix = NULL;
# endif
char short_opts[128];
@@ -359,28 +374,30 @@ int options_parse(us_options_s *options, us_device_s *dev, us_encoder_s *enc, us
for (int ch; (ch = getopt_long(options->argc, options->argv_copy, short_opts, _LONG_OPTS, NULL)) >= 0;) {
switch (ch) {
case _O_DEVICE: OPT_SET(dev->path, optarg);
case _O_INPUT: OPT_NUMBER("--input", dev->input, 0, 128, 0);
case _O_RESOLUTION: OPT_RESOLUTION("--resolution", dev->width, dev->height, true);
case _O_DEVICE: OPT_SET(cap->path, optarg);
case _O_INPUT: OPT_NUMBER("--input", cap->input, 0, 128, 0);
case _O_RESOLUTION: OPT_RESOLUTION("--resolution", cap->width, cap->height, true);
# pragma GCC diagnostic ignored "-Wsign-compare"
# pragma GCC diagnostic push
case _O_FORMAT: OPT_PARSE_ENUM("pixel format", dev->format, us_device_parse_format, US_FORMATS_STR);
case _O_FORMAT: OPT_PARSE_ENUM("pixel format", cap->format, us_capture_parse_format, US_FORMATS_STR);
# pragma GCC diagnostic pop
case _O_TV_STANDARD: OPT_PARSE_ENUM("TV standard", dev->standard, us_device_parse_standard, US_STANDARDS_STR);
case _O_IO_METHOD: OPT_PARSE_ENUM("IO method", dev->io_method, us_device_parse_io_method, US_IO_METHODS_STR);
case _O_DESIRED_FPS: OPT_NUMBER("--desired-fps", dev->desired_fps, 0, US_VIDEO_MAX_FPS, 0);
case _O_MIN_FRAME_SIZE: OPT_NUMBER("--min-frame-size", dev->min_frame_size, 1, 8192, 0);
case _O_PERSISTENT: OPT_SET(dev->persistent, true);
case _O_DV_TIMINGS: OPT_SET(dev->dv_timings, true);
case _O_BUFFERS: OPT_NUMBER("--buffers", dev->n_bufs, 1, 32, 0);
case _O_FORMAT_SWAP_RGB: OPT_SET(cap->format_swap_rgb, true);
case _O_TV_STANDARD: OPT_PARSE_ENUM("TV standard", cap->standard, us_capture_parse_standard, US_STANDARDS_STR);
case _O_IO_METHOD: OPT_PARSE_ENUM("IO method", cap->io_method, us_capture_parse_io_method, US_IO_METHODS_STR);
case _O_DESIRED_FPS: OPT_NUMBER("--desired-fps", cap->desired_fps, 0, US_VIDEO_MAX_FPS, 0);
case _O_MIN_FRAME_SIZE: OPT_NUMBER("--min-frame-size", cap->min_frame_size, 1, 8192, 0);
case _O_ALLOW_TRUNCATED_FRAMES: OPT_SET(cap->allow_truncated_frames, true);
case _O_PERSISTENT: OPT_SET(cap->persistent, true);
case _O_DV_TIMINGS: OPT_SET(cap->dv_timings, true);
case _O_BUFFERS: OPT_NUMBER("--buffers", cap->n_bufs, 1, 32, 0);
case _O_WORKERS: OPT_NUMBER("--workers", enc->n_workers, 1, 32, 0);
case _O_QUALITY: OPT_NUMBER("--quality", dev->jpeg_quality, 1, 100, 0);
case _O_QUALITY: OPT_NUMBER("--quality", cap->jpeg_quality, 1, 100, 0);
case _O_ENCODER: OPT_PARSE_ENUM("encoder type", enc->type, us_encoder_parse_type, ENCODER_TYPES_STR);
case _O_GLITCHED_RESOLUTIONS: break; // Deprecated
case _O_BLANK: break; // Deprecated
case _O_LAST_AS_BLANK: break; // Deprecated
case _O_SLOWDOWN: OPT_SET(stream->slowdown, true);
case _O_DEVICE_TIMEOUT: OPT_NUMBER("--device-timeout", dev->timeout, 1, 60, 0);
case _O_DEVICE_TIMEOUT: OPT_NUMBER("--device-timeout", cap->timeout, 1, 60, 0);
case _O_DEVICE_ERROR_DELAY: OPT_NUMBER("--device-error-delay", stream->error_delay, 1, 60, 0);
case _O_M2M_DEVICE: OPT_SET(enc->m2m_path, optarg);
@@ -451,6 +468,13 @@ int options_parse(us_options_s *options, us_device_s *dev, us_encoder_s *enc, us
case _O_H264_GOP: OPT_NUMBER("--h264-gop", stream->h264_gop, 0, 60, 0);
case _O_H264_M2M_DEVICE: OPT_SET(stream->h264_m2m_path, optarg);
# ifdef WITH_V4P
case _O_V4P:
options->drm = us_drm_init();
stream->drm = options->drm;
break;
# endif
# ifdef WITH_GPIO
case _O_GPIO_DEVICE: OPT_SET(us_g_gpio.path, optarg);
case _O_GPIO_CONSUMER_PREFIX: OPT_SET(us_g_gpio.consumer_prefix, optarg);
@@ -479,7 +503,7 @@ int options_parse(us_options_s *options, us_device_s *dev, us_encoder_s *enc, us
case _O_FORCE_LOG_COLORS: OPT_SET(us_g_log_colored, true);
case _O_NO_LOG_COLORS: OPT_SET(us_g_log_colored, false);
case _O_HELP: _help(stdout, dev, enc, stream, server); return 1;
case _O_HELP: _help(stdout, cap, enc, stream, server); return 1;
case _O_VERSION: puts(US_VERSION); return 1;
case _O_FEATURES: _features(); return 1;
@@ -492,7 +516,7 @@ int options_parse(us_options_s *options, us_device_s *dev, us_encoder_s *enc, us
# define ADD_SINK(x_label, x_prefix) { \
if (x_prefix##_name && x_prefix##_name[0] != '\0') { \
options->x_prefix = us_memsink_init( \
options->x_prefix = us_memsink_init_opened( \
x_label, \
x_prefix##_name, \
true, \
@@ -588,7 +612,7 @@ static void _features(void) {
# endif
}
static void _help(FILE *fp, const us_device_s *dev, const us_encoder_s *enc, const us_stream_s *stream, const us_server_s *server) {
static void _help(FILE *fp, const us_capture_s *cap, const us_encoder_s *enc, const us_stream_s *stream, const us_server_s *server) {
# define SAY(x_msg, ...) fprintf(fp, x_msg "\n", ##__VA_ARGS__)
SAY("\nuStreamer - Lightweight and fast MJPEG-HTTP streamer");
SAY("═══════════════════════════════════════════════════");
@@ -596,11 +620,13 @@ static void _help(FILE *fp, const us_device_s *dev, const us_encoder_s *enc, con
SAY("Copyright (C) 2018-2024 Maxim Devaev <mdevaev@gmail.com>\n");
SAY("Capturing options:");
SAY("══════════════════");
SAY(" -d|--device </dev/path> ───────────── Path to V4L2 device. Default: %s.\n", dev->path);
SAY(" -i|--input <N> ────────────────────── Input channel. Default: %u.\n", dev->input);
SAY(" -r|--resolution <WxH> ─────────────── Initial image resolution. Default: %ux%u.\n", dev->width, dev->height);
SAY(" -d|--device </dev/path> ───────────── Path to V4L2 device. Default: %s.\n", cap->path);
SAY(" -i|--input <N> ────────────────────── Input channel. Default: %u.\n", cap->input);
SAY(" -r|--resolution <WxH> ─────────────── Initial image resolution. Default: %ux%u.\n", cap->width, cap->height);
SAY(" -m|--format <fmt> ─────────────────── Image format.");
SAY(" Available: %s; default: YUYV.\n", US_FORMATS_STR);
SAY(" --format-swap-rgb ──────────────── Enable R-G-B order swapping: RGB to BGR and vice versa.");
SAY(" Default: disabled.\n");
SAY(" -a|--tv-standard <std> ────────────── Force TV standard.");
SAY(" Available: %s; default: disabled.\n", US_STANDARDS_STR);
SAY(" -I|--io-method <method> ───────────── Set V4L2 IO method (see kernel documentation).");
@@ -608,16 +634,18 @@ static void _help(FILE *fp, const us_device_s *dev, const us_encoder_s *enc, con
SAY(" Available: %s; default: MMAP.\n", US_IO_METHODS_STR);
SAY(" -f|--desired-fps <N> ──────────────── Desired FPS. Default: maximum possible.\n");
SAY(" -z|--min-frame-size <N> ───────────── Drop frames smaller then this limit. Useful if the device");
SAY(" produces small-sized garbage frames. Default: %zu bytes.\n", dev->min_frame_size);
SAY(" produces small-sized garbage frames. Default: %zu bytes.\n", cap->min_frame_size);
SAY(" -T|--allow-truncated-frames ───────── Allows to handle truncated frames. Useful if the device");
SAY(" produces incorrect but still acceptable frames. Default: disabled.\n");
SAY(" -n|--persistent ───────────────────── Don't re-initialize device on timeout. Default: disabled.\n");
SAY(" -t|--dv-timings ───────────────────── Enable DV-timings querying and events processing");
SAY(" to automatic resolution change. Default: disabled.\n");
SAY(" -b|--buffers <N> ──────────────────── The number of buffers to receive data from the device.");
SAY(" Each buffer may processed using an independent thread.");
SAY(" Default: %u (the number of CPU cores (but not more than 4) + 1).\n", dev->n_bufs);
SAY(" Default: %u (the number of CPU cores (but not more than 4) + 1).\n", cap->n_bufs);
SAY(" -w|--workers <N> ──────────────────── The number of worker threads but not more than buffers.");
SAY(" Default: %u (the number of CPU cores (but not more than 4)).\n", enc->n_workers);
SAY(" -q|--quality <N> ──────────────────── Set quality of JPEG encoding from 1 to 100 (best). Default: %u.", dev->jpeg_quality);
SAY(" -q|--quality <N> ──────────────────── Set quality of JPEG encoding from 1 to 100 (best). Default: %u.", cap->jpeg_quality);
SAY(" Note: If HW encoding is used (JPEG source format selected),");
SAY(" this parameter attempts to configure the camera");
SAY(" or capture device hardware's internal encoder.");
@@ -628,14 +656,13 @@ static void _help(FILE *fp, const us_device_s *dev, const us_encoder_s *enc, con
SAY(" * CPU ──────── Software MJPEG encoding (default);");
SAY(" * HW ───────── Use pre-encoded MJPEG frames directly from camera hardware;");
SAY(" * M2M-VIDEO ── GPU-accelerated MJPEG encoding using V4L2 M2M video interface;");
SAY(" * M2M-IMAGE ── GPU-accelerated JPEG encoding using V4L2 M2M image interface;");
SAY(" * NOOP ─────── Don't compress MJPEG stream (do nothing).\n");
SAY(" * M2M-IMAGE ── GPU-accelerated JPEG encoding using V4L2 M2M image interface.\n");
SAY(" -g|--glitched-resolutions <WxH,...> ─ It doesn't do anything. Still here for compatibility.\n");
SAY(" -k|--blank <path> ─────────────────── It doesn't do anything. Still here for compatibility.\n");
SAY(" -K|--last-as-blank <sec> ──────────── It doesn't do anything. Still here for compatibility.\n");
SAY(" -l|--slowdown ─────────────────────── Slowdown capturing to 1 FPS or less when no stream or sink clients");
SAY(" are connected. Useful to reduce CPU consumption. Default: disabled.\n");
SAY(" --device-timeout <sec> ────────────── Timeout for device querying. Default: %u.\n", dev->timeout);
SAY(" --device-timeout <sec> ────────────── Timeout for device querying. Default: %u.\n", cap->timeout);
SAY(" --device-error-delay <sec> ────────── Delay before trying to connect to the device again");
SAY(" after an error (timeout for example). Default: %u.\n", stream->error_delay);
SAY(" --m2m-device </dev/path> ──────────── Path to V4L2 M2M encoder device. Default: auto select.\n");
@@ -698,6 +725,12 @@ static void _help(FILE *fp, const us_device_s *dev, const us_encoder_s *enc, con
SAY(" --h264-bitrate <kbps> ───────── H264 bitrate in Kbps. Default: %u.\n", stream->h264_bitrate);
SAY(" --h264-gop <N> ──────────────── Interval between keyframes. Default: %u.\n", stream->h264_gop);
SAY(" --h264-m2m-device </dev/path> ─ Path to V4L2 M2M encoder device. Default: auto select.\n");
# ifdef WITH_V4P
SAY("Passthrough options for PiKVM V4:");
SAY("═════════════════════════════════");
SAY(" --v4p ─ Enable HDMI passthrough to OUT2 on the device: https://docs.pikvm.org/pass");
SAY(" Default: disabled.\n");
# endif
# ifdef WITH_GPIO
SAY("GPIO options:");
SAY("═════════════");

View File

@@ -39,7 +39,10 @@
#include "../libs/frame.h"
#include "../libs/memsink.h"
#include "../libs/options.h"
#include "../libs/device.h"
#include "../libs/capture.h"
#ifdef WITH_V4P
# include "../libs/drm/drm.h"
#endif
#include "encoder.h"
#include "stream.h"
@@ -56,10 +59,13 @@ typedef struct {
us_memsink_s *jpeg_sink;
us_memsink_s *raw_sink;
us_memsink_s *h264_sink;
# ifdef WITH_V4P
us_drm_s *drm;
# endif
} us_options_s;
us_options_s *us_options_init(unsigned argc, char *argv[]);
void us_options_destroy(us_options_s *options);
int options_parse(us_options_s *options, us_device_s *dev, us_encoder_s *enc, us_stream_s *stream, us_server_s *server);
int options_parse(us_options_s *options, us_capture_s *cap, us_encoder_s *enc, us_stream_s *stream, us_server_s *server);

View File

@@ -24,6 +24,7 @@
#include <stdlib.h>
#include <stdatomic.h>
#include <limits.h>
#include <unistd.h>
#include <errno.h>
#include <assert.h>
@@ -31,6 +32,7 @@
#include <pthread.h>
#include "../libs/types.h"
#include "../libs/errors.h"
#include "../libs/tools.h"
#include "../libs/threading.h"
#include "../libs/process.h"
@@ -38,12 +40,17 @@
#include "../libs/ring.h"
#include "../libs/frame.h"
#include "../libs/memsink.h"
#include "../libs/device.h"
#include "../libs/capture.h"
#include "../libs/unjpeg.h"
#include "../libs/fpsi.h"
#ifdef WITH_V4P
# include "../libs/drm/drm.h"
#endif
#include "blank.h"
#include "encoder.h"
#include "workers.h"
#include "h264.h"
#include "m2m.h"
#ifdef WITH_GPIO
# include "gpio/gpio.h"
#endif
@@ -51,7 +58,7 @@
typedef struct {
pthread_t tid;
us_device_s *dev;
us_capture_s *cap;
us_queue_s *queue;
pthread_mutex_t *mutex;
atomic_bool *stop;
@@ -65,66 +72,86 @@ typedef struct {
} _worker_context_s;
static void _stream_set_capture_state(us_stream_s *stream, uint width, uint height, bool online, uint captured_fps);
static void *_releaser_thread(void *v_ctx);
static void *_jpeg_thread(void *v_ctx);
static void *_h264_thread(void *v_ctx);
static void *_raw_thread(void *v_ctx);
static void *_h264_thread(void *v_ctx);
#ifdef WITH_V4P
static void *_drm_thread(void *v_ctx);
#endif
static us_hw_buffer_s *_get_latest_hw(us_queue_s *queue);
static us_capture_hwbuf_s *_get_latest_hw(us_queue_s *queue);
static bool _stream_has_jpeg_clients_cached(us_stream_s *stream);
static bool _stream_has_any_clients_cached(us_stream_s *stream);
static int _stream_init_loop(us_stream_s *stream);
#ifdef WITH_V4P
static void _stream_drm_ensure_no_signal(us_stream_s *stream);
#endif
static void _stream_expose_jpeg(us_stream_s *stream, const us_frame_s *frame);
static void _stream_expose_raw(us_stream_s *stream, const us_frame_s *frame);
static void _stream_encode_expose_h264(us_stream_s *stream, const us_frame_s *frame, bool force_key);
static void _stream_check_suicide(us_stream_s *stream);
us_stream_s *us_stream_init(us_device_s *dev, us_encoder_s *enc) {
us_stream_s *us_stream_init(us_capture_s *cap, us_encoder_s *enc) {
us_stream_http_s *http;
US_CALLOC(http, 1);
# ifdef WITH_V4P
http->drm_fpsi = us_fpsi_init("DRM", true);
# endif
http->h264_fpsi = us_fpsi_init("H264", true);
US_RING_INIT_WITH_ITEMS(http->jpeg_ring, 4, us_frame_init);
atomic_init(&http->has_clients, false);
atomic_init(&http->snapshot_requested, 0);
atomic_init(&http->last_request_ts, 0);
http->captured_fpsi = us_fpsi_init("STREAM-CAPTURED", true);
us_stream_runtime_s *run;
US_CALLOC(run, 1);
US_RING_INIT_WITH_ITEMS(run->http_jpeg_ring, 4, us_frame_init);
atomic_init(&run->http_has_clients, false);
atomic_init(&run->http_snapshot_requested, 0);
atomic_init(&run->http_last_request_ts, 0);
atomic_init(&run->http_capture_state, 0);
atomic_init(&run->stop, false);
run->blank = us_blank_init();
run->http = http;
us_stream_s *stream;
US_CALLOC(stream, 1);
stream->dev = dev;
stream->cap = cap;
stream->enc = enc;
stream->error_delay = 1;
stream->h264_bitrate = 5000; // Kbps
stream->h264_gop = 30;
stream->run = run;
us_blank_draw(run->blank, "< NO SIGNAL >", dev->width, dev->height);
_stream_set_capture_state(stream, dev->width, dev->height, false, 0);
us_blank_draw(run->blank, "< NO SIGNAL >", cap->width, cap->height);
us_fpsi_meta_s meta = {0};
us_fpsi_frame_to_meta(run->blank->raw, &meta);
us_fpsi_update(http->captured_fpsi, false, &meta);
return stream;
}
void us_stream_destroy(us_stream_s *stream) {
us_fpsi_destroy(stream->run->http->captured_fpsi);
US_RING_DELETE_WITH_ITEMS(stream->run->http->jpeg_ring, us_frame_destroy);
us_fpsi_destroy(stream->run->http->h264_fpsi);
# ifdef WITH_V4P
us_fpsi_destroy(stream->run->http->drm_fpsi);
# endif
us_blank_destroy(stream->run->blank);
US_RING_DELETE_WITH_ITEMS(stream->run->http_jpeg_ring, us_frame_destroy);
free(stream->run->http);
free(stream->run);
free(stream);
}
void us_stream_loop(us_stream_s *stream) {
us_stream_runtime_s *const run = stream->run;
us_device_s *const dev = stream->dev;
us_capture_s *const cap = stream->cap;
US_LOG_INFO("Using V4L2 device: %s", dev->path);
US_LOG_INFO("Using desired FPS: %u", dev->desired_fps);
atomic_store(&run->http_last_request_ts, us_get_now_monotonic());
atomic_store(&run->http->last_request_ts, us_get_now_monotonic());
if (stream->h264_sink != NULL) {
run->h264 = us_h264_stream_init(stream->h264_sink, stream->h264_m2m_path, stream->h264_bitrate, stream->h264_gop);
run->h264_enc = us_m2m_h264_encoder_init("H264", stream->h264_m2m_path, stream->h264_bitrate, stream->h264_gop);
run->h264_tmp_src = us_frame_init();
run->h264_dest = us_frame_init();
}
while (!_stream_init_loop(stream)) {
@@ -133,80 +160,65 @@ void us_stream_loop(us_stream_s *stream) {
pthread_mutex_t release_mutex;
US_MUTEX_INIT(release_mutex);
const uint n_releasers = dev->run->n_bufs;
const uint n_releasers = cap->run->n_bufs;
_releaser_context_s *releasers;
US_CALLOC(releasers, n_releasers);
for (uint index = 0; index < n_releasers; ++index) {
_releaser_context_s *ctx = &releasers[index];
ctx->dev = dev;
ctx->cap = cap;
ctx->queue = us_queue_init(1);
ctx->mutex = &release_mutex;
ctx->stop = &threads_stop;
US_THREAD_CREATE(ctx->tid, _releaser_thread, ctx);
}
_worker_context_s jpeg_ctx = {
.queue = us_queue_init(dev->run->n_bufs),
.stream = stream,
.stop = &threads_stop,
};
US_THREAD_CREATE(jpeg_ctx.tid, _jpeg_thread, &jpeg_ctx);
_worker_context_s h264_ctx;
if (run->h264 != NULL) {
h264_ctx.queue = us_queue_init(dev->run->n_bufs);
h264_ctx.stream = stream;
h264_ctx.stop = &threads_stop;
US_THREAD_CREATE(h264_ctx.tid, _h264_thread, &h264_ctx);
}
_worker_context_s raw_ctx;
if (stream->raw_sink != NULL) {
raw_ctx.queue = us_queue_init(2);
raw_ctx.stream = stream;
raw_ctx.stop = &threads_stop;
US_THREAD_CREATE(raw_ctx.tid, _raw_thread, &raw_ctx);
}
uint captured_fps_accum = 0;
sll captured_fps_ts = 0;
uint captured_fps = 0;
# define CREATE_WORKER(x_cond, x_ctx, x_thread, x_capacity) \
_worker_context_s *x_ctx = NULL; \
if (x_cond) { \
US_CALLOC(x_ctx, 1); \
x_ctx->queue = us_queue_init(x_capacity); \
x_ctx->stream = stream; \
x_ctx->stop = &threads_stop; \
US_THREAD_CREATE(x_ctx->tid, (x_thread), x_ctx); \
}
CREATE_WORKER(true, jpeg_ctx, _jpeg_thread, cap->run->n_bufs);
CREATE_WORKER((stream->raw_sink != NULL), raw_ctx, _raw_thread, 2);
CREATE_WORKER((stream->h264_sink != NULL), h264_ctx, _h264_thread, cap->run->n_bufs);
# ifdef WITH_V4P
CREATE_WORKER((stream->drm != NULL), drm_ctx, _drm_thread, cap->run->n_bufs); // cppcheck-suppress assertWithSideEffect
# endif
# undef CREATE_WORKER
US_LOG_INFO("Capturing ...");
uint slowdown_count = 0;
while (!atomic_load(&run->stop) && !atomic_load(&threads_stop)) {
us_hw_buffer_s *hw;
switch (us_device_grab_buffer(dev, &hw)) {
case -2: continue; // Broken frame
case -1: goto close; // Error
default: break; // Grabbed on >= 0
us_capture_hwbuf_s *hw;
switch (us_capture_hwbuf_grab(cap, &hw)) {
case 0 ... INT_MAX: break; // Grabbed buffer number
case US_ERROR_NO_DATA: continue; // Broken frame
default: goto close; // Any error
}
const sll now_sec_ts = us_floor_ms(us_get_now_monotonic());
if (now_sec_ts != captured_fps_ts) {
captured_fps = captured_fps_accum;
captured_fps_accum = 0;
captured_fps_ts = now_sec_ts;
US_LOG_PERF_FPS("A new second has come; captured_fps=%u", captured_fps);
}
captured_fps_accum += 1;
us_fpsi_meta_s meta = {0};
us_fpsi_frame_to_meta(&hw->raw, &meta);
us_fpsi_update(run->http->captured_fpsi, true, &meta);
_stream_set_capture_state(stream, dev->run->width, dev->run->height, true, captured_fps);
# ifdef WITH_GPIO
us_gpio_set_stream_online(true);
# endif
us_device_buffer_incref(hw); // JPEG
us_queue_put(jpeg_ctx.queue, hw, 0);
if (run->h264 != NULL) {
us_device_buffer_incref(hw); // H264
us_queue_put(h264_ctx.queue, hw, 0);
}
if (stream->raw_sink != NULL) {
us_device_buffer_incref(hw); // RAW
us_queue_put(raw_ctx.queue, hw, 0);
}
# define QUEUE_HW(x_ctx) if (x_ctx != NULL) { \
us_capture_hwbuf_incref(hw); \
us_queue_put(x_ctx->queue, hw, 0); \
}
QUEUE_HW(jpeg_ctx);
QUEUE_HW(raw_ctx);
QUEUE_HW(h264_ctx);
# ifdef WITH_V4P
QUEUE_HW(drm_ctx);
# endif
# undef QUEUE_HW
us_queue_put(releasers[hw->buf.index].queue, hw, 0); // Plan to release
// Мы не обновляем здесь состояние синков, потому что это происходит внутри обслуживающих их потоков
@@ -223,18 +235,18 @@ void us_stream_loop(us_stream_s *stream) {
close:
atomic_store(&threads_stop, true);
if (stream->raw_sink != NULL) {
US_THREAD_JOIN(raw_ctx.tid);
us_queue_destroy(raw_ctx.queue);
}
if (run->h264 != NULL) {
US_THREAD_JOIN(h264_ctx.tid);
us_queue_destroy(h264_ctx.queue);
}
US_THREAD_JOIN(jpeg_ctx.tid);
us_queue_destroy(jpeg_ctx.queue);
# define DELETE_WORKER(x_ctx) if (x_ctx != NULL) { \
US_THREAD_JOIN(x_ctx->tid); \
us_queue_destroy(x_ctx->queue); \
free(x_ctx); \
}
# ifdef WITH_V4P
DELETE_WORKER(drm_ctx);
# endif
DELETE_WORKER(h264_ctx);
DELETE_WORKER(raw_ctx);
DELETE_WORKER(jpeg_ctx);
# undef DELETE_WORKER
for (uint index = 0; index < n_releasers; ++index) {
US_THREAD_JOIN(releasers[index].tid);
@@ -246,44 +258,28 @@ void us_stream_loop(us_stream_s *stream) {
atomic_store(&threads_stop, false);
us_encoder_close(stream->enc);
us_device_close(dev);
us_capture_close(cap);
if (!atomic_load(&run->stop)) {
US_SEP_INFO('=');
}
}
US_DELETE(run->h264, us_h264_stream_destroy);
US_DELETE(run->h264_enc, us_m2m_encoder_destroy);
US_DELETE(run->h264_tmp_src, us_frame_destroy);
US_DELETE(run->h264_dest, us_frame_destroy);
}
void us_stream_loop_break(us_stream_s *stream) {
atomic_store(&stream->run->stop, true);
}
// Decodes the bit-packed capture state for HTTP readers. All four values are
// packed into a single atomic u64 (see _stream_set_capture_state) so that a
// reader always observes one mutually consistent snapshot without locking:
//   bits 0..15 = width, bits 16..31 = height,
//   bits 32..47 = captured_fps, bit 48 = online flag.
void us_stream_get_capture_state(us_stream_s *stream, uint *width, uint *height, bool *online, uint *captured_fps) {
const u64 state = atomic_load(&stream->run->http_capture_state);
*width = state & 0xFFFF;
*height = (state >> 16) & 0xFFFF;
*captured_fps = (state >> 32) & 0xFFFF;
*online = (state >> 48) & 1;
}
// Packs width/height/captured_fps/online into one u64 and publishes it with a
// single atomic store; the counterpart us_stream_get_capture_state() unpacks
// it. Each numeric field is masked to 16 bits, so values above 65535 are
// silently truncated (fine for the resolutions/FPS this code deals with).
void _stream_set_capture_state(us_stream_s *stream, uint width, uint height, bool online, uint captured_fps) {
const u64 state = (
(u64)(width & 0xFFFF)
| ((u64)(height & 0xFFFF) << 16)
| ((u64)(captured_fps & 0xFFFF) << 32)
| ((u64)(online ? 1 : 0) << 48)
);
atomic_store(&stream->run->http_capture_state, state);
}
static void *_releaser_thread(void *v_ctx) {
US_THREAD_SETTLE("str_rel")
_releaser_context_s *ctx = v_ctx;
while (!atomic_load(ctx->stop)) {
us_hw_buffer_s *hw;
us_capture_hwbuf_s *hw;
if (us_queue_get(ctx->queue, (void**)&hw, 0.1) < 0) {
continue;
}
@@ -296,7 +292,7 @@ static void *_releaser_thread(void *v_ctx) {
}
US_MUTEX_LOCK(*ctx->mutex);
const int released = us_device_release_buffer(ctx->dev, hw);
const int released = us_capture_hwbuf_release(ctx->cap, hw);
US_MUTEX_UNLOCK(*ctx->mutex);
if (released < 0) {
goto done;
@@ -317,27 +313,27 @@ static void *_jpeg_thread(void *v_ctx) {
uint fluency_passed = 0;
while (!atomic_load(ctx->stop)) {
us_worker_s *const ready_wr = us_workers_pool_wait(stream->enc->run->pool);
us_encoder_job_s *const ready_job = ready_wr->job;
us_worker_s *const wr = us_workers_pool_wait(stream->enc->run->pool);
us_encoder_job_s *const job = wr->job;
if (ready_job->hw != NULL) {
us_device_buffer_decref(ready_job->hw);
ready_job->hw = NULL;
if (ready_wr->job_failed) {
if (job->hw != NULL) {
us_capture_hwbuf_decref(job->hw);
job->hw = NULL;
if (wr->job_failed) {
// pass
} else if (ready_wr->job_timely) {
_stream_expose_jpeg(stream, ready_job->dest);
if (atomic_load(&stream->run->http_snapshot_requested) > 0) { // Process real snapshots
atomic_fetch_sub(&stream->run->http_snapshot_requested, 1);
} else if (wr->job_timely) {
_stream_expose_jpeg(stream, job->dest);
if (atomic_load(&stream->run->http->snapshot_requested) > 0) { // Process real snapshots
atomic_fetch_sub(&stream->run->http->snapshot_requested, 1);
}
US_LOG_PERF("JPEG: ##### Encoded JPEG exposed; worker=%s, latency=%.3Lf",
ready_wr->name, us_get_now_monotonic() - ready_job->dest->grab_ts);
wr->name, us_get_now_monotonic() - job->dest->grab_ts);
} else {
US_LOG_PERF("JPEG: ----- Encoded JPEG dropped; worker=%s", ready_wr->name);
US_LOG_PERF("JPEG: ----- Encoded JPEG dropped; worker=%s", wr->name);
}
}
us_hw_buffer_s *hw = _get_latest_hw(ctx->queue);
us_capture_hwbuf_s *hw = _get_latest_hw(ctx->queue);
if (hw == NULL) {
continue;
}
@@ -345,7 +341,7 @@ static void *_jpeg_thread(void *v_ctx) {
const bool update_required = (stream->jpeg_sink != NULL && us_memsink_server_check(stream->jpeg_sink, NULL));
if (!update_required && !_stream_has_jpeg_clients_cached(stream)) {
US_LOG_VERBOSE("JPEG: Passed encoding because nobody is watching");
us_device_buffer_decref(hw);
us_capture_hwbuf_decref(hw);
continue;
}
@@ -354,62 +350,18 @@ static void *_jpeg_thread(void *v_ctx) {
fluency_passed += 1;
US_LOG_VERBOSE("JPEG: Passed %u frames for fluency: now=%.03Lf, grab_after=%.03Lf",
fluency_passed, now_ts, grab_after_ts);
us_device_buffer_decref(hw);
us_capture_hwbuf_decref(hw);
continue;
}
fluency_passed = 0;
const ldf fluency_delay = us_workers_pool_get_fluency_delay(stream->enc->run->pool, ready_wr);
const ldf fluency_delay = us_workers_pool_get_fluency_delay(stream->enc->run->pool, wr);
grab_after_ts = now_ts + fluency_delay;
US_LOG_VERBOSE("JPEG: Fluency: delay=%.03Lf, grab_after=%.03Lf", fluency_delay, grab_after_ts);
ready_job->hw = hw;
us_workers_pool_assign(stream->enc->run->pool, ready_wr);
US_LOG_DEBUG("JPEG: Assigned new frame in buffer=%d to worker=%s", hw->buf.index, ready_wr->name);
}
return NULL;
}
static void *_h264_thread(void *v_ctx) {
US_THREAD_SETTLE("str_h264");
_worker_context_s *ctx = v_ctx;
us_h264_stream_s *h264 = ctx->stream->run->h264;
ldf grab_after_ts = 0;
ldf last_encode_ts = us_get_now_monotonic();
while (!atomic_load(ctx->stop)) {
us_hw_buffer_s *hw = _get_latest_hw(ctx->queue);
if (hw == NULL) {
continue;
}
if (!us_memsink_server_check(h264->sink, NULL)) {
us_device_buffer_decref(hw);
US_LOG_VERBOSE("H264: Passed encoding because nobody is watching");
continue;
}
if (hw->raw.grab_ts < grab_after_ts) {
us_device_buffer_decref(hw);
US_LOG_VERBOSE("H264: Passed encoding for FPS limit: %u", h264->enc->run->fps_limit);
continue;
}
// Форсим кейфрейм, если от захвата давно не было фреймов
const ldf now_ts = us_get_now_monotonic();
const bool force_key = (last_encode_ts + 0.5 < now_ts);
us_h264_stream_process(h264, &hw->raw, force_key);
last_encode_ts = now_ts;
// M2M-енкодер увеличивает задержку на 100 милисекунд при 1080p, если скормить ему больше 30 FPS.
// Поэтому у нас есть два режима: 60 FPS для маленьких видео и 30 для 1920x1080(1200).
// Следующй фрейм захватывается не раньше, чем это требуется по FPS, минус небольшая
// погрешность (если захват неравномерный) - немного меньше 1/60, и примерно треть от 1/30.
const ldf frame_interval = (ldf)1 / h264->enc->run->fps_limit;
grab_after_ts = hw->raw.grab_ts + frame_interval - 0.01;
us_device_buffer_decref(hw);
job->hw = hw;
us_workers_pool_assign(stream->enc->run->pool, wr);
US_LOG_DEBUG("JPEG: Assigned new frame in buffer=%d to worker=%s", hw->buf.index, wr->name);
}
return NULL;
}
@@ -419,30 +371,131 @@ static void *_raw_thread(void *v_ctx) {
_worker_context_s *ctx = v_ctx;
while (!atomic_load(ctx->stop)) {
us_hw_buffer_s *hw = _get_latest_hw(ctx->queue);
us_capture_hwbuf_s *hw = _get_latest_hw(ctx->queue);
if (hw == NULL) {
continue;
}
if (!us_memsink_server_check(ctx->stream->raw_sink, NULL)) {
us_device_buffer_decref(hw);
if (us_memsink_server_check(ctx->stream->raw_sink, NULL)) {
us_memsink_server_put(ctx->stream->raw_sink, &hw->raw, false);
} else {
US_LOG_VERBOSE("RAW: Passed publishing because nobody is watching");
continue;
}
us_memsink_server_put(ctx->stream->raw_sink, &hw->raw, false);
us_device_buffer_decref(hw);
us_capture_hwbuf_decref(hw);
}
return NULL;
}
static us_hw_buffer_s *_get_latest_hw(us_queue_s *queue) {
us_hw_buffer_s *hw;
// Worker thread: drains the H264 queue, encodes the freshest grabbed frame
// and exposes it to the H264 sink. Skips encoding entirely while nobody is
// watching, and rate-limits itself against the encoder's fps_limit. Every
// dequeued buffer is decref'ed exactly once (via the `decref` label).
static void *_h264_thread(void *v_ctx) {
US_THREAD_SETTLE("str_h264");
_worker_context_s *ctx = v_ctx;
us_stream_s *stream = ctx->stream;
ldf grab_after_ts = 0;
while (!atomic_load(ctx->stop)) {
// _get_latest_hw() discards stale frames, keeping only the newest one.
us_capture_hwbuf_s *hw = _get_latest_hw(ctx->queue);
if (hw == NULL) {
continue;
}
if (!us_memsink_server_check(stream->h264_sink, NULL)) {
US_LOG_VERBOSE("H264: Passed encoding because nobody is watching");
goto decref;
}
if (hw->raw.grab_ts < grab_after_ts) {
US_LOG_DEBUG("H264: Passed encoding for FPS limit");
goto decref;
}
_stream_encode_expose_h264(ctx->stream, &hw->raw, false);
// The M2M encoder adds ~100 ms of latency at 1080p if it is fed more than 30 FPS.
// That is why there are two modes: 60 FPS for small videos and 30 for 1920x1080(1200).
// The next frame is grabbed no earlier than the FPS limit requires, minus a small
// tolerance (in case capture is uneven) - slightly less than 1/60 and about a third of 1/30.
const uint fps_limit = stream->run->h264_enc->run->fps_limit;
if (fps_limit > 0) {
const ldf frame_interval = (ldf)1 / fps_limit;
grab_after_ts = hw->raw.grab_ts + frame_interval - 0.01;
}
decref:
us_capture_hwbuf_decref(hw);
}
return NULL;
}
#ifdef WITH_V4P
static void *_drm_thread(void *v_ctx) {
US_THREAD_SETTLE("str_drm");
_worker_context_s *ctx = v_ctx;
us_stream_s *stream = ctx->stream;
// Close previously opened DRM for a stub
us_drm_close(stream->drm);
us_capture_hwbuf_s *prev_hw = NULL;
while (!atomic_load(ctx->stop)) {
# define CHECK(x_arg) if ((x_arg) < 0) { goto close; }
# define SLOWDOWN { \
const ldf m_next_ts = us_get_now_monotonic() + 1; \
while (!atomic_load(ctx->stop) && us_get_now_monotonic() < m_next_ts) { \
us_capture_hwbuf_s *m_pass_hw = _get_latest_hw(ctx->queue); \
if (m_pass_hw != NULL) { \
us_capture_hwbuf_decref(m_pass_hw); \
} \
} \
}
CHECK(us_drm_open(stream->drm, ctx->stream->cap));
while (!atomic_load(ctx->stop)) {
CHECK(us_drm_wait_for_vsync(stream->drm));
US_DELETE(prev_hw, us_capture_hwbuf_decref);
us_capture_hwbuf_s *hw = _get_latest_hw(ctx->queue);
if (hw == NULL) {
continue;
}
if (stream->drm->run->opened == 0) {
CHECK(us_drm_expose_dma(stream->drm, hw));
prev_hw = hw;
us_fpsi_meta_s meta = {.online = true}; // Online means live video
us_fpsi_update(stream->run->http->drm_fpsi, true, &meta);
continue;
}
CHECK(us_drm_expose_stub(stream->drm, stream->drm->run->opened, ctx->stream->cap));
us_capture_hwbuf_decref(hw);
us_fpsi_meta_s meta = {.online = false};
us_fpsi_update(stream->run->http->drm_fpsi, true, &meta);
SLOWDOWN;
}
close:
us_drm_close(stream->drm);
US_DELETE(prev_hw, us_capture_hwbuf_decref);
us_fpsi_meta_s meta = {.online = false};
us_fpsi_update(stream->run->http->drm_fpsi, false, &meta);
SLOWDOWN;
# undef SLOWDOWN
# undef CHECK
}
return NULL;
}
#endif
static us_capture_hwbuf_s *_get_latest_hw(us_queue_s *queue) {
us_capture_hwbuf_s *hw;
if (us_queue_get(queue, (void**)&hw, 0.1) < 0) {
return NULL;
}
while (!us_queue_is_empty(queue)) { // Берем только самый свежий кадр
us_device_buffer_decref(hw);
us_capture_hwbuf_decref(hw);
assert(!us_queue_get(queue, (void**)&hw, 0));
}
return hw;
@@ -451,25 +504,27 @@ static us_hw_buffer_s *_get_latest_hw(us_queue_s *queue) {
// Returns true if anyone is consuming JPEG output: an HTTP stream client, a
// pending snapshot request, or a JPEG memsink client. "Cached" because it only
// reads atomics updated elsewhere - it performs no sink I/O itself.
// NOTE(review): this span is a rendered diff with the +/- markers stripped -
// both the pre-refactor operands (run->http_has_clients / run->http_snapshot_requested)
// and the post-refactor ones (run->http->has_clients / run->http->snapshot_requested)
// are present, which cannot compile. The intended final code keeps only the
// `run->http->...` pair - resolve against the upstream commit before use.
static bool _stream_has_jpeg_clients_cached(us_stream_s *stream) {
const us_stream_runtime_s *const run = stream->run;
return (
atomic_load(&run->http_has_clients)
|| (atomic_load(&run->http_snapshot_requested) > 0)
atomic_load(&run->http->has_clients)
|| (atomic_load(&run->http->snapshot_requested) > 0)
|| (stream->jpeg_sink != NULL && atomic_load(&stream->jpeg_sink->has_clients))
);
}
// Returns true if ANY consumer exists: JPEG clients (see
// _stream_has_jpeg_clients_cached), an H264 sink client, a RAW sink client,
// or (with V4P) an attached DRM display, which always counts as a consumer.
// NOTE(review): diff artifact - the old condition (run->h264->sink->has_clients)
// and the new one (stream->h264_sink->has_clients) are both present here;
// the intended final code keeps only the `stream->h264_sink` form.
static bool _stream_has_any_clients_cached(us_stream_s *stream) {
const us_stream_runtime_s *const run = stream->run;
return (
_stream_has_jpeg_clients_cached(stream)
|| (run->h264 != NULL && atomic_load(&run->h264->sink->has_clients))
|| (stream->h264_sink != NULL && atomic_load(&stream->h264_sink->has_clients))
|| (stream->raw_sink != NULL && atomic_load(&stream->raw_sink->has_clients))
# ifdef WITH_V4P
|| (stream->drm != NULL)
# endif
);
}
static int _stream_init_loop(us_stream_s *stream) {
us_stream_runtime_s *const run = stream->run;
bool waiting_reported = false;
int once = 0;
while (!atomic_load(&stream->run->stop)) {
# ifdef WITH_GPIO
us_gpio_set_stream_online(false);
@@ -477,36 +532,33 @@ static int _stream_init_loop(us_stream_s *stream) {
// Флаги has_clients у синков не обновляются сами по себе, поэтому обновим их
// на каждой итерации старта стрима. После старта этим будут заниматься воркеры.
if (stream->jpeg_sink != NULL) {
us_memsink_server_check(stream->jpeg_sink, NULL);
}
if (stream->run->h264 != NULL) {
us_memsink_server_check(stream->run->h264->sink, NULL);
}
if (stream->raw_sink != NULL) {
us_memsink_server_check(stream->raw_sink, NULL);
}
# define UPDATE_SINK(x_sink) if (x_sink != NULL) { us_memsink_server_check(x_sink, NULL); }
UPDATE_SINK(stream->jpeg_sink);
UPDATE_SINK(stream->raw_sink);
UPDATE_SINK(stream->h264_sink);
# undef UPDATE_SINK
_stream_check_suicide(stream);
stream->dev->dma_export = (
stream->cap->dma_export = (
stream->enc->type == US_ENCODER_TYPE_M2M_VIDEO
|| stream->enc->type == US_ENCODER_TYPE_M2M_IMAGE
|| run->h264 != NULL
|| stream->h264_sink != NULL
# ifdef WITH_V4P
|| stream->drm != NULL
# endif
);
switch (us_device_open(stream->dev)) {
case -2:
if (!waiting_reported) {
waiting_reported = true;
US_LOG_INFO("Waiting for the capture device ...");
}
switch (us_capture_open(stream->cap)) {
case 0: break;
case US_ERROR_NO_DEVICE:
case US_ERROR_NO_DATA:
US_ONCE({ US_LOG_INFO("Waiting for the capture device ..."); });
goto offline_and_retry;
case -1:
waiting_reported = false;
default:
once = 0;
goto offline_and_retry;
default: break;
}
us_encoder_open(stream->enc, stream->dev);
us_encoder_open(stream->enc, stream->cap);
return 0;
offline_and_retry:
@@ -516,21 +568,25 @@ static int _stream_init_loop(us_stream_s *stream) {
}
if (count % 10 == 0) {
// Каждую секунду повторяем blank
uint width = stream->dev->run->width;
uint height = stream->dev->run->height;
uint width = stream->cap->run->width;
uint height = stream->cap->run->height;
if (width == 0 || height == 0) {
width = stream->dev->width;
height = stream->dev->height;
width = stream->cap->width;
height = stream->cap->height;
}
us_blank_draw(run->blank, "< NO SIGNAL >", width, height);
_stream_set_capture_state(stream, width, height, false, 0);
us_fpsi_meta_s meta = {0};
us_fpsi_frame_to_meta(run->blank->raw, &meta);
us_fpsi_update(run->http->captured_fpsi, false, &meta);
_stream_expose_jpeg(stream, run->blank->jpeg);
if (run->h264 != NULL) {
us_h264_stream_process(run->h264, run->blank->raw, true);
}
_stream_expose_raw(stream, run->blank->raw);
_stream_encode_expose_h264(stream, run->blank->raw, true);
# ifdef WITH_V4P
_stream_drm_ensure_no_signal(stream);
# endif
}
usleep(100 * 1000);
}
@@ -538,17 +594,42 @@ static int _stream_init_loop(us_stream_s *stream) {
return -1;
}
#ifdef WITH_V4P
// Keeps the DRM display showing a "no signal" state while the capture device
// is offline. Reopens DRM in stub mode (NULL capture) if it is not currently
// opened for live video, then asks it to render the no-signal screen. On any
// failure the DRM handle is closed so the next call retries from scratch.
// The drm_fpsi meta is always reported offline - there is no live video here.
static void _stream_drm_ensure_no_signal(us_stream_s *stream) {
if (stream->drm == NULL) {
return;
}
const us_fpsi_meta_s meta = {.online = false};
// opened <= 0: not open for live output - reopen in stub mode (NULL capture).
if (stream->drm->run->opened <= 0) {
us_drm_close(stream->drm);
if (us_drm_open(stream->drm, NULL) < 0) {
goto close;
}
}
if (us_drm_ensure_no_signal(stream->drm) < 0) {
goto close;
}
us_fpsi_update(stream->run->http->drm_fpsi, true, &meta);
return;
close:
us_fpsi_update(stream->run->http->drm_fpsi, false, &meta);
us_drm_close(stream->drm);
}
#endif
static void _stream_expose_jpeg(us_stream_s *stream, const us_frame_s *frame) {
us_stream_runtime_s *const run = stream->run;
int ri;
while ((ri = us_ring_producer_acquire(run->http_jpeg_ring, 0)) < 0) {
while ((ri = us_ring_producer_acquire(run->http->jpeg_ring, 0)) < 0) {
if (atomic_load(&run->stop)) {
return;
}
}
us_frame_s *const dest = run->http_jpeg_ring->items[ri];
us_frame_s *const dest = run->http->jpeg_ring->items[ri];
us_frame_copy(frame, dest);
us_ring_producer_release(run->http_jpeg_ring, ri);
us_ring_producer_release(run->http->jpeg_ring, ri);
if (stream->jpeg_sink != NULL) {
us_memsink_server_put(stream->jpeg_sink, dest, NULL);
}
@@ -560,19 +641,46 @@ static void _stream_expose_raw(us_stream_s *stream, const us_frame_s *frame) {
}
}
// Encodes one frame to H264 via the M2M encoder and pushes the result to the
// H264 memsink. No-op if no H264 sink is configured. JPEG input is first
// decoded into h264_tmp_src (the M2M encoder needs raw pixels). A keyframe is
// forced either by the caller (force_key) or when a sink client requested one
// via h264_key_requested (the flag is consumed here and re-armed by
// us_memsink_server_put). The h264_fpsi meta ends up online only if both the
// compression and the sink put succeeded.
static void _stream_encode_expose_h264(us_stream_s *stream, const us_frame_s *frame, bool force_key) {
if (stream->h264_sink == NULL) {
return;
}
us_stream_runtime_s *run = stream->run;
us_fpsi_meta_s meta = {.online = false};
if (us_is_jpeg(frame->format)) {
if (us_unjpeg(frame, run->h264_tmp_src, true) < 0) {
goto done;
}
frame = run->h264_tmp_src;
}
if (run->h264_key_requested) {
US_LOG_INFO("H264: Requested keyframe by a sink client");
run->h264_key_requested = false;
force_key = true;
}
// us_m2m_encoder_compress() returns 0 on success; us_memsink_server_put()
// returns 0 on success, hence the negations to derive the online flag.
if (!us_m2m_encoder_compress(run->h264_enc, frame, run->h264_dest, force_key)) {
meta.online = !us_memsink_server_put(stream->h264_sink, run->h264_dest, &run->h264_key_requested);
}
done:
us_fpsi_update(run->http->h264_fpsi, meta.online, &meta);
}
// Implements --exit-on-no-clients: if no HTTP/sink client has been seen for
// longer than exit_on_no_clients seconds, the process terminates itself via
// us_process_suicide(). While clients exist, the last-request timestamp is
// refreshed so the deadline keeps sliding forward.
// NOTE(review): diff artifact - each timestamp access appears twice, once with
// the pre-refactor member (run->http_last_request_ts) and once with the
// post-refactor one (run->http->last_request_ts). Keeping both is a
// redeclaration/duplicate-store error; the intended final code uses only the
// `run->http->last_request_ts` variants - resolve against the upstream commit.
static void _stream_check_suicide(us_stream_s *stream) {
if (stream->exit_on_no_clients == 0) {
return;
}
us_stream_runtime_s *const run = stream->run;
const ldf now_ts = us_get_now_monotonic();
const ull http_last_request_ts = atomic_load(&run->http_last_request_ts); // Seconds
const ull http_last_request_ts = atomic_load(&run->http->last_request_ts); // Seconds
if (_stream_has_any_clients_cached(stream)) {
atomic_store(&run->http_last_request_ts, now_ts);
atomic_store(&run->http->last_request_ts, now_ts);
} else if (http_last_request_ts + stream->exit_on_no_clients < now_ts) {
US_LOG_INFO("No requests or HTTP/sink clients found in last %u seconds, exiting ...",
stream->exit_on_no_clients);
us_process_suicide();
atomic_store(&run->http_last_request_ts, now_ts);
atomic_store(&run->http->last_request_ts, now_ts);
}
}

View File

@@ -29,33 +29,52 @@
#include "../libs/types.h"
#include "../libs/queue.h"
#include "../libs/ring.h"
#include "../libs/frame.h"
#include "../libs/memsink.h"
#include "../libs/device.h"
#include "../libs/capture.h"
#include "../libs/fpsi.h"
#ifdef WITH_V4P
# include "../libs/drm/drm.h"
#endif
#include "blank.h"
#include "encoder.h"
#include "h264.h"
#include "m2m.h"
typedef struct {
us_h264_stream_s *h264;
# ifdef WITH_V4P
atomic_bool drm_live;
us_fpsi_s *drm_fpsi;
# endif
us_ring_s *http_jpeg_ring;
atomic_bool http_has_clients;
atomic_uint http_snapshot_requested;
atomic_ullong http_last_request_ts; // Seconds
atomic_ullong http_capture_state; // Bits
atomic_bool h264_online;
us_fpsi_s *h264_fpsi;
us_blank_s *blank;
us_ring_s *jpeg_ring;
atomic_bool has_clients;
atomic_uint snapshot_requested;
atomic_ullong last_request_ts; // Seconds
us_fpsi_s *captured_fpsi;
} us_stream_http_s;
atomic_bool stop;
typedef struct {
us_stream_http_s *http;
us_m2m_encoder_s *h264_enc;
us_frame_s *h264_tmp_src;
us_frame_s *h264_dest;
bool h264_key_requested;
us_blank_s *blank;
atomic_bool stop;
} us_stream_runtime_s;
typedef struct {
us_device_s *dev;
us_capture_s *cap;
us_encoder_s *enc;
int last_as_blank;
bool slowdown;
uint error_delay;
uint exit_on_no_clients;
@@ -68,14 +87,16 @@ typedef struct {
uint h264_gop;
char *h264_m2m_path;
# ifdef WITH_V4P
us_drm_s *drm;
# endif
us_stream_runtime_s *run;
} us_stream_s;
us_stream_s *us_stream_init(us_device_s *dev, us_encoder_s *enc);
us_stream_s *us_stream_init(us_capture_s *cap, us_encoder_s *enc);
void us_stream_destroy(us_stream_s *stream);
void us_stream_loop(us_stream_s *stream);
void us_stream_loop_break(us_stream_s *stream);
void us_stream_get_capture_state(us_stream_s *stream, uint *width, uint *height, bool *online, uint *captured_fps);

View File

@@ -22,12 +22,22 @@
#include "workers.h"
#include <stdatomic.h>
#include <pthread.h>
#include "../libs/types.h"
#include "../libs/tools.h"
#include "../libs/threading.h"
#include "../libs/logging.h"
#include "../libs/list.h"
static void *_worker_thread(void *v_worker);
us_workers_pool_s *us_workers_pool_init(
const char *name, const char *wr_prefix, unsigned n_workers, long double desired_interval,
const char *name, const char *wr_prefix, uint n_workers, ldf desired_interval,
us_workers_pool_job_init_f job_init, void *job_init_arg,
us_workers_pool_job_destroy_f job_destroy,
us_workers_pool_run_job_f run_job) {
@@ -44,28 +54,28 @@ us_workers_pool_s *us_workers_pool_init(
atomic_init(&pool->stop, false);
pool->n_workers = n_workers;
US_CALLOC(pool->workers, pool->n_workers);
US_MUTEX_INIT(pool->free_workers_mutex);
US_COND_INIT(pool->free_workers_cond);
for (unsigned number = 0; number < pool->n_workers; ++number) {
# define WR(x_next) pool->workers[number].x_next
for (uint index = 0; index < pool->n_workers; ++index) {
us_worker_s *wr;
US_CALLOC(wr, 1);
WR(number) = number;
US_ASPRINTF(WR(name), "%s-%u", wr_prefix, number);
wr->number = index;
US_ASPRINTF(wr->name, "%s-%u", wr_prefix, index);
US_MUTEX_INIT(WR(has_job_mutex));
atomic_init(&WR(has_job), false);
US_COND_INIT(WR(has_job_cond));
US_MUTEX_INIT(wr->has_job_mutex);
atomic_init(&wr->has_job, false);
US_COND_INIT(wr->has_job_cond);
WR(pool) = pool;
WR(job) = job_init(job_init_arg);
wr->pool = pool;
wr->job = job_init(job_init_arg);
US_THREAD_CREATE(WR(tid), _worker_thread, (void*)&(pool->workers[number]));
US_THREAD_CREATE(wr->tid, _worker_thread, (void*)wr);
pool->free_workers += 1;
# undef WR
US_LIST_APPEND(pool->workers, wr);
}
return pool;
}
@@ -74,98 +84,70 @@ void us_workers_pool_destroy(us_workers_pool_s *pool) {
US_LOG_INFO("Destroying workers pool %s ...", pool->name);
atomic_store(&pool->stop, true);
for (unsigned number = 0; number < pool->n_workers; ++number) {
# define WR(x_next) pool->workers[number].x_next
US_LIST_ITERATE(pool->workers, wr, { // cppcheck-suppress constStatement
US_MUTEX_LOCK(wr->has_job_mutex);
atomic_store(&wr->has_job, true); // Final job: die
US_MUTEX_UNLOCK(wr->has_job_mutex);
US_COND_SIGNAL(wr->has_job_cond);
US_MUTEX_LOCK(WR(has_job_mutex));
atomic_store(&WR(has_job), true); // Final job: die
US_MUTEX_UNLOCK(WR(has_job_mutex));
US_COND_SIGNAL(WR(has_job_cond));
US_THREAD_JOIN(wr->tid);
US_MUTEX_DESTROY(wr->has_job_mutex);
US_COND_DESTROY(wr->has_job_cond);
US_THREAD_JOIN(WR(tid));
US_MUTEX_DESTROY(WR(has_job_mutex));
US_COND_DESTROY(WR(has_job_cond));
pool->job_destroy(wr->job);
free(WR(name));
pool->job_destroy(WR(job));
# undef WR
}
free(wr->name);
free(wr);
});
US_MUTEX_DESTROY(pool->free_workers_mutex);
US_COND_DESTROY(pool->free_workers_cond);
free(pool->workers);
free(pool);
}
us_worker_s *us_workers_pool_wait(us_workers_pool_s *pool) {
us_worker_s *ready_wr = NULL;
US_MUTEX_LOCK(pool->free_workers_mutex);
US_COND_WAIT_FOR(pool->free_workers, pool->free_workers_cond, pool->free_workers_mutex);
US_MUTEX_UNLOCK(pool->free_workers_mutex);
if (pool->oldest_wr && !atomic_load(&pool->oldest_wr->has_job)) {
ready_wr = pool->oldest_wr;
ready_wr->job_timely = true;
pool->oldest_wr = pool->oldest_wr->next_wr;
} else {
for (unsigned number = 0; number < pool->n_workers; ++number) {
if (
!atomic_load(&pool->workers[number].has_job) && (
ready_wr == NULL
|| ready_wr->job_start_ts < pool->workers[number].job_start_ts
)
) {
ready_wr = &pool->workers[number];
break;
}
us_worker_s *found = NULL;
US_LIST_ITERATE(pool->workers, wr, { // cppcheck-suppress constStatement
if (!atomic_load(&wr->has_job) && (found == NULL || found->job_start_ts <= wr->job_start_ts)) {
found = wr;
}
assert(ready_wr != NULL);
ready_wr->job_timely = false; // Освободился воркер, получивший задание позже (или самый первый при самом первом захвате)
});
assert(found != NULL);
US_LIST_REMOVE(pool->workers, found);
US_LIST_APPEND(pool->workers, found); // Перемещаем в конец списка
found->job_timely = (found->job_start_ts > pool->job_timely_ts);
if (found->job_timely) {
pool->job_timely_ts = found->job_start_ts;
}
return ready_wr;
return found;
}
void us_workers_pool_assign(us_workers_pool_s *pool, us_worker_s *ready_wr/*, void *job*/) {
if (pool->oldest_wr == NULL) {
pool->oldest_wr = ready_wr;
pool->latest_wr = pool->oldest_wr;
} else {
if (ready_wr->next_wr != NULL) {
ready_wr->next_wr->prev_wr = ready_wr->prev_wr;
}
if (ready_wr->prev_wr != NULL) {
ready_wr->prev_wr->next_wr = ready_wr->next_wr;
}
ready_wr->prev_wr = pool->latest_wr;
pool->latest_wr->next_wr = ready_wr;
pool->latest_wr = ready_wr;
}
pool->latest_wr->next_wr = NULL;
US_MUTEX_LOCK(ready_wr->has_job_mutex);
//ready_wr->job = job;
atomic_store(&ready_wr->has_job, true);
US_MUTEX_UNLOCK(ready_wr->has_job_mutex);
US_COND_SIGNAL(ready_wr->has_job_cond);
void us_workers_pool_assign(us_workers_pool_s *pool, us_worker_s *wr) {
US_MUTEX_LOCK(wr->has_job_mutex);
atomic_store(&wr->has_job, true);
US_MUTEX_UNLOCK(wr->has_job_mutex);
US_COND_SIGNAL(wr->has_job_cond);
US_MUTEX_LOCK(pool->free_workers_mutex);
pool->free_workers -= 1;
US_MUTEX_UNLOCK(pool->free_workers_mutex);
}
long double us_workers_pool_get_fluency_delay(us_workers_pool_s *pool, const us_worker_s *ready_wr) {
const long double approx_job_time = pool->approx_job_time * 0.9 + ready_wr->last_job_time * 0.1;
ldf us_workers_pool_get_fluency_delay(us_workers_pool_s *pool, const us_worker_s *wr) {
const ldf approx_job_time = pool->approx_job_time * 0.9 + wr->last_job_time * 0.1;
US_LOG_VERBOSE("Correcting pool's %s approx_job_time: %.3Lf -> %.3Lf (last_job_time=%.3Lf)",
pool->name, pool->approx_job_time, approx_job_time, ready_wr->last_job_time);
pool->name, pool->approx_job_time, approx_job_time, wr->last_job_time);
pool->approx_job_time = approx_job_time;
const long double min_delay = pool->approx_job_time / pool->n_workers; // Среднее время работы размазывается на N воркеров
const ldf min_delay = pool->approx_job_time / pool->n_workers; // Среднее время работы размазывается на N воркеров
if (pool->desired_interval > 0 && min_delay > 0 && pool->desired_interval > min_delay) {
// Искусственное время задержки на основе желаемого FPS, если включен --desired-fps
@@ -176,7 +158,7 @@ long double us_workers_pool_get_fluency_delay(us_workers_pool_s *pool, const us_
}
static void *_worker_thread(void *v_worker) {
us_worker_s *wr = v_worker;
us_worker_s *const wr = v_worker;
US_THREAD_SETTLE("%s", wr->name);
US_LOG_DEBUG("Hello! I am a worker %s ^_^", wr->name);
@@ -189,13 +171,12 @@ static void *_worker_thread(void *v_worker) {
US_MUTEX_UNLOCK(wr->has_job_mutex);
if (!atomic_load(&wr->pool->stop)) {
const long double job_start_ts = us_get_now_monotonic();
const ldf job_start_ts = us_get_now_monotonic();
wr->job_failed = !wr->pool->run_job(wr);
if (!wr->job_failed) {
wr->job_start_ts = job_start_ts;
wr->last_job_time = us_get_now_monotonic() - wr->job_start_ts;
}
//wr->job = NULL;
atomic_store(&wr->has_job, false);
}

View File

@@ -22,37 +22,32 @@
#pragma once
#include <stdbool.h>
#include <stdatomic.h>
#include <sys/types.h>
#include <pthread.h>
#include "../libs/tools.h"
#include "../libs/threading.h"
#include "../libs/logging.h"
#include "../libs/types.h"
#include "../libs/list.h"
typedef struct us_worker_sx {
pthread_t tid;
unsigned number;
char *name;
pthread_t tid;
uint number;
char *name;
long double last_job_time;
ldf last_job_time;
pthread_mutex_t has_job_mutex;
void *job;
atomic_bool has_job;
bool job_timely;
bool job_failed;
long double job_start_ts;
ldf job_start_ts;
pthread_cond_t has_job_cond;
struct us_worker_sx *prev_wr;
struct us_worker_sx *next_wr;
struct us_workers_pool_sx *pool;
US_LIST_DECLARE;
} us_worker_s;
typedef void *(*us_workers_pool_job_init_f)(void *arg);
@@ -61,20 +56,19 @@ typedef bool (*us_workers_pool_run_job_f)(us_worker_s *wr);
typedef struct us_workers_pool_sx {
const char *name;
long double desired_interval;
ldf desired_interval;
us_workers_pool_job_destroy_f job_destroy;
us_workers_pool_run_job_f run_job;
unsigned n_workers;
uint n_workers;
us_worker_s *workers;
us_worker_s *oldest_wr;
us_worker_s *latest_wr;
ldf job_timely_ts;
long double approx_job_time;
ldf approx_job_time;
pthread_mutex_t free_workers_mutex;
unsigned free_workers;
uint free_workers;
pthread_cond_t free_workers_cond;
atomic_bool stop;
@@ -82,7 +76,7 @@ typedef struct us_workers_pool_sx {
us_workers_pool_s *us_workers_pool_init(
const char *name, const char *wr_prefix, unsigned n_workers, long double desired_interval,
const char *name, const char *wr_prefix, uint n_workers, ldf desired_interval,
us_workers_pool_job_init_f job_init, void *job_init_arg,
us_workers_pool_job_destroy_f job_destroy,
us_workers_pool_run_job_f run_job);
@@ -90,6 +84,6 @@ us_workers_pool_s *us_workers_pool_init(
void us_workers_pool_destroy(us_workers_pool_s *pool);
us_worker_s *us_workers_pool_wait(us_workers_pool_s *pool);
void us_workers_pool_assign(us_workers_pool_s *pool, us_worker_s *ready_wr/*, void *job*/);
void us_workers_pool_assign(us_workers_pool_s *pool, us_worker_s *ready_wr);
long double us_workers_pool_get_fluency_delay(us_workers_pool_s *pool, const us_worker_s *ready_wr);
ldf us_workers_pool_get_fluency_delay(us_workers_pool_s *pool, const us_worker_s *ready_wr);

View File

@@ -23,6 +23,7 @@
#include <stdio.h>
#include <stdlib.h>
#include <stdatomic.h>
#include <limits.h>
#include <string.h>
#include <unistd.h>
#include <getopt.h>
@@ -36,14 +37,15 @@
#include <sys/stat.h>
#include "../libs/types.h"
#include "../libs/errors.h"
#include "../libs/const.h"
#include "../libs/tools.h"
#include "../libs/logging.h"
#include "../libs/device.h"
#include "../libs/capture.h"
#include "../libs/signal.h"
#include "../libs/options.h"
#include "drm.h"
#include "../libs/drm/drm.h"
enum _OPT_VALUES {
@@ -160,60 +162,44 @@ static void _signal_handler(int signum) {
static void _main_loop(void) {
us_drm_s *drm = us_drm_init();
drm->port = "HDMI-A-2";
us_device_s *dev = us_device_init();
dev->path = "/dev/kvmd-video";
dev->n_bufs = 6;
dev->format = V4L2_PIX_FMT_RGB24;
dev->dv_timings = true;
dev->persistent = true;
dev->dma_export = true;
dev->dma_required = true;
us_capture_s *cap = us_capture_init();
cap->path = "/dev/kvmd-video";
cap->n_bufs = 6;
cap->format = V4L2_PIX_FMT_RGB24;
cap->format_swap_rgb = true;
cap->dv_timings = true;
cap->persistent = true;
cap->dma_export = true;
cap->dma_required = true;
int once = 0;
ldf blank_at_ts = 0;
int drm_opened = -1;
while (!atomic_load(&_g_stop)) {
# define CHECK(x_arg) if ((x_arg) < 0) { goto close; }
if (drm_opened <= 0) {
blank_at_ts = 0;
CHECK(drm_opened = us_drm_open(drm, NULL));
if (drm->run->opened <= 0) {
CHECK(us_drm_open(drm, NULL));
}
assert(drm_opened > 0);
if (atomic_load(&_g_ustreamer_online)) {
blank_at_ts = 0;
US_ONCE({ US_LOG_INFO("DRM: Online stream is active, stopping capture ..."); });
US_ONCE({ US_LOG_INFO("DRM: Online stream is active, pausing the service ..."); });
CHECK(us_drm_wait_for_vsync(drm));
CHECK(us_drm_expose_stub(drm, US_DRM_STUB_BUSY, NULL));
_slowdown();
continue;
}
if (us_device_open(dev) < 0) {
ldf now_ts = us_get_now_monotonic();
if (blank_at_ts == 0) {
blank_at_ts = now_ts + 5;
}
if (now_ts <= blank_at_ts) {
CHECK(us_drm_wait_for_vsync(drm));
CHECK(us_drm_expose_stub(drm, US_DRM_STUB_NO_SIGNAL, NULL));
} else {
US_ONCE({ US_LOG_INFO("DRM: Turning off the display by timeout ..."); });
CHECK(us_drm_dpms_power_off(drm));
}
if (us_capture_open(cap) < 0) {
CHECK(us_drm_ensure_no_signal(drm));
_slowdown();
continue;
}
once = 0;
blank_at_ts = 0;
us_drm_close(drm);
CHECK(drm_opened = us_drm_open(drm, dev));
CHECK(us_drm_open(drm, cap));
us_hw_buffer_s *prev_hw = NULL;
us_capture_hwbuf_s *prev_hw = NULL;
while (!atomic_load(&_g_stop)) {
if (atomic_load(&_g_ustreamer_online)) {
goto close;
@@ -222,42 +208,37 @@ static void _main_loop(void) {
CHECK(us_drm_wait_for_vsync(drm));
if (prev_hw != NULL) {
CHECK(us_device_release_buffer(dev, prev_hw));
CHECK(us_capture_hwbuf_release(cap, prev_hw));
prev_hw = NULL;
}
us_hw_buffer_s *hw;
switch (us_device_grab_buffer(dev, &hw)) {
case -2: continue; // Broken frame
case -1: goto close; // Any error
default: break; // Grabbed on >= 0
us_capture_hwbuf_s *hw;
switch (us_capture_hwbuf_grab(cap, &hw)) {
case 0 ... INT_MAX: break; // Grabbed buffer number
case US_ERROR_NO_DATA: continue; // Broken frame
default: goto close; // Any error
}
if (drm_opened == 0) {
if (drm->run->opened == 0) {
CHECK(us_drm_expose_dma(drm, hw));
prev_hw = hw;
} else {
CHECK(us_drm_expose_stub(drm, drm_opened, dev));
CHECK(us_device_release_buffer(dev, hw));
continue;
}
if (drm_opened > 0) {
_slowdown();
}
CHECK(us_drm_expose_stub(drm, drm->run->opened, cap));
CHECK(us_capture_hwbuf_release(cap, hw));
_slowdown();
}
close:
us_drm_close(drm);
drm_opened = -1;
us_device_close(dev);
us_capture_close(cap);
_slowdown();
# undef CHECK
}
us_device_destroy(dev);
us_capture_destroy(cap);
us_drm_destroy(drm);
}