Compare commits

...

58 Commits
v5.56 ... v6.0

Author SHA1 Message Date
Maxim Devaev  e1ef86146f  Bump version: 5.59 → 6.0  2024-03-06 21:50:47 +02:00
Maxim Devaev  8f3a475a32  Bump version: 5.58 → 5.59  2024-03-06 20:56:38 +02:00
Maxim Devaev  be5f63d64d  noted TC358743 errors  2024-03-06 01:02:52 +02:00
Maxim Devaev  40e17b05b3  memsink client: bump last_as_blank_ts on every flock()  2024-03-06 00:43:15 +02:00
Maxim Devaev  0b8940d93d  limit fps by m2m hardware to reduce latency  2024-03-05 21:49:39 +02:00
Maxim Devaev  e92002c3d8  repeat blank every second on offline  2024-03-05 14:32:45 +02:00
Maxim Devaev  e558b0f1a1  Issue #264: Bring back BSD compatibility in strerror hacks  2024-03-05 14:19:55 +02:00
Maxim Devaev  b5784149b2  fix  2024-03-05 13:49:18 +02:00
Maxim Devaev  55b6a3e933  improved logging  2024-03-05 13:47:29 +02:00
Maxim Devaev  f7c2948477  improved memsink checks performance  2024-03-05 13:44:54 +02:00
Maxim Devaev  c55b6c4d7d  lint fix  2024-03-04 17:39:39 +02:00
Maxim Devaev  442790486c  fixed empty label for goto  2024-03-04 17:19:30 +02:00
Maxim Devaev  bbc7ceb110  fix  2024-03-04 08:32:09 +02:00
Maxim Devaev  2ffa561eb1  reduced snapshot timeout to error_delay*3  2024-03-04 07:47:14 +02:00
Maxim Devaev  490d833983  refactoring  2024-03-04 07:22:48 +02:00
Maxim Devaev  0b3a1eb963  deprecated --last-as-blank  2024-03-04 07:18:45 +02:00
Maxim Devaev  7fd5eb229f  fixed offline state  2024-03-04 03:54:16 +02:00
Maxim Devaev  98b5e52a68  block signals in threads  2024-03-04 03:38:45 +02:00
Maxim Devaev  c8dc5119fe  signal lib to reduce duplicating code  2024-03-04 03:12:14 +02:00
Maxim Devaev  b556dfb897  improved persistent logic  2024-03-04 01:50:34 +02:00
Maxim Devaev  06eda04180  always generate blanks for offline snapshots  2024-03-03 22:15:25 +02:00
Maxim Devaev  05bba86c63  refactoring  2024-03-03 21:28:25 +02:00
Maxim Devaev  6827a72097  failed if dv timings are not available  2024-03-03 20:04:47 +02:00
Maxim Devaev  299b3886af  improved messages  2024-03-03 19:02:59 +02:00
Maxim Devaev  f9bc5666b8  refactoring  2024-03-03 18:44:13 +02:00
Maxim Devaev  c9cb0a416e  fixed persistent timeout  2024-03-03 08:23:18 +02:00
Maxim Devaev  ffa68a86a6  refactoring  2024-03-03 08:10:33 +02:00
Maxim Devaev  8fe411aa8b  compress only lastest frame  2024-03-03 07:05:00 +02:00
Maxim Devaev  36dd5d1533  pass encoders if there is no clients  2024-03-03 06:27:19 +02:00
Maxim Devaev  33b9bff0b9  atomic capture state for http  2024-03-03 06:04:48 +02:00
Maxim Devaev  c24d6338e2  Issue #228: Request fresh snapshot from jpeg encoder  2024-03-03 04:59:12 +02:00
Maxim Devaev  8cb6fc4e78  refactoring  2024-03-03 03:24:40 +02:00
Maxim Devaev  a9dfff84e6  fix  2024-03-03 02:55:13 +02:00
Maxim Devaev  988a91634a  fixed force_key logic for slowdown  2024-03-03 02:51:53 +02:00
Maxim Devaev  8f6df3b455  Issue #263: -latomic is required now  2024-03-03 01:45:16 +02:00
Maxim Devaev  ef47fa4c74  jpeg in a separate thread  2024-03-03 01:04:06 +02:00
Maxim Devaev  f2f560a345  h264 encoder in separate thread  2024-03-02 23:06:06 +02:00
Maxim Devaev  6a0ee68692  renamed captured_fps to http_captured_fps  2024-03-02 21:33:55 +02:00
Maxim Devaev  72741b90f4  releaser threads  2024-03-02 21:07:46 +02:00
Maxim Devaev  0296ab60c3  added device timeout error message  2024-03-02 20:16:34 +02:00
Maxim Devaev  77a53347c3  refactoring  2024-03-02 19:37:50 +02:00
Maxim Devaev  c32ea286f2  Bump version: 5.57 → 5.58  2024-03-02 19:11:06 +02:00
Maxim Devaev  b2fb857f5b  refactoring  2024-03-02 19:09:53 +02:00
Maxim Devaev  20cdabc8a4  lint fix  2024-03-02 10:42:30 +02:00
Maxim Devaev  e2f4c193e3  refactoring  2024-03-02 10:08:06 +02:00
Maxim Devaev  b4aa9593dc  refactoring  2024-03-02 09:59:41 +02:00
Maxim Devaev  20c729893b  refactoring  2024-03-02 09:56:44 +02:00
Maxim Devaev  a00f49331c  wait (select) device in grab function  2024-03-02 09:33:45 +02:00
Maxim Devaev  85308e48fd  stream: null hw buffer pointer after encoding  2024-03-02 07:54:38 +02:00
Maxim Devaev  ff08a0fb25  refactoring  2024-03-02 07:23:18 +02:00
Maxim Devaev  6145b69c97  refactoring  2024-03-02 02:09:54 +02:00
Maxim Devaev  cfc5ae1b94  v4p: using /dev/dri/by-path/platform-gpu-card instead of card0  2024-03-02 01:14:15 +02:00
Maxim Devaev  54b221aabd  moved exit_on_no_clients logic from http to stream loop  2024-03-01 09:40:51 +02:00
Maxim Devaev  dabee9d47a  gitignores ustreamer-v4p  2024-03-01 08:22:14 +02:00
Maxim Devaev  e30520d9f3  refactoring  2024-03-01 08:11:37 +02:00
Maxim Devaev  8f0acb2176  Bump version: 5.56 → 5.57  2024-03-01 04:33:13 +02:00
Maxim Devaev  8edeff8160  fixed makefile  2024-03-01 04:21:45 +02:00
Maxim Devaev  cacec0d25c  refactoring  2024-03-01 04:09:16 +02:00
57 changed files with 1657 additions and 1164 deletions

View File

@@ -1,7 +1,7 @@
[bumpversion]
commit = True
tag = True
current_version = 5.56
current_version = 6.0
parse = (?P<major>\d+)\.(?P<minor>\d+)
serialize =
{major}.{minor}

2
.gitignore vendored
View File

@@ -7,7 +7,7 @@
/python/ustreamer.egg-info/
/janus/build/
/ustreamer
/ustreamer-dump
/ustreamer-*
/config.mk
vgcore.*
*.sock

View File

@@ -36,8 +36,8 @@ endif
apps:
$(MAKE) -C src
for i in src/ustreamer.bin src/ustreamer-*.bin; do \
test ! -x $$i || ln -sf $$i .; \
for i in src/*.bin; do \
test ! -x $$i || ln -sf $$i `basename $$i .bin`; \
done

View File

@@ -85,13 +85,13 @@ Without arguments, ```ustreamer``` will try to open ```/dev/video0``` with 640x4
:exclamation: Please note that since µStreamer v2.0 cross-domain requests were disabled by default for [security reasons](https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS). To enable the old behavior, use the option `--allow-origin=\*`.
The recommended way of running µStreamer with [Auvidea B101](https://www.raspberrypi.org/forums/viewtopic.php?f=38&t=120702&start=400#p1339178) on Raspberry Pi:
The recommended way of running µStreamer with [TC358743-based capture device](https://www.raspberrypi.org/forums/viewtopic.php?f=38&t=120702&start=400#p1339178) on Raspberry Pi:
```
$ ./ustreamer \
--format=uyvy \ # Device input format
--encoder=m2m-image \ # Hardware encoding on V4L2 M2M driver
--workers=3 \ # Workers number
--persistent \ # Don't re-initialize device on timeout (for example when HDMI cable was disconnected)
--persistent \ # Suppress repetitive signal source errors (for example when HDMI cable was disconnected)
--dv-timings \ # Use DV-timings
--drop-same-frames=30 # Save the traffic
```
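
For the cross-origin note above, a minimal sketch of restoring the old permissive behavior with `--allow-origin=\*`; the bind address and port values here are illustrative and are not taken from this changeset:
```
$ ./ustreamer \
    --host=0.0.0.0 \ # Listen address (illustrative)
    --port=8080 \ # HTTP port (illustrative)
    --allow-origin=\* # Re-enable cross-domain requests
```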

View File

@@ -170,7 +170,7 @@ int us_audio_get_encoded(us_audio_s *audio, uint8_t *data, size_t *size, uint64_
if (ri < 0) {
return -2;
}
_enc_buffer_s *const buf = audio->enc_ring->items[ri];
const _enc_buffer_s *const buf = audio->enc_ring->items[ri];
if (*size < buf->used) {
us_ring_consumer_release(audio->enc_ring, ri);
return -3;
@@ -195,7 +195,7 @@ static _enc_buffer_s *_enc_buffer_init(void) {
}
static void *_pcm_thread(void *v_audio) {
US_THREAD_RENAME("us_a_pcm");
US_THREAD_SETTLE("us_a_pcm");
us_audio_s *const audio = (us_audio_s *)v_audio;
uint8_t in[_MAX_BUF8];
@@ -225,7 +225,7 @@ static void *_pcm_thread(void *v_audio) {
}
static void *_encoder_thread(void *v_audio) {
US_THREAD_RENAME("us_a_enc");
US_THREAD_SETTLE("us_a_enc");
us_audio_s *const audio = (us_audio_s *)v_audio;
int16_t in_res[_MAX_BUF16];

View File

@@ -93,10 +93,12 @@ void us_janus_client_send(us_janus_client_s *client, const us_rtp_s *rtp) {
}
static void *_video_thread(void *v_client) {
US_THREAD_SETTLE("us_c_video");
return _common_thread(v_client, true);
}
static void *_audio_thread(void *v_client) {
US_THREAD_SETTLE("us_c_audio");
return _common_thread(v_client, false);
}

View File

@@ -100,14 +100,13 @@ janus_plugin *create(void);
static void *_video_rtp_thread(void *arg) {
(void)arg;
US_THREAD_RENAME("us_video_rtp");
US_THREAD_SETTLE("us_video_rtp");
atomic_store(&_g_video_rtp_tid_created, true);
while (!_STOP) {
const int ri = us_ring_consumer_acquire(_g_video_ring, 0.1);
if (ri >= 0) {
us_frame_s *frame = _g_video_ring->items[ri];
const us_frame_s *const frame = _g_video_ring->items[ri];
_LOCK_VIDEO;
const bool zero_playout_delay = (frame->gop == 0);
us_rtpv_wrap(_g_rtpv, frame, zero_playout_delay);
@@ -120,8 +119,7 @@ static void *_video_rtp_thread(void *arg) {
static void *_video_sink_thread(void *arg) {
(void)arg;
US_THREAD_RENAME("us_video_sink");
US_THREAD_SETTLE("us_video_sink");
atomic_store(&_g_video_sink_tid_created, true);
us_frame_s *drop = us_frame_init();
@@ -185,15 +183,16 @@ static void *_video_sink_thread(void *arg) {
US_JLOG_INFO("video", "Memsink closed");
sleep(1); // error_delay
}
us_frame_destroy(drop);
return NULL;
}
static void *_audio_thread(void *arg) {
(void)arg;
US_THREAD_RENAME("us_audio");
US_THREAD_SETTLE("us_audio");
atomic_store(&_g_audio_tid_created, true);
assert(_g_config->audio_dev_name != NULL);
assert(_g_config->tc358743_dev_path != NULL);

View File

@@ -1,6 +1,6 @@
.\" Manpage for ustreamer-dump.
.\" Open an issue or pull request to https://github.com/pikvm/ustreamer to correct errors or typos
.TH USTREAMER-DUMP 1 "version 5.56" "January 2021"
.TH USTREAMER-DUMP 1 "version 6.0" "January 2021"
.SH NAME
ustreamer-dump \- Dump uStreamer's memory sink to file
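
As a usage sketch for the description above, reading a named memory sink into a file; the option names and values are assumed from the tool's typical invocation and are not shown in this diff:
```
$ ./ustreamer-dump \
    --sink=test \ # Memory sink name (assumed placeholder)
    --output=dump.mjpeg # Output file path (assumed placeholder)
```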

View File

@@ -1,6 +1,6 @@
.\" Manpage for ustreamer.
.\" Open an issue or pull request to https://github.com/pikvm/ustreamer to correct errors or typos
.TH USTREAMER 1 "version 5.56" "November 2020"
.TH USTREAMER 1 "version 6.0" "November 2020"
.SH NAME
ustreamer \- stream MJPEG video from any V4L2 device to the network
@@ -17,7 +17,7 @@ Without arguments, \fBustreamer\fR will try to open \fB/dev/video0\fR with 640x4
Please note that since µStreamer v2\.0 cross\-domain requests were disabled by default for security reasons\. To enable the old behavior, use the option \fB\-\-allow\-origin=\e*\fR\.
For example, the recommended way of running µStreamer with Auvidea B101 on a Raspberry Pi is:
For example, the recommended way of running µStreamer with TC358743-based capture device on a Raspberry Pi is:
\fBustreamer \e\fR
.RS
@@ -27,7 +27,7 @@ For example, the recommended way of running µStreamer with Auvidea B101 on a Ra
.nf
\fB\-\-workers=3 \e\fR # Maximum workers for V4L2 encoder
.nf
\fB\-\-persistent \e\fR # Don\'t re\-initialize device on timeout (for example when HDMI cable was disconnected)
\fB\-\-persistent \e\fR # Suppress repetitive signal source errors (for example when HDMI cable was disconnected)
.nf
\fB\-\-dv\-timings \e\fR # Use DV\-timings
.nf
@@ -69,7 +69,7 @@ Desired FPS. Default: maximum possible.
Drop frames smaller than this limit. Useful if the device produces small\-sized garbage frames. Default: 128 bytes.
.TP
.BR \-n ", " \-\-persistent
Don't re\-initialize device on timeout. Default: disabled.
Suppress repetitive signal source errors. Default: disabled.
.TP
.BR \-t ", " \-\-dv\-timings
Enable DV-timings querying and events processing to automatic resolution change. Default: disabled.
@@ -106,7 +106,7 @@ It doesn't do anything. Still here for compatibility.
It doesn't do anything. Still here for compatibility.
.TP
.BR \-K\ \fIsec ", " \-\-last\-as\-blank\ \fIsec
Show the last frame received from the camera after it was disconnected, but no more than specified time (or endlessly if 0 is specified). If the device has not yet been online, display some error message. Note: currently this option has no effect on memory sinks. Default: disabled.
It doesn't do anything. Still here for compatibility.
.TP
.BR \-l ", " \-\-slowdown
Slowdown capturing to 1 FPS or less when no stream or sink clients are connected. Useful to reduce CPU consumption. Default: disabled.
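
A hedged example combining the options described above; the device path is illustrative, and the deprecated \-\-last\-as\-blank is omitted since it is now a no\-op:
```
$ ./ustreamer \
    --device=/dev/video0 \ # Capture device (illustrative)
    --persistent \ # Suppress repetitive signal source errors
    --dv-timings \ # Follow source resolution changes
    --slowdown # Capture at ~1 FPS when no clients are connected
```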

View File

@@ -3,7 +3,7 @@
pkgname=ustreamer
pkgver=5.56
pkgver=6.0
pkgrel=1
pkgdesc="Lightweight and fast MJPEG-HTTP streamer"
url="https://github.com/pikvm/ustreamer"

View File

@@ -6,7 +6,7 @@
include $(TOPDIR)/rules.mk
PKG_NAME:=ustreamer
PKG_VERSION:=5.56
PKG_VERSION:=6.0
PKG_RELEASE:=1
PKG_MAINTAINER:=Maxim Devaev <mdevaev@gmail.com>

View File

@@ -17,7 +17,7 @@ def _find_sources(suffix: str) -> list[str]:
if __name__ == "__main__":
setup(
name="ustreamer",
version="5.56",
version="6.0",
description="uStreamer tools",
author="Maxim Devaev",
author_email="mdevaev@gmail.com",

View File

@@ -109,57 +109,66 @@ static PyObject *_MemsinkObject_exit(_MemsinkObject *self, PyObject *Py_UNUSED(i
static int _wait_frame(_MemsinkObject *self) {
const ldf deadline_ts = us_get_now_monotonic() + self->wait_timeout;
# define RETURN_OS_ERROR { \
Py_BLOCK_THREADS \
PyErr_SetFromErrno(PyExc_OSError); \
return -1; \
}
int locked = -1;
ldf now_ts;
do {
Py_BEGIN_ALLOW_THREADS
const int retval = us_flock_timedwait_monotonic(self->fd, self->lock_timeout);
locked = us_flock_timedwait_monotonic(self->fd, self->lock_timeout);
now_ts = us_get_now_monotonic();
if (retval < 0 && errno != EWOULDBLOCK) {
RETURN_OS_ERROR;
} else if (retval == 0) {
us_memsink_shared_s *mem = self->mem;
if (mem->magic == US_MEMSINK_MAGIC && mem->version == US_MEMSINK_VERSION && mem->id != self->frame_id) {
if (self->drop_same_frames > 0) {
if (
US_FRAME_COMPARE_GEOMETRY(self->mem, self->frame)
&& (self->frame_ts + self->drop_same_frames > now_ts)
&& !memcmp(self->frame->data, mem->data, mem->used)
) {
self->frame_id = mem->id;
goto drop;
}
}
Py_BLOCK_THREADS
return 0;
if (locked < 0) {
if (errno == EWOULDBLOCK) {
goto retry;
}
goto os_error;
}
if (flock(self->fd, LOCK_UN) < 0) {
RETURN_OS_ERROR;
us_memsink_shared_s *mem = self->mem;
if (mem->magic != US_MEMSINK_MAGIC || mem->version != US_MEMSINK_VERSION) {
goto retry;
}
// Let the sink know that the client is alive
mem->last_client_ts = now_ts;
if (mem->id == self->frame_id) {
goto retry;
}
if (self->drop_same_frames > 0) {
if (
US_FRAME_COMPARE_GEOMETRY(self->mem, self->frame)
&& (self->frame_ts + self->drop_same_frames > now_ts)
&& !memcmp(self->frame->data, mem->data, mem->used)
) {
self->frame_id = mem->id;
goto retry;
}
}
drop:
// New frame found
Py_BLOCK_THREADS
return 0;
os_error:
Py_BLOCK_THREADS
PyErr_SetFromErrno(PyExc_OSError);
return -1;
retry:
if (locked >= 0 && flock(self->fd, LOCK_UN) < 0) {
goto os_error;
}
if (usleep(1000) < 0) {
RETURN_OS_ERROR;
goto os_error;
}
Py_END_ALLOW_THREADS
if (PyErr_CheckSignals() < 0) {
return -1;
}
} while (now_ts < deadline_ts);
} while (now_ts < deadline_ts);
return -2;
# undef RETURN_OS_ERROR
}
static PyObject *_MemsinkObject_wait_frame(_MemsinkObject *self, PyObject *args, PyObject *kwargs) {
@@ -185,7 +194,6 @@ static PyObject *_MemsinkObject_wait_frame(_MemsinkObject *self, PyObject *args,
US_FRAME_COPY_META(self->mem, self->frame);
self->frame_id = mem->id;
self->frame_ts = us_get_now_monotonic();
mem->last_client_ts = self->frame_ts;
if (key_required) {
mem->key_requested = true;
}

View File

@@ -14,7 +14,7 @@ _V4P = ustreamer-v4p.bin
_CFLAGS = -MD -c -std=c17 -Wall -Wextra -D_GNU_SOURCE $(CFLAGS)
_LDFLAGS = $(LDFLAGS)
_COMMON_LIBS = -lm -ljpeg -pthread -lrt
_COMMON_LIBS = -lm -ljpeg -pthread -lrt -latomic
_USTR_LIBS = $(_COMMON_LIBS) -levent -levent_pthreads
_USTR_SRCS = $(shell ls \

View File

@@ -24,7 +24,6 @@
#include <stdlib.h>
#include <stdbool.h>
#include <unistd.h>
#include <signal.h>
#include <limits.h>
#include <float.h>
#include <getopt.h>
@@ -36,6 +35,7 @@
#include "../libs/logging.h"
#include "../libs/frame.h"
#include "../libs/memsink.h"
#include "../libs/signal.h"
#include "../libs/options.h"
#include "file.h"
@@ -95,7 +95,6 @@ typedef struct {
static void _signal_handler(int signum);
static void _install_signal_handlers(void);
static int _dump_sink(
const char *sink_name, unsigned sink_timeout,
@@ -190,7 +189,7 @@ int main(int argc, char *argv[]) {
ctx.destroy = us_output_file_destroy;
}
_install_signal_handlers();
us_install_signals_handler(_signal_handler, false);
const int retval = abs(_dump_sink(sink_name, sink_timeout, count, interval, key_required, &ctx));
if (ctx.v_output && ctx.destroy) {
ctx.destroy(ctx.v_output);
@@ -206,25 +205,6 @@ static void _signal_handler(int signum) {
_g_stop = true;
}
static void _install_signal_handlers(void) {
struct sigaction sig_act = {0};
assert(!sigemptyset(&sig_act.sa_mask));
sig_act.sa_handler = _signal_handler;
assert(!sigaddset(&sig_act.sa_mask, SIGINT));
assert(!sigaddset(&sig_act.sa_mask, SIGTERM));
assert(!sigaddset(&sig_act.sa_mask, SIGPIPE));
US_LOG_DEBUG("Installing SIGINT handler ...");
assert(!sigaction(SIGINT, &sig_act, NULL));
US_LOG_DEBUG("Installing SIGTERM handler ...");
assert(!sigaction(SIGTERM, &sig_act, NULL));
US_LOG_DEBUG("Installing SIGTERM handler ...");
assert(!sigaction(SIGPIPE, &sig_act, NULL));
}
static int _dump_sink(
const char *sink_name, unsigned sink_timeout,
long long count, long double interval,

View File

@@ -25,8 +25,8 @@
#include "types.h"
#define US_VERSION_MAJOR 5
#define US_VERSION_MINOR 56
#define US_VERSION_MAJOR 6
#define US_VERSION_MINOR 0
#define US_MAKE_VERSION2(_major, _minor) #_major "." #_minor
#define US_MAKE_VERSION1(_major, _minor) US_MAKE_VERSION2(_major, _minor)

View File

@@ -23,6 +23,7 @@
#include "device.h"
#include <stdlib.h>
#include <stdatomic.h>
#include <stddef.h>
#include <string.h>
#include <strings.h>
@@ -80,12 +81,12 @@ static const struct {
{"USERPTR", V4L2_MEMORY_USERPTR},
};
static int _device_wait_buffer(us_device_s *dev);
static int _device_consume_event(us_device_s *dev);
static void _v4l2_buffer_copy(const struct v4l2_buffer *src, struct v4l2_buffer *dest);
static bool _device_is_buffer_valid(us_device_s *dev, const struct v4l2_buffer *buf, const u8 *data);
static int _device_open_check_cap(us_device_s *dev);
static int _device_open_dv_timings(us_device_s *dev);
static int _device_apply_dv_timings(us_device_s *dev);
static int _device_open_dv_timings(us_device_s *dev, bool apply);
static int _device_open_format(us_device_s *dev, bool first);
static void _device_open_hw_fps(us_device_s *dev);
static void _device_open_jpeg_quality(us_device_s *dev);
@@ -149,16 +150,16 @@ int us_device_parse_format(const char *str) {
return item->format;
}
});
return US_FORMAT_UNKNOWN;
return -1;
}
v4l2_std_id us_device_parse_standard(const char *str) {
US_ARRAY_ITERATE(_STANDARDS, 1, item, {
int us_device_parse_standard(const char *str) {
US_ARRAY_ITERATE(_STANDARDS, 0, item, {
if (!strcasecmp(item->name, str)) {
return item->standard;
}
});
return US_STANDARD_UNKNOWN;
return -1;
}
int us_device_parse_io_method(const char *str) {
@@ -167,20 +168,46 @@ int us_device_parse_io_method(const char *str) {
return item->io_method;
}
});
return US_IO_METHOD_UNKNOWN;
return -1;
}
int us_device_open(us_device_s *dev) {
us_device_runtime_s *const run = dev->run;
if ((run->fd = open(dev->path, O_RDWR|O_NONBLOCK)) < 0) {
_D_LOG_PERROR("Can't open device");
if (access(dev->path, R_OK | W_OK) < 0) {
if (run->open_error_reported != -errno) {
run->open_error_reported = -errno; // Don't confuse it with __LINE__
US_LOG_PERROR("No access to capture device");
}
goto tmp_error;
}
_D_LOG_DEBUG("Opening capture device ...");
if ((run->fd = open(dev->path, O_RDWR | O_NONBLOCK)) < 0) {
_D_LOG_PERROR("Can't capture open device");
goto error;
}
_D_LOG_DEBUG("Capture device fd=%d opened", run->fd);
if (dev->dv_timings && dev->persistent) {
_D_LOG_DEBUG("Probing DV-timings or QuerySTD ...");
if (_device_open_dv_timings(dev, false) < 0) {
const int line = __LINE__;
if (run->open_error_reported != line) {
run->open_error_reported = line;
_D_LOG_ERROR("No signal from source");
}
goto tmp_error;
}
}
if (_device_open_check_cap(dev) < 0) {
goto error;
}
if (_device_open_dv_timings(dev) < 0) {
if (_device_apply_resolution(dev, dev->width, dev->height, dev->run->hz)) {
goto error;
}
if (dev->dv_timings && _device_open_dv_timings(dev, true) < 0) {
goto error;
}
if (_device_open_format(dev, true) < 0) {
@@ -209,10 +236,17 @@ int us_device_open(us_device_s *dev) {
goto error;
}
run->streamon = true;
run->open_error_reported = 0;
_D_LOG_INFO("Capturing started");
return 0;
tmp_error:
us_device_close(dev);
return -2;
error:
run->open_error_reported = 0;
us_device_close(dev);
return -1;
}
@@ -220,17 +254,22 @@ error:
void us_device_close(us_device_s *dev) {
us_device_runtime_s *const run = dev->run;
bool say = false;
if (run->streamon) {
say = true;
_D_LOG_DEBUG("Calling VIDIOC_STREAMOFF ...");
enum v4l2_buf_type type = run->capture_type;
if (us_xioctl(run->fd, VIDIOC_STREAMOFF, &type) < 0) {
_D_LOG_PERROR("Can't stop capturing");
}
run->streamon = false;
_D_LOG_INFO("Capturing stopped");
_D_LOG_DEBUG("VIDIOC_STREAMOFF successful");
}
if (run->hw_bufs != NULL) {
_D_LOG_DEBUG("Releasing device buffers ...");
say = true;
_D_LOG_DEBUG("Releasing HW buffers ...");
for (uint index = 0; index < run->n_bufs; ++index) {
us_hw_buffer_s *hw = &run->hw_bufs[index];
@@ -239,7 +278,7 @@ void us_device_close(us_device_s *dev) {
if (dev->io_method == V4L2_MEMORY_MMAP) {
if (hw->raw.allocated > 0 && hw->raw.data != NULL) {
if (munmap(hw->raw.data, hw->raw.allocated) < 0) {
_D_LOG_PERROR("Can't unmap device buffer=%u", index);
_D_LOG_PERROR("Can't unmap HW buffer=%u", index);
}
}
} else { // V4L2_MEMORY_USERPTR
@@ -252,58 +291,29 @@ void us_device_close(us_device_s *dev) {
}
US_DELETE(run->hw_bufs, free);
run->n_bufs = 0;
_D_LOG_DEBUG("All HW buffers released");
}
US_CLOSE_FD(run->fd);
run->persistent_timeout_reported = false;
}
int us_device_select(us_device_s *dev, bool *has_read, bool *has_error) {
us_device_runtime_s *const run = dev->run;
# define INIT_FD_SET(x_set) \
fd_set x_set; FD_ZERO(&x_set); FD_SET(run->fd, &x_set);
INIT_FD_SET(read_fds);
INIT_FD_SET(error_fds);
# undef INIT_FD_SET
// We used to check has_write as well, but it turned out that libcamerify generates
// write events for some reason, probably by mistake. As far as we can tell, ignoring
// has_write doesn't hurt anyone.
struct timeval timeout;
timeout.tv_sec = dev->timeout;
timeout.tv_usec = 0;
_D_LOG_DEBUG("Calling select() on video device ...");
int retval = select(run->fd + 1, &read_fds, NULL, &error_fds, &timeout);
if (retval > 0) {
*has_read = FD_ISSET(run->fd, &read_fds);
*has_error = FD_ISSET(run->fd, &error_fds);
} else {
*has_read = false;
*has_error = false;
if (say) {
_D_LOG_INFO("Capturing stopped");
}
_D_LOG_DEBUG("Device select() --> %d; has_read=%d, has_error=%d", retval, *has_read, *has_error);
if (retval > 0) {
run->persistent_timeout_reported = false;
} else if (retval == 0) {
if (dev->persistent) {
if (!run->persistent_timeout_reported) {
_D_LOG_ERROR("Persistent device timeout (unplugged)");
run->persistent_timeout_reported = true;
}
} else {
// If the device is not persistent, a timeout is treated as an error
retval = -1;
}
}
return retval;
}
int us_device_grab_buffer(us_device_s *dev, us_hw_buffer_s **hw) {
// This is a complex function that does a lot of things at once to obtain a new frame.
// - _device_wait_buffer() is called with select() inside to wait for a new frame
//   or a V4L2 event. Event handling has higher priority than frames.
// - If there are new frames, skip them all until they run out and return
//   the very latest one that contains valid data.
// - If no such frame is found, return -2.
// - Error -1 is returned on any failure.
if (_device_wait_buffer(dev) < 0) {
return -1;
}
us_device_runtime_s *const run = dev->run;
*hw = NULL;
@@ -319,7 +329,7 @@ int us_device_grab_buffer(us_device_s *dev, us_hw_buffer_s **hw) {
uint skipped = 0;
bool broken = false;
_D_LOG_DEBUG("Grabbing device buffer ...");
_D_LOG_DEBUG("Grabbing hw buffer ...");
do {
struct v4l2_buffer new = {0};
@@ -335,7 +345,7 @@ int us_device_grab_buffer(us_device_s *dev, us_hw_buffer_s **hw) {
if (new_got) {
if (new.index >= run->n_bufs) {
_D_LOG_ERROR("V4L2 error: grabbed invalid device buffer=%u, n_bufs=%u", new.index, run->n_bufs);
_D_LOG_ERROR("V4L2 error: grabbed invalid HW buffer=%u, n_bufs=%u", new.index, run->n_bufs);
return -1;
}
@@ -343,7 +353,7 @@ int us_device_grab_buffer(us_device_s *dev, us_hw_buffer_s **hw) {
# define FRAME_DATA(x_buf) run->hw_bufs[x_buf.index].raw.data
if (GRABBED(new)) {
_D_LOG_ERROR("V4L2 error: grabbed device buffer=%u is already used", new.index);
_D_LOG_ERROR("V4L2 error: grabbed HW buffer=%u is already used", new.index);
return -1;
}
GRABBED(new) = true;
@@ -354,9 +364,9 @@ int us_device_grab_buffer(us_device_s *dev, us_hw_buffer_s **hw) {
broken = !_device_is_buffer_valid(dev, &new, FRAME_DATA(new));
if (broken) {
_D_LOG_DEBUG("Releasing device buffer=%u (broken frame) ...", new.index);
_D_LOG_DEBUG("Releasing HW buffer=%u (broken frame) ...", new.index);
if (us_xioctl(run->fd, VIDIOC_QBUF, &new) < 0) {
_D_LOG_PERROR("Can't release device buffer=%u (broken frame)", new.index);
_D_LOG_PERROR("Can't release HW buffer=%u (broken frame)", new.index);
return -1;
}
GRABBED(new) = false;
@@ -365,7 +375,7 @@ int us_device_grab_buffer(us_device_s *dev, us_hw_buffer_s **hw) {
if (buf_got) {
if (us_xioctl(run->fd, VIDIOC_QBUF, &buf) < 0) {
_D_LOG_PERROR("Can't release device buffer=%u (skipped frame)", buf.index);
_D_LOG_PERROR("Can't release HW buffer=%u (skipped frame)", buf.index);
return -1;
}
GRABBED(buf) = false;
@@ -387,12 +397,13 @@ int us_device_grab_buffer(us_device_s *dev, us_hw_buffer_s **hw) {
return -2; // If we have only broken frames on this capture session
}
}
_D_LOG_PERROR("Can't grab device buffer");
_D_LOG_PERROR("Can't grab HW buffer");
return -1;
}
} while (true);
*hw = &run->hw_bufs[buf.index];
atomic_store(&(*hw)->refs, 0);
(*hw)->raw.dma_fd = (*hw)->dma_fd;
(*hw)->raw.used = buf.bytesused;
(*hw)->raw.width = run->width;
@@ -403,36 +414,89 @@ int us_device_grab_buffer(us_device_s *dev, us_hw_buffer_s **hw) {
_v4l2_buffer_copy(&buf, &(*hw)->buf);
(*hw)->raw.grab_ts = (ldf)((buf.timestamp.tv_sec * (u64)1000) + (buf.timestamp.tv_usec / 1000)) / 1000;
_D_LOG_DEBUG("Grabbed new frame: buffer=%u, bytesused=%u, grab_ts=%.3Lf, latency=%.3Lf, skipped=%u",
_D_LOG_DEBUG("Grabbed HW buffer=%u: bytesused=%u, grab_ts=%.3Lf, latency=%.3Lf, skipped=%u",
buf.index, buf.bytesused, (*hw)->raw.grab_ts, us_get_now_monotonic() - (*hw)->raw.grab_ts, skipped);
return buf.index;
}
int us_device_release_buffer(us_device_s *dev, us_hw_buffer_s *hw) {
assert(atomic_load(&hw->refs) == 0);
const uint index = hw->buf.index;
_D_LOG_DEBUG("Releasing device buffer=%u ...", index);
_D_LOG_DEBUG("Releasing HW buffer=%u ...", index);
if (us_xioctl(dev->run->fd, VIDIOC_QBUF, &hw->buf) < 0) {
_D_LOG_PERROR("Can't release device buffer=%u", index);
_D_LOG_PERROR("Can't release HW buffer=%u", index);
return -1;
}
hw->grabbed = false;
_D_LOG_DEBUG("HW buffer=%u released", index);
return 0;
}
int us_device_consume_event(us_device_s *dev) {
struct v4l2_event event;
_D_LOG_INFO("Consuming V4L2 event ...");
if (us_xioctl(dev->run->fd, VIDIOC_DQEVENT, &event) == 0) {
switch (event.type) {
case V4L2_EVENT_SOURCE_CHANGE:
_D_LOG_INFO("Got V4L2_EVENT_SOURCE_CHANGE: source changed");
return -1;
case V4L2_EVENT_EOS:
_D_LOG_INFO("Got V4L2_EVENT_EOS: end of stream (ignored)");
return 0;
void us_device_buffer_incref(us_hw_buffer_s *hw) {
atomic_fetch_add(&hw->refs, 1);
}
void us_device_buffer_decref(us_hw_buffer_s *hw) {
atomic_fetch_sub(&hw->refs, 1);
}
int _device_wait_buffer(us_device_s *dev) {
us_device_runtime_s *const run = dev->run;
# define INIT_FD_SET(x_set) \
fd_set x_set; FD_ZERO(&x_set); FD_SET(run->fd, &x_set);
INIT_FD_SET(read_fds);
INIT_FD_SET(error_fds);
# undef INIT_FD_SET
// We used to check has_write as well, but it turned out that libcamerify generates
// write events for some reason, probably by mistake. As far as we can tell, ignoring
// has_write doesn't hurt anyone.
struct timeval timeout;
timeout.tv_sec = dev->timeout;
timeout.tv_usec = 0;
_D_LOG_DEBUG("Calling select() on video device ...");
bool has_read = false;
bool has_error = false;
const int selected = select(run->fd + 1, &read_fds, NULL, &error_fds, &timeout);
if (selected > 0) {
has_read = FD_ISSET(run->fd, &read_fds);
has_error = FD_ISSET(run->fd, &error_fds);
}
_D_LOG_DEBUG("Device select() --> %d; has_read=%d, has_error=%d", selected, has_read, has_error);
if (selected < 0) {
if (errno != EINTR) {
_D_LOG_PERROR("Device select() error");
}
return -1;
} else if (selected == 0) {
_D_LOG_ERROR("Device select() timeout");
return -1;
} else {
_D_LOG_PERROR("Got some V4L2 device event, but where is it? ");
if (has_error && _device_consume_event(dev) < 0) {
return -1; // Restart required
}
}
return 0;
}
static int _device_consume_event(us_device_s *dev) {
struct v4l2_event event;
if (us_xioctl(dev->run->fd, VIDIOC_DQEVENT, &event) < 0) {
_D_LOG_PERROR("Can't consume V4L2 event");
return -1;
}
switch (event.type) {
case V4L2_EVENT_SOURCE_CHANGE:
_D_LOG_INFO("Got V4L2_EVENT_SOURCE_CHANGE: Source changed");
return -1;
case V4L2_EVENT_EOS:
_D_LOG_INFO("Got V4L2_EVENT_EOS: End of stream");
return -1;
}
return 0;
}
@@ -535,68 +599,81 @@ static int _device_open_check_cap(us_device_s *dev) {
return 0;
}
static int _device_open_dv_timings(us_device_s *dev) {
_device_apply_resolution(dev, dev->width, dev->height, dev->run->hz);
if (dev->dv_timings) {
_D_LOG_DEBUG("Using DV-timings");
static int _device_open_dv_timings(us_device_s *dev, bool apply) {
// Just probe only if @apply is false
if (_device_apply_dv_timings(dev) < 0) {
return -1;
}
const us_device_runtime_s *const run = dev->run;
struct v4l2_event_subscription sub = {.type = V4L2_EVENT_SOURCE_CHANGE};
_D_LOG_DEBUG("Subscribing to DV-timings events ...")
if (us_xioctl(dev->run->fd, VIDIOC_SUBSCRIBE_EVENT, &sub) < 0) {
_D_LOG_PERROR("Can't subscribe to DV-timings events");
return -1;
}
}
return 0;
}
static int _device_apply_dv_timings(us_device_s *dev) {
us_device_runtime_s *const run = dev->run; // cppcheck-suppress constVariablePointer
int dv_errno = 0;
struct v4l2_dv_timings dv = {0};
_D_LOG_DEBUG("Calling us_xioctl(VIDIOC_QUERY_DV_TIMINGS) ...");
if (us_xioctl(run->fd, VIDIOC_QUERY_DV_TIMINGS, &dv) == 0) {
float hz = 0;
if (dv.type == V4L2_DV_BT_656_1120) {
// See v4l2_print_dv_timings() in the kernel
const uint htot = V4L2_DV_BT_FRAME_WIDTH(&dv.bt);
const uint vtot = V4L2_DV_BT_FRAME_HEIGHT(&dv.bt) / (dv.bt.interlaced ? 2 : 1);
const uint fps = ((htot * vtot) > 0 ? ((100 * (u64)dv.bt.pixelclock)) / (htot * vtot) : 0);
hz = (fps / 100) + (fps % 100) / 100.0;
_D_LOG_INFO("Got new DV-timings: %ux%u%s%.02f, pixclk=%llu, vsync=%u, hsync=%u",
dv.bt.width, dv.bt.height, (dv.bt.interlaced ? "i" : "p"), hz,
(ull)dv.bt.pixelclock, dv.bt.vsync, dv.bt.hsync); // See #11 about %llu
} else {
_D_LOG_INFO("Got new DV-timings: %ux%u, pixclk=%llu, vsync=%u, hsync=%u",
dv.bt.width, dv.bt.height,
(ull)dv.bt.pixelclock, dv.bt.vsync, dv.bt.hsync);
}
_D_LOG_DEBUG("Calling us_xioctl(VIDIOC_S_DV_TIMINGS) ...");
if (us_xioctl(run->fd, VIDIOC_S_DV_TIMINGS, &dv) < 0) {
_D_LOG_PERROR("Failed to set DV-timings");
return -1;
}
if (_device_apply_resolution(dev, dv.bt.width, dv.bt.height, hz) < 0) {
return -1;
}
} else {
_D_LOG_DEBUG("Calling us_xioctl(VIDIOC_QUERYSTD) ...");
if (us_xioctl(run->fd, VIDIOC_QUERYSTD, &dev->standard) == 0) {
_D_LOG_INFO("Applying the new VIDIOC_S_STD: %s ...", _standard_to_string(dev->standard));
if (us_xioctl(run->fd, VIDIOC_S_STD, &dev->standard) < 0) {
_D_LOG_PERROR("Can't set video standard");
return -1;
}
}
_D_LOG_DEBUG("Querying DV-timings (apply=%u) ...", apply);
if (us_xioctl(run->fd, VIDIOC_QUERY_DV_TIMINGS, &dv) < 0) {
// TC358743 errors here (see in the kernel: drivers/media/i2c/tc358743.c):
// - ENOLINK: No valid signal (SYS_STATUS & MASK_S_TMDS)
// - ENOLCK: No sync on signal (SYS_STATUS & MASK_S_SYNC)
dv_errno = errno;
goto querystd;
} else if (!apply) {
goto probe_only;
}
float hz = 0;
if (dv.type == V4L2_DV_BT_656_1120) {
// See v4l2_print_dv_timings() in the kernel
const uint htot = V4L2_DV_BT_FRAME_WIDTH(&dv.bt);
const uint vtot = V4L2_DV_BT_FRAME_HEIGHT(&dv.bt) / (dv.bt.interlaced ? 2 : 1);
const uint fps = ((htot * vtot) > 0 ? ((100 * (u64)dv.bt.pixelclock)) / (htot * vtot) : 0);
hz = (fps / 100) + (fps % 100) / 100.0;
_D_LOG_INFO("Detected DV-timings: %ux%u%s%.02f, pixclk=%llu, vsync=%u, hsync=%u",
dv.bt.width, dv.bt.height, (dv.bt.interlaced ? "i" : "p"), hz,
(ull)dv.bt.pixelclock, dv.bt.vsync, dv.bt.hsync); // See #11 about %llu
} else {
_D_LOG_INFO("Detected DV-timings: %ux%u, pixclk=%llu, vsync=%u, hsync=%u",
dv.bt.width, dv.bt.height,
(ull)dv.bt.pixelclock, dv.bt.vsync, dv.bt.hsync);
}
_D_LOG_DEBUG("Applying DV-timings ...");
if (us_xioctl(run->fd, VIDIOC_S_DV_TIMINGS, &dv) < 0) {
_D_LOG_PERROR("Failed to apply DV-timings");
return -1;
}
if (_device_apply_resolution(dev, dv.bt.width, dv.bt.height, hz) < 0) {
return -1;
}
goto subscribe;
querystd:
_D_LOG_DEBUG("Failed to query DV-timings, trying QuerySTD ...");
if (us_xioctl(run->fd, VIDIOC_QUERYSTD, &dev->standard) < 0) {
if (apply) {
char *std_error = us_errno_to_string(errno); // Read the errno first
char *dv_error = us_errno_to_string(dv_errno);
_D_LOG_ERROR("Failed to query DV-timings (%s) and QuerySTD (%s)", dv_error, std_error);
free(dv_error);
free(std_error);
}
return -1;
} else if (!apply) {
goto probe_only;
}
if (us_xioctl(run->fd, VIDIOC_S_STD, &dev->standard) < 0) {
_D_LOG_PERROR("Can't set apply standard: %s", _standard_to_string(dev->standard));
return -1;
}
_D_LOG_DEBUG("Applied new video standard: %s", _standard_to_string(dev->standard));
subscribe:
; // Empty statement for the goto label above
struct v4l2_event_subscription sub = {.type = V4L2_EVENT_SOURCE_CHANGE};
_D_LOG_DEBUG("Subscribing to V4L2_EVENT_SOURCE_CHANGE ...")
if (us_xioctl(dev->run->fd, VIDIOC_SUBSCRIBE_EVENT, &sub) < 0) {
_D_LOG_PERROR("Can't subscribe to V4L2_EVENT_SOURCE_CHANGE");
return -1;
}
probe_only:
return 0;
}
@@ -659,7 +736,7 @@ static int _device_open_format(us_device_s *dev, bool first) {
_format_to_string_supported(FMT(pixelformat)));
char *format_str;
if ((format_str = (char *)_format_to_string_nullable(FMT(pixelformat))) != NULL) {
if ((format_str = (char*)_format_to_string_nullable(FMT(pixelformat))) != NULL) {
_D_LOG_INFO("Falling back to format=%s", format_str);
} else {
char fourcc_str[8];
@@ -807,6 +884,7 @@ static int _device_open_io_method_mmap(us_device_s *dev) {
}
us_hw_buffer_s *hw = &run->hw_bufs[run->n_bufs];
atomic_init(&hw->refs, 0);
const uz buf_size = (run->capture_mplane ? buf.m.planes[0].length : buf.length);
const off_t buf_offset = (run->capture_mplane ? buf.m.planes[0].m.mem_offset : buf.m.offset);
@@ -1060,7 +1138,7 @@ static const char *_standard_to_string(v4l2_std_id standard) {
return item->name;
}
});
return _STANDARDS[0].name;
return "???";
}
static const char *_io_method_to_string_supported(enum v4l2_memory io_method) {

View File

@@ -22,6 +22,8 @@
#pragma once
#include <stdatomic.h>
#include <linux/videodev2.h>
#include "types.h"
@@ -29,20 +31,15 @@
#define US_VIDEO_MIN_WIDTH ((uint)160)
#define US_VIDEO_MAX_WIDTH ((uint)15360)
#define US_VIDEO_MAX_WIDTH ((uint)15360) // Remember about stream->run->http_capture_state;
#define US_VIDEO_MIN_HEIGHT ((uint)120)
#define US_VIDEO_MAX_HEIGHT ((uint)8640)
#define US_VIDEO_MAX_FPS ((uint)120)
#define US_STANDARD_UNKNOWN V4L2_STD_UNKNOWN
#define US_STANDARDS_STR "PAL, NTSC, SECAM"
#define US_FORMAT_UNKNOWN -1
#define US_FORMATS_STR "YUYV, YVYU, UYVY, RGB565, RGB24, BGR24, MJPEG, JPEG"
#define US_IO_METHOD_UNKNOWN -1
#define US_IO_METHODS_STR "MMAP, USERPTR"
@@ -51,6 +48,7 @@ typedef struct {
struct v4l2_buffer buf;
int dma_fd;
bool grabbed;
atomic_int refs;
} us_hw_buffer_s;
typedef struct {
@@ -69,7 +67,7 @@ typedef struct {
enum v4l2_buf_type capture_type;
bool capture_mplane;
bool streamon;
bool persistent_timeout_reported;
int open_error_reported;
} us_device_runtime_s;
typedef enum {
@@ -126,13 +124,14 @@ us_device_s *us_device_init(void);
void us_device_destroy(us_device_s *dev);
int us_device_parse_format(const char *str);
v4l2_std_id us_device_parse_standard(const char *str);
int us_device_parse_standard(const char *str);
int us_device_parse_io_method(const char *str);
int us_device_open(us_device_s *dev);
void us_device_close(us_device_s *dev);
int us_device_select(us_device_s *dev, bool *has_read, bool *has_error);
int us_device_grab_buffer(us_device_s *dev, us_hw_buffer_s **hw);
int us_device_release_buffer(us_device_s *dev, us_hw_buffer_s *hw);
int us_device_consume_event(us_device_s *dev);
void us_device_buffer_incref(us_hw_buffer_s *hw);
void us_device_buffer_decref(us_hw_buffer_s *hw);

View File

@@ -101,16 +101,35 @@ void us_memsink_destroy(us_memsink_s *sink) {
}
bool us_memsink_server_check(us_memsink_s *sink, const us_frame_s *frame) {
// Return true (the need to write to memsink) on any of these conditions:
// - EWOULDBLOCK - we have an active client;
// - Incorrect magic or version - need to first write;
// - We have some active clients by last_client_ts;
// - Frame meta differs (like size, format, but not timestamp).
// If frame == NULL, we only check for the presence of clients
// or the need to initialize the memory.
assert(sink->server);
if (sink->mem->magic != US_MEMSINK_MAGIC || sink->mem->version != US_MEMSINK_VERSION) {
// If the memory region has not been initialized, something has to be put there.
// No locking is needed because only the server writes to these variables.
return true;
}
const ldf unsafe_ts = sink->mem->last_client_ts;
if (unsafe_ts != sink->unsafe_last_client_ts) {
// The client writes its last_client_ts mark into the sink on every action.
// We don't take a lock here; we just check whether this number is the same one
// we read on previous iterations. The value doesn't need to be consistent,
// and even if we read garbage due to a memory race between the read here and the
// write from the client, we can still conclude whether we have any clients at all.
// If the number has changed, we definitely have clients and no further checks
// are needed. If the number is unchanged, it is worth taking the lock and checking
// whether something needs to be written to memory to initialize the frame.
sink->unsafe_last_client_ts = unsafe_ts;
atomic_store(&sink->has_clients, true);
return true;
}
if (flock(sink->fd, LOCK_EX | LOCK_NB) < 0) {
if (errno == EWOULDBLOCK) {
// There is a live client that is holding the lock right now and reading a frame from the sink
atomic_store(&sink->has_clients, true);
return true;
}
@@ -118,10 +137,7 @@ bool us_memsink_server_check(us_memsink_s *sink, const us_frame_s *frame) {
return false;
}
if (sink->mem->magic != US_MEMSINK_MAGIC || sink->mem->version != US_MEMSINK_VERSION) {
return true;
}
// Check whether we have a live client based on the timeout
const bool has_clients = (sink->mem->last_client_ts + sink->client_ttl > us_get_now_monotonic());
atomic_store(&sink->has_clients, has_clients);
@@ -129,13 +145,20 @@ bool us_memsink_server_check(us_memsink_s *sink, const us_frame_s *frame) {
US_LOG_PERROR("%s-sink: Can't unlock memory", sink->name);
return false;
}
return (has_clients || !US_FRAME_COMPARE_GEOMETRY(sink->mem, frame));;
if (has_clients) {
return true;
}
if (frame != NULL && !US_FRAME_COMPARE_GEOMETRY(sink->mem, frame)) {
// If the frame geometry/format has changed, that also needs to be written to the sink immediately
return true;
}
return false;
}
int us_memsink_server_put(us_memsink_s *sink, const us_frame_s *frame, bool *key_requested) {
assert(sink->server);
const long double now = us_get_now_monotonic();
const ldf now = us_get_now_monotonic();
if (frame->used > US_MEMSINK_MAX_DATA) {
US_LOG_ERROR("%s-sink: Can't put frame: is too big (%zu > %zu)",
@@ -146,12 +169,13 @@ int us_memsink_server_put(us_memsink_s *sink, const us_frame_s *frame, bool *key
if (us_flock_timedwait_monotonic(sink->fd, 1) == 0) {
US_LOG_VERBOSE("%s-sink: >>>>> Exposing new frame ...", sink->name);
sink->last_id = us_get_now_id();
sink->mem->id = sink->last_id;
sink->mem->id = us_get_now_id();
if (sink->mem->key_requested && frame->key) {
sink->mem->key_requested = false;
}
*key_requested = sink->mem->key_requested;
if (key_requested != NULL) { // We don't need it for non-H264 sinks
*key_requested = sink->mem->key_requested;
}
memcpy(sink->mem->data, frame->data, frame->used);
sink->mem->used = frame->used;
@@ -190,26 +214,35 @@ int us_memsink_client_get(us_memsink_s *sink, us_frame_s *frame, bool *key_reque
return -1;
}
int retval = -2; // Not updated
int retval = 0;
if (sink->mem->magic == US_MEMSINK_MAGIC) {
if (sink->mem->version != US_MEMSINK_VERSION) {
US_LOG_ERROR("%s-sink: Protocol version mismatch: sink=%u, required=%u",
sink->name, sink->mem->version, US_MEMSINK_VERSION);
retval = -1;
goto done;
}
if (sink->mem->id != sink->last_id) { // When updated
sink->last_id = sink->mem->id;
us_frame_set_data(frame, sink->mem->data, sink->mem->used);
US_FRAME_COPY_META(sink->mem, frame);
*key_requested = sink->mem->key_requested;
retval = 0;
}
sink->mem->last_client_ts = us_get_now_monotonic();
if (key_required) {
sink->mem->key_requested = true;
}
if (sink->mem->magic != US_MEMSINK_MAGIC) {
retval = -2; // Not updated
goto done;
}
if (sink->mem->version != US_MEMSINK_VERSION) {
US_LOG_ERROR("%s-sink: Protocol version mismatch: sink=%u, required=%u",
sink->name, sink->mem->version, US_MEMSINK_VERSION);
retval = -1;
goto done;
}
// Let the sink know that the client is alive
sink->mem->last_client_ts = us_get_now_monotonic();
if (sink->mem->id == sink->last_readed_id) {
retval = -2; // Not updated
goto done;
}
sink->last_readed_id = sink->mem->id;
us_frame_set_data(frame, sink->mem->data, sink->mem->used);
US_FRAME_COPY_META(sink->mem, frame);
if (key_requested != NULL) { // We don't need it for non-H264 sinks
*key_requested = sink->mem->key_requested;
}
if (key_required) {
sink->mem->key_requested = true;
}
done:

View File

@@ -41,8 +41,11 @@ typedef struct {
int fd;
us_memsink_shared_s *mem;
u64 last_id;
atomic_bool has_clients; // Only for server
u64 last_readed_id; // Only for client
atomic_bool has_clients; // Only for server results
ldf unsafe_last_client_ts; // Only for server
} us_memsink_s;

View File

@@ -104,9 +104,9 @@ int us_queue_get(us_queue_s *queue, void **item, ldf timeout) {
#undef _WAIT_OR_UNLOCK
/*int us_queue_get_free(us_queue_s *queue) {
bool us_queue_is_empty(us_queue_s *queue) {
US_MUTEX_LOCK(queue->mutex);
const uint size = queue->size;
US_MUTEX_UNLOCK(queue->mutex);
return queue->capacity - size;
}*/
return (bool)(queue->capacity - size);
}

View File

@@ -45,7 +45,7 @@ typedef struct {
#define US_QUEUE_DELETE_WITH_ITEMS(x_queue, x_free_item) { \
if (x_queue) { \
while (!us_queue_get_free(x_queue)) { \
while (!us_queue_is_empty(x_queue)) { \
void *m_ptr; \
if (!us_queue_get(x_queue, &m_ptr, 0)) { \
US_DELETE(m_ptr, x_free_item); \
@@ -61,4 +61,4 @@ void us_queue_destroy(us_queue_s *queue);
int us_queue_put(us_queue_s *queue, void *item, ldf timeout);
int us_queue_get(us_queue_s *queue, void **item, ldf timeout);
// int us_queue_get_free(us_queue_s *queue);
bool us_queue_is_empty(us_queue_s *queue);

82
src/libs/signal.c Normal file
View File

@@ -0,0 +1,82 @@
/*****************************************************************************
# #
# uStreamer - Lightweight and fast MJPEG-HTTP streamer. #
# #
# Copyright (C) 2018-2023 Maxim Devaev <mdevaev@gmail.com> #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <https://www.gnu.org/licenses/>. #
# #
*****************************************************************************/
#include "signal.h"
#include <string.h>
#include <signal.h>
#include <assert.h>
#if defined(__GLIBC__) && __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 32
# define HAS_SIGABBREV_NP
#endif
#include "types.h"
#include "tools.h"
#include "logging.h"
char *us_signum_to_string(int signum) {
# ifdef HAS_SIGABBREV_NP
const char *const name = sigabbrev_np(signum);
# else
const char *const name = (
signum == SIGTERM ? "TERM" :
signum == SIGINT ? "INT" :
signum == SIGPIPE ? "PIPE" :
NULL
);
# endif
char *buf;
if (name != NULL) {
US_ASPRINTF(buf, "SIG%s", name);
} else {
US_ASPRINTF(buf, "SIG[%d]", signum);
}
return buf;
}
void us_install_signals_handler(us_signal_handler_f handler, bool ignore_sigpipe) {
struct sigaction sig_act = {0};
assert(!sigemptyset(&sig_act.sa_mask));
sig_act.sa_handler = handler;
assert(!sigaddset(&sig_act.sa_mask, SIGINT));
assert(!sigaddset(&sig_act.sa_mask, SIGTERM));
if (!ignore_sigpipe) {
assert(!sigaddset(&sig_act.sa_mask, SIGPIPE));
}
US_LOG_DEBUG("Installing SIGINT handler ...");
assert(!sigaction(SIGINT, &sig_act, NULL));
US_LOG_DEBUG("Installing SIGTERM handler ...");
assert(!sigaction(SIGTERM, &sig_act, NULL));
if (!ignore_sigpipe) {
US_LOG_DEBUG("Installing SIGPIPE handler ...");
assert(!sigaction(SIGPIPE, &sig_act, NULL));
} else {
US_LOG_DEBUG("Ignoring SIGPIPE ...");
assert(signal(SIGPIPE, SIG_IGN) != SIG_ERR);
}
}

32
src/libs/signal.h Normal file
View File

@@ -0,0 +1,32 @@
/*****************************************************************************
# #
# uStreamer - Lightweight and fast MJPEG-HTTP streamer. #
# #
# Copyright (C) 2018-2023 Maxim Devaev <mdevaev@gmail.com> #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <https://www.gnu.org/licenses/>. #
# #
*****************************************************************************/
#pragma once
#include "types.h"
typedef void (*us_signal_handler_f)(int);
char *us_signum_to_string(int signum);
void us_install_signals_handler(us_signal_handler_f handler, bool ignore_sigpipe);

View File

@@ -24,6 +24,7 @@
#include <stdio.h>
#include <unistd.h>
#include <signal.h>
#include <assert.h>
#include <sys/syscall.h>
@@ -56,9 +57,14 @@
us_thread_set_name(m_new_tname_buf); \
}
#else
# define US_THREAD_RENAME(_fmt, ...)
# define US_THREAD_RENAME(x_fmt, ...)
#endif
#define US_THREAD_SETTLE(x_fmt, ...) { \
US_THREAD_RENAME((x_fmt), ##__VA_ARGS__); \
us_thread_block_signals(); \
}
#define US_MUTEX_INIT(x_mutex) assert(!pthread_mutex_init(&(x_mutex), NULL))
#define US_MUTEX_DESTROY(x_mutex) assert(!pthread_mutex_destroy(&(x_mutex)))
#define US_MUTEX_LOCK(x_mutex) assert(!pthread_mutex_lock(&(x_mutex)))
@@ -124,3 +130,11 @@ INLINE void us_thread_get_name(char *name) { // Always required for logging
}
#endif
}
INLINE void us_thread_block_signals(void) {
sigset_t mask;
assert(!sigemptyset(&mask));
assert(!sigaddset(&mask, SIGINT));
assert(!sigaddset(&mask, SIGTERM));
assert(!pthread_sigmask(SIG_BLOCK, &mask, NULL));
}

View File

@@ -27,6 +27,7 @@
#include <string.h>
#include <unistd.h>
#include <limits.h>
#include <locale.h> // Make C locale for strerror_l()
#include <errno.h>
#include <math.h>
#include <time.h>
@@ -34,12 +35,6 @@
#include <sys/file.h>
#if defined(__GLIBC__) && __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 32
# define HAS_SIGABBREV_NP
#else
# include <signal.h>
#endif
#include "types.h"
@@ -181,34 +176,26 @@ INLINE int us_flock_timedwait_monotonic(int fd, ldf timeout) {
}
INLINE char *us_errno_to_string(int error) {
# if (_POSIX_C_SOURCE >= 200112L) && !defined(_GNU_SOURCE) // XSI
char buf[2048];
const uz max_len = sizeof(buf) - 1;
# if (_POSIX_C_SOURCE >= 200112L) && ! _GNU_SOURCE
if (strerror_r(error, buf, max_len) != 0) {
US_SNPRINTF(buf, max_len, "Errno = %d", error);
}
return us_strdup(buf);
# else
return us_strdup(strerror_r(error, buf, max_len));
# endif
}
INLINE char *us_signum_to_string(int signum) {
# ifdef HAS_SIGABBREV_NP
const char *const name = sigabbrev_np(signum);
# else
const char *const name = (
signum == SIGTERM ? "TERM" :
signum == SIGINT ? "INT" :
signum == SIGPIPE ? "PIPE" :
NULL
);
# endif
char *buf;
if (name != NULL) {
US_ASPRINTF(buf, "SIG%s", name);
} else {
US_ASPRINTF(buf, "SIG[%d]", signum);
# elif defined(__GLIBC__) && defined(_GNU_SOURCE) // GNU
char buf[2048];
const uz max_len = sizeof(buf) - 1;
return us_strdup(strerror_r(error, buf, max_len));
# else // BSD
locale_t locale = newlocale(LC_MESSAGES_MASK, "C", NULL);
if (locale) {
char *ptr = us_strdup(strerror_l(error, locale));
freelocale(locale);
return ptr;
}
return buf;
return us_strdup("!!! newlocale() error !!!");
# endif
}

View File

@@ -54,7 +54,7 @@ int us_unjpeg(const us_frame_s *src, us_frame_s *dest, bool decode) {
// https://stackoverflow.com/questions/19857766/error-handling-in-libjpeg
_jpeg_error_manager_s jpeg_error;
jpeg.err = jpeg_std_error((struct jpeg_error_mgr *)&jpeg_error);
jpeg.err = jpeg_std_error((struct jpeg_error_mgr*)&jpeg_error);
jpeg_error.mgr.error_exit = _jpeg_error_handler;
jpeg_error.frame = src;
if (setjmp(jpeg_error.jmp) < 0) {
@@ -94,7 +94,7 @@ done:
}
static void _jpeg_error_handler(j_common_ptr jpeg) {
_jpeg_error_manager_s *jpeg_error = (_jpeg_error_manager_s *)jpeg->err;
_jpeg_error_manager_s *jpeg_error = (_jpeg_error_manager_s*)jpeg->err;
char msg[JMSG_LENGTH_MAX];
(*jpeg_error->mgr.format_message)(jpeg, msg);

View File

@@ -73,13 +73,13 @@ void us_encoder_destroy(us_encoder_s *enc) {
free(enc);
}
us_encoder_type_e us_encoder_parse_type(const char *str) {
int us_encoder_parse_type(const char *str) {
US_ARRAY_ITERATE(_ENCODER_TYPES, 0, item, {
if (!strcasecmp(item->name, str)) {
return item->type;
}
});
return US_ENCODER_TYPE_UNKNOWN;
return -1;
}
const char *us_encoder_type_to_string(us_encoder_type_e type) {
@@ -91,7 +91,9 @@ const char *us_encoder_type_to_string(us_encoder_type_e type) {
return _ENCODER_TYPES[0].name;
}
us_workers_pool_s *us_encoder_workers_pool_init(us_encoder_s *enc, us_device_s *dev) {
void us_encoder_open(us_encoder_s *enc, us_device_s *dev) {
assert(enc->run->pool == NULL);
# define DR(x_next) dev->run->x_next
us_encoder_type_e type = (_ER(cpu_forced) ? US_ENCODER_TYPE_CPU : enc->type);
@@ -162,7 +164,7 @@ us_workers_pool_s *us_encoder_workers_pool_init(us_encoder_s *enc, us_device_s *
: 0
);
return us_workers_pool_init(
enc->run->pool = us_workers_pool_init(
"JPEG", "jw", n_workers, desired_interval,
_worker_job_init, (void *)enc,
_worker_job_destroy,
@@ -171,6 +173,11 @@ us_workers_pool_s *us_encoder_workers_pool_init(us_encoder_s *enc, us_device_s *
# undef DR
}
void us_encoder_close(us_encoder_s *enc) {
assert(enc->run->pool != NULL);
US_DELETE(enc->run->pool, us_workers_pool_destroy);
}
void us_encoder_get_runtime_params(us_encoder_s *enc, us_encoder_type_e *type, unsigned *quality) {
US_MUTEX_LOCK(_ER(mutex));
*type = _ER(type);
@@ -198,8 +205,6 @@ static bool _worker_run_job(us_worker_s *wr) {
const us_frame_s *src = &job->hw->raw;
us_frame_s *dest = job->dest;
assert(_ER(type) != US_ENCODER_TYPE_UNKNOWN);
if (_ER(type) == US_ENCODER_TYPE_CPU) {
US_LOG_VERBOSE("Compressing JPEG using CPU: worker=%s, buffer=%u",
wr->name, job->hw->buf.index);
@@ -223,6 +228,9 @@ static bool _worker_run_job(us_worker_s *wr) {
us_frame_encoding_begin(src, dest, V4L2_PIX_FMT_JPEG);
usleep(5000); // Just so the desired_fps logic keeps working
dest->encode_end_ts = us_get_now_monotonic(); // us_frame_encoding_end()
} else {
assert(0 && "Unknown encoder type");
}
US_LOG_VERBOSE("Compressed new JPEG: size=%zu, time=%0.3Lf, worker=%s, buffer=%u",

View File

@@ -47,7 +47,6 @@
#define ENCODER_TYPES_STR "CPU, HW, M2M-VIDEO, M2M-IMAGE, NOOP"
typedef enum {
US_ENCODER_TYPE_UNKNOWN, // Only for us_encoder_parse_type() and main()
US_ENCODER_TYPE_CPU,
US_ENCODER_TYPE_HW,
US_ENCODER_TYPE_M2M_VIDEO,
@@ -63,6 +62,8 @@ typedef struct {
unsigned n_m2ms;
us_m2m_encoder_s **m2ms;
us_workers_pool_s *pool;
} us_encoder_runtime_s;
typedef struct {
@@ -83,10 +84,10 @@ typedef struct {
us_encoder_s *us_encoder_init(void);
void us_encoder_destroy(us_encoder_s *enc);
us_encoder_type_e us_encoder_parse_type(const char *str);
int us_encoder_parse_type(const char *str);
const char *us_encoder_type_to_string(us_encoder_type_e type);
us_workers_pool_s *us_encoder_workers_pool_init(us_encoder_s *enc, us_device_s *dev);
void us_encoder_get_runtime_params(us_encoder_s *enc, us_encoder_type_e *type, unsigned *quality);
void us_encoder_open(us_encoder_s *enc, us_device_s *dev);
void us_encoder_close(us_encoder_s *enc);
int us_encoder_compress(us_encoder_s *enc, unsigned worker_number, us_frame_s *src, us_frame_s *dest);
void us_encoder_get_runtime_params(us_encoder_s *enc, us_encoder_type_e *type, unsigned *quality);

View File

@@ -22,8 +22,19 @@
#include "h264.h"
#include <stdatomic.h>
us_h264_stream_s *us_h264_stream_init(us_memsink_s *sink, const char *path, unsigned bitrate, unsigned gop) {
#include "../libs/types.h"
#include "../libs/tools.h"
#include "../libs/logging.h"
#include "../libs/frame.h"
#include "../libs/memsink.h"
#include "../libs/unjpeg.h"
#include "m2m.h"
us_h264_stream_s *us_h264_stream_init(us_memsink_s *sink, const char *path, uint bitrate, uint gop) {
us_h264_stream_s *h264;
US_CALLOC(h264, 1);
h264->sink = sink;
@@ -42,18 +53,15 @@ void us_h264_stream_destroy(us_h264_stream_s *h264) {
}
void us_h264_stream_process(us_h264_stream_s *h264, const us_frame_s *frame, bool force_key) {
if (!us_memsink_server_check(h264->sink, frame)) {
return;
}
if (us_is_jpeg(frame->format)) {
const long double now = us_get_now_monotonic();
const ldf now_ts = us_get_now_monotonic();
US_LOG_DEBUG("H264: Input frame is JPEG; decoding ...");
if (us_unjpeg(frame, h264->tmp_src, true) < 0) {
atomic_store(&h264->online, false);
return;
}
frame = h264->tmp_src;
US_LOG_VERBOSE("H264: JPEG decoded; time=%.3Lf", us_get_now_monotonic() - now);
US_LOG_VERBOSE("H264: JPEG decoded; time=%.3Lf", us_get_now_monotonic() - now_ts);
}
if (h264->key_requested) {

View File

@@ -22,15 +22,12 @@
#pragma once
#include <stdbool.h>
#include <stdatomic.h>
#include <assert.h>
#include "../libs/tools.h"
#include "../libs/logging.h"
#include "../libs/types.h"
#include "../libs/frame.h"
#include "../libs/memsink.h"
#include "../libs/unjpeg.h"
#include "m2m.h"
@@ -44,6 +41,6 @@ typedef struct {
} us_h264_stream_s;
us_h264_stream_s *us_h264_stream_init(us_memsink_s *sink, const char *path, unsigned bitrate, unsigned gop);
us_h264_stream_s *us_h264_stream_init(us_memsink_s *sink, const char *path, uint bitrate, uint gop);
void us_h264_stream_destroy(us_h264_stream_s *h264);
void us_h264_stream_process(us_h264_stream_s *h264, const us_frame_s *frame, bool force_key);

View File

@@ -22,6 +22,14 @@
#include "bev.h"
#include <string.h>
#include <errno.h>
#include <event2/util.h>
#include <event2/bufferevent.h>
#include "../../libs/tools.h"
char *us_bufferevent_format_reason(short what) {
char *reason;
@@ -34,6 +42,7 @@ char *us_bufferevent_format_reason(short what) {
strncat(reason, perror_str, 1023);
free(perror_str);
strcat(reason, " (");
# define FILL_REASON(x_bev, x_name) { \
if (what & x_bev) { \
if (first) { \
@@ -44,7 +53,6 @@ char *us_bufferevent_format_reason(short what) {
strcat(reason, x_name); \
} \
}
FILL_REASON(BEV_EVENT_READING, "reading");
FILL_REASON(BEV_EVENT_WRITING, "writing");
FILL_REASON(BEV_EVENT_ERROR, "error");

View File

@@ -22,14 +22,5 @@
#pragma once
#include <string.h>
#include <errno.h>
#include <event2/util.h>
#include <event2/bufferevent.h>
#include "../../libs/tools.h"
#include "../../libs/logging.h"
char *us_bufferevent_format_reason(short what);

View File

@@ -22,6 +22,13 @@
#include "mime.h"
#include <string.h>
#include <event2/util.h>
#include "../../libs/tools.h"
#include "../../libs/array.h"
static const struct {
const char *ext; // cppcheck-suppress unusedStructMember

View File

@@ -22,12 +22,5 @@
#pragma once
#include <string.h>
#include <event2/util.h>
#include "../../libs/tools.h"
#include "../../libs/array.h"
const char *us_guess_mime_type(const char *str);

View File

@@ -22,6 +22,14 @@
#include "path.h"
#ifdef TEST_HTTP_PATH
# include <stdio.h>
# include <stdlib.h>
#endif
#include <string.h>
#include "../../libs/tools.h"
char *us_simplify_request_path(const char *str) {
// Based on Lighttpd sources:

View File

@@ -22,13 +22,5 @@
#pragma once
#ifdef TEST_HTTP_PATH
# include <stdio.h>
# include <stdlib.h>
#endif
#include <string.h>
#include "../../libs/tools.h"
char *us_simplify_request_path(const char *str);

View File

@@ -22,6 +22,61 @@
#include "server.h"
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stdbool.h>
#include <stdatomic.h>
#include <string.h>
#include <inttypes.h>
#include <unistd.h>
#include <fcntl.h>
#include <assert.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <netinet/tcp.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <event2/util.h>
#include <event2/event.h>
#include <event2/thread.h>
#include <event2/http.h>
#include <event2/buffer.h>
#include <event2/bufferevent.h>
#include <event2/keyvalq_struct.h>
#ifndef EVTHREAD_USE_PTHREADS_IMPLEMENTED
# error Required libevent-pthreads support
#endif
#include "../../libs/types.h"
#include "../../libs/tools.h"
#include "../../libs/threading.h"
#include "../../libs/logging.h"
#include "../../libs/process.h"
#include "../../libs/frame.h"
#include "../../libs/base64.h"
#include "../../libs/list.h"
#include "../data/index_html.h"
#include "../data/favicon_ico.h"
#include "../encoder.h"
#include "../stream.h"
#ifdef WITH_GPIO
# include "../gpio/gpio.h"
#endif
#include "bev.h"
#include "unix.h"
#include "uri.h"
#include "mime.h"
#include "static.h"
#ifdef WITH_SYSTEMD
# include "systemd/systemd.h"
#endif
static int _http_preprocess_request(struct evhttp_request *request, us_server_s *server);
@@ -37,9 +92,9 @@ static void _http_callback_stream(struct evhttp_request *request, void *v_server
static void _http_callback_stream_write(struct bufferevent *buf_event, void *v_ctx);
static void _http_callback_stream_error(struct bufferevent *buf_event, short what, void *v_ctx);
static void _http_request_watcher(int fd, short event, void *v_server);
static void _http_refresher(int fd, short event, void *v_server);
static void _http_queue_send_stream(us_server_s *server, bool stream_updated, bool frame_updated);
static void _http_send_stream(us_server_s *server, bool stream_updated, bool frame_updated);
static void _http_send_snapshot(us_server_s *server);
static bool _expose_frame(us_server_s *server, const us_frame_s *frame);
@@ -47,10 +102,19 @@ static const char *_http_get_header(struct evhttp_request *request, const char *
static char *_http_get_client_hostport(struct evhttp_request *request);
#define _S_LOG_ERROR(x_msg, ...) US_LOG_ERROR("HTTP: " x_msg, ##__VA_ARGS__)
#define _S_LOG_PERROR(x_msg, ...) US_LOG_PERROR("HTTP: " x_msg, ##__VA_ARGS__)
#define _S_LOG_INFO(x_msg, ...) US_LOG_INFO("HTTP: " x_msg, ##__VA_ARGS__)
#define _S_LOG_VERBOSE(x_msg, ...) US_LOG_VERBOSE("HTTP: " x_msg, ##__VA_ARGS__)
#define _S_LOG_DEBUG(x_msg, ...) US_LOG_DEBUG("HTTP: " x_msg, ##__VA_ARGS__)
#define _A_EVBUFFER_NEW(x_buf) assert((x_buf = evbuffer_new()) != NULL)
#define _A_EVBUFFER_ADD(x_buf, x_data, x_size) assert(!evbuffer_add(x_buf, x_data, x_size))
#define _A_EVBUFFER_ADD_PRINTF(x_buf, x_fmt, ...) assert(evbuffer_add_printf(x_buf, x_fmt, ##__VA_ARGS__) >= 0)
#define _A_ADD_HEADER(x_request, x_key, x_value) \
assert(!evhttp_add_header(evhttp_request_get_output_headers(x_request), x_key, x_value))
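// Note: the _A_* helpers wrap the libevent buffer/header calls in assert(),
// so a failed allocation or add aborts instead of being silently ignored.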
us_server_s *us_server_init(us_stream_s *stream) {
us_server_exposed_s *exposed;
@@ -91,11 +155,6 @@ void us_server_destroy(us_server_s *server) {
event_free(run->refresher);
}
if (run->request_watcher != NULL) {
event_del(run->request_watcher);
event_free(run->request_watcher);
}
evhttp_free(run->http);
US_CLOSE_FD(run->ext_fd);
event_base_free(run->base);
@@ -104,6 +163,10 @@ void us_server_destroy(us_server_s *server) {
libevent_global_shutdown();
# endif
US_LIST_ITERATE(run->snapshot_clients, client, { // cppcheck-suppress constStatement
free(client);
});
US_LIST_ITERATE(run->stream_clients, client, { // cppcheck-suppress constStatement
free(client->key);
free(client->hostport);
@@ -125,29 +188,21 @@ int us_server_listen(us_server_s *server) {
{
if (server->static_path[0] != '\0') {
US_LOG_INFO("Enabling HTTP file server: %s", server->static_path);
evhttp_set_gencb(run->http, _http_callback_static, (void *)server);
_S_LOG_INFO("Enabling the file server: %s", server->static_path);
evhttp_set_gencb(run->http, _http_callback_static, (void*)server);
} else {
assert(!evhttp_set_cb(run->http, "/", _http_callback_root, (void *)server));
assert(!evhttp_set_cb(run->http, "/favicon.ico", _http_callback_favicon, (void *)server));
assert(!evhttp_set_cb(run->http, "/", _http_callback_root, (void*)server));
assert(!evhttp_set_cb(run->http, "/favicon.ico", _http_callback_favicon, (void*)server));
}
assert(!evhttp_set_cb(run->http, "/state", _http_callback_state, (void *)server));
assert(!evhttp_set_cb(run->http, "/snapshot", _http_callback_snapshot, (void *)server));
assert(!evhttp_set_cb(run->http, "/stream", _http_callback_stream, (void *)server));
assert(!evhttp_set_cb(run->http, "/state", _http_callback_state, (void*)server));
assert(!evhttp_set_cb(run->http, "/snapshot", _http_callback_snapshot, (void*)server));
assert(!evhttp_set_cb(run->http, "/stream", _http_callback_stream, (void*)server));
}
us_frame_copy(stream->run->blank->jpeg, ex->frame);
ex->notify_last_width = ex->frame->width;
ex->notify_last_height = ex->frame->height;
if (server->exit_on_no_clients > 0) {
run->last_request_ts = us_get_now_monotonic();
struct timeval interval = {0};
interval.tv_usec = 100000;
assert((run->request_watcher = event_new(run->base, -1, EV_PERSIST, _http_request_watcher, server)) != NULL);
assert(!event_add(run->request_watcher, &interval));
}
{
struct timeval interval = {0};
if (stream->dev->desired_fps > 0) {
@@ -166,17 +221,17 @@ int us_server_listen(us_server_s *server) {
char *raw_token;
US_ASPRINTF(raw_token, "%s:%s", server->user, server->passwd);
us_base64_encode((uint8_t *)raw_token, strlen(raw_token), &encoded_token, NULL);
us_base64_encode((u8*)raw_token, strlen(raw_token), &encoded_token, NULL);
free(raw_token);
US_ASPRINTF(run->auth_token, "Basic %s", encoded_token);
free(encoded_token);
US_LOG_INFO("Using HTTP basic auth");
_S_LOG_INFO("Using HTTP basic auth");
}
if (server->unix_path[0] != '\0') {
US_LOG_DEBUG("Binding HTTP to UNIX socket '%s' ...", server->unix_path);
_S_LOG_DEBUG("Binding server to UNIX socket '%s' ...", server->unix_path);
if ((run->ext_fd = us_evhttp_bind_unix(
run->http,
server->unix_path,
@@ -185,57 +240,55 @@ int us_server_listen(us_server_s *server) {
) {
return -1;
}
US_LOG_INFO("Listening HTTP on UNIX socket '%s'", server->unix_path);
_S_LOG_INFO("Listening HTTP on UNIX socket '%s'", server->unix_path);
# ifdef WITH_SYSTEMD
} else if (server->systemd) {
US_LOG_DEBUG("Binding HTTP to systemd socket ...");
_S_LOG_DEBUG("Binding HTTP to systemd socket ...");
if ((run->ext_fd = us_evhttp_bind_systemd(run->http)) < 0) {
return -1;
}
US_LOG_INFO("Listening systemd socket ...");
_S_LOG_INFO("Listening systemd socket ...");
# endif
} else {
US_LOG_DEBUG("Binding HTTP to [%s]:%u ...", server->host, server->port);
_S_LOG_DEBUG("Binding HTTP to [%s]:%u ...", server->host, server->port);
if (evhttp_bind_socket(run->http, server->host, server->port) < 0) {
US_LOG_PERROR("Can't bind HTTP on [%s]:%u", server->host, server->port)
_S_LOG_PERROR("Can't bind HTTP on [%s]:%u", server->host, server->port)
return -1;
}
US_LOG_INFO("Listening HTTP on [%s]:%u", server->host, server->port);
_S_LOG_INFO("Listening HTTP on [%s]:%u", server->host, server->port);
}
return 0;
}
void us_server_loop(us_server_s *server) {
US_LOG_INFO("Starting HTTP eventloop ...");
_S_LOG_INFO("Starting eventloop ...");
event_base_dispatch(server->run->base);
US_LOG_INFO("HTTP eventloop stopped");
_S_LOG_INFO("Eventloop stopped");
}
void us_server_loop_break(us_server_s *server) {
event_base_loopbreak(server->run->base);
}
#define ADD_HEADER(x_key, x_value) assert(!evhttp_add_header(evhttp_request_get_output_headers(request), x_key, x_value))
static int _http_preprocess_request(struct evhttp_request *request, us_server_s *server) {
us_server_runtime_s *const run = server->run;
const us_server_runtime_s *const run = server->run;
run->last_request_ts = us_get_now_monotonic();
atomic_store(&server->stream->run->http_last_request_ts, us_get_now_monotonic());
if (server->allow_origin[0] != '\0') {
const char *const cors_headers = _http_get_header(request, "Access-Control-Request-Headers");
const char *const cors_method = _http_get_header(request, "Access-Control-Request-Method");
ADD_HEADER("Access-Control-Allow-Origin", server->allow_origin);
ADD_HEADER("Access-Control-Allow-Credentials", "true");
_A_ADD_HEADER(request, "Access-Control-Allow-Origin", server->allow_origin);
_A_ADD_HEADER(request, "Access-Control-Allow-Credentials", "true");
if (cors_headers != NULL) {
ADD_HEADER("Access-Control-Allow-Headers", cors_headers);
_A_ADD_HEADER(request, "Access-Control-Allow-Headers", cors_headers);
}
if (cors_method != NULL) {
ADD_HEADER("Access-Control-Allow-Methods", cors_method);
_A_ADD_HEADER(request, "Access-Control-Allow-Methods", cors_method);
}
}
@@ -246,9 +299,8 @@ static int _http_preprocess_request(struct evhttp_request *request, us_server_s
if (run->auth_token != NULL) {
const char *const token = _http_get_header(request, "Authorization");
if (token == NULL || strcmp(token, run->auth_token) != 0) {
ADD_HEADER("WWW-Authenticate", "Basic realm=\"Restricted area\"");
_A_ADD_HEADER(request, "WWW-Authenticate", "Basic realm=\"Restricted area\"");
evhttp_send_reply(request, 401, "Unauthorized", NULL);
return -1;
}
@@ -258,7 +310,6 @@ static int _http_preprocess_request(struct evhttp_request *request, us_server_s
evhttp_send_reply(request, HTTP_OK, "OK", NULL);
return -1;
}
return 0;
}
@@ -296,7 +347,7 @@ static int _http_check_run_compat_action(struct evhttp_request *request, void *v
}
static void _http_callback_root(struct evhttp_request *request, void *v_server) {
us_server_s *const server = (us_server_s *)v_server;
us_server_s *const server = v_server;
PREPROCESS_REQUEST;
COMPAT_REQUEST;
@@ -304,28 +355,28 @@ static void _http_callback_root(struct evhttp_request *request, void *v_server)
struct evbuffer *buf;
_A_EVBUFFER_NEW(buf);
_A_EVBUFFER_ADD_PRINTF(buf, "%s", US_HTML_INDEX_PAGE);
ADD_HEADER("Content-Type", "text/html");
_A_ADD_HEADER(request, "Content-Type", "text/html");
evhttp_send_reply(request, HTTP_OK, "OK", buf);
evbuffer_free(buf);
}
static void _http_callback_favicon(struct evhttp_request *request, void *v_server) {
us_server_s *const server = (us_server_s *)v_server;
us_server_s *const server = v_server;
PREPROCESS_REQUEST;
struct evbuffer *buf;
_A_EVBUFFER_NEW(buf);
_A_EVBUFFER_ADD(buf, (const void *)US_FAVICON_ICO_DATA, US_FAVICON_ICO_DATA_SIZE);
ADD_HEADER("Content-Type", "image/x-icon");
_A_EVBUFFER_ADD(buf, (const void*)US_FAVICON_ICO_DATA, US_FAVICON_ICO_DATA_SIZE);
_A_ADD_HEADER(request, "Content-Type", "image/x-icon");
evhttp_send_reply(request, HTTP_OK, "OK", buf);
evbuffer_free(buf);
}
static void _http_callback_static(struct evhttp_request *request, void *v_server) {
us_server_s *const server = (us_server_s *)v_server;
us_server_s *const server = v_server;
PREPROCESS_REQUEST;
COMPAT_REQUEST;
@@ -341,7 +392,7 @@ static void _http_callback_static(struct evhttp_request *request, void *v_server
if ((uri = evhttp_uri_parse(evhttp_request_get_uri(request))) == NULL) {
goto bad_request;
}
if ((uri_path = (char *)evhttp_uri_get_path(uri)) == NULL) {
if ((uri_path = (char*)evhttp_uri_get_path(uri)) == NULL) {
uri_path = "/";
}
if ((decoded_path = evhttp_uridecode(uri_path, 0, NULL)) == NULL) {
@@ -356,19 +407,18 @@ static void _http_callback_static(struct evhttp_request *request, void *v_server
}
if ((fd = open(static_path, O_RDONLY)) < 0) {
US_LOG_PERROR("HTTP: Can't open found static file %s", static_path);
_S_LOG_PERROR("Can't open found static file %s", static_path);
goto not_found;
}
{
struct stat st;
if (fstat(fd, &st) < 0) {
US_LOG_PERROR("HTTP: Can't stat() found static file %s", static_path);
_S_LOG_PERROR("Can't stat() found static file %s", static_path);
goto not_found;
}
if (st.st_size > 0 && evbuffer_add_file(buf, fd, 0, st.st_size) < 0) {
US_LOG_ERROR("HTTP: Can't serve static file %s", static_path);
_S_LOG_ERROR("Can't serve static file %s", static_path);
goto not_found;
}
@@ -376,7 +426,7 @@ static void _http_callback_static(struct evhttp_request *request, void *v_server
// and will close it when finished transferring data
fd = -1;
ADD_HEADER("Content-Type", us_guess_mime_type(static_path));
_A_ADD_HEADER(request, "Content-Type", us_guess_mime_type(static_path));
evhttp_send_reply(request, HTTP_OK, "OK", buf);
goto cleanup;
}
@@ -400,7 +450,7 @@ cleanup:
#undef COMPAT_REQUEST
static void _http_callback_state(struct evhttp_request *request, void *v_server) {
us_server_s *const server = (us_server_s *)v_server;
us_server_s *const server = v_server;
us_server_runtime_s *const run = server->run;
us_server_exposed_s *const ex = run->exposed;
us_stream_s *const stream = server->stream;
@@ -408,7 +458,7 @@ static void _http_callback_state(struct evhttp_request *request, void *v_server)
PREPROCESS_REQUEST;
us_encoder_type_e enc_type;
unsigned enc_quality;
uint enc_quality;
us_encoder_get_runtime_params(stream->enc, &enc_type, &enc_quality);
struct evbuffer *buf;
@@ -432,33 +482,38 @@ static void _http_callback_state(struct evhttp_request *request, void *v_server)
);
}
if (stream->sink != NULL || stream->h264_sink != NULL) {
if (stream->jpeg_sink != NULL || stream->h264_sink != NULL) {
_A_EVBUFFER_ADD_PRINTF(buf, " \"sinks\": {");
if (stream->sink != NULL) {
if (stream->jpeg_sink != NULL) {
_A_EVBUFFER_ADD_PRINTF(buf,
"\"jpeg\": {\"has_clients\": %s}",
us_bool_to_string(atomic_load(&stream->sink->has_clients))
us_bool_to_string(atomic_load(&stream->jpeg_sink->has_clients))
);
}
if (stream->h264_sink != NULL) {
_A_EVBUFFER_ADD_PRINTF(buf,
"%s\"h264\": {\"has_clients\": %s}",
(stream->sink ? ", " : ""),
(stream->jpeg_sink ? ", " : ""),
us_bool_to_string(atomic_load(&stream->h264_sink->has_clients))
);
}
_A_EVBUFFER_ADD_PRINTF(buf, "},");
}
uint width;
uint height;
bool online;
uint captured_fps;
us_stream_get_capture_state(stream, &width, &height, &online, &captured_fps);
_A_EVBUFFER_ADD_PRINTF(buf,
" \"source\": {\"resolution\": {\"width\": %u, \"height\": %u},"
" \"online\": %s, \"desired_fps\": %u, \"captured_fps\": %u},"
" \"stream\": {\"queued_fps\": %u, \"clients\": %u, \"clients_stat\": {",
(server->fake_width ? server->fake_width : ex->frame->width),
(server->fake_height ? server->fake_height : ex->frame->height),
us_bool_to_string(ex->frame->online),
(server->fake_width ? server->fake_width : width),
(server->fake_height ? server->fake_height : height),
us_bool_to_string(online),
stream->dev->desired_fps,
atomic_load(&stream->run->captured_fps),
captured_fps,
ex->queued_fps,
run->stream_clients_count
);
@@ -480,62 +535,26 @@ static void _http_callback_state(struct evhttp_request *request, void *v_server)
_A_EVBUFFER_ADD_PRINTF(buf, "}}}}");
ADD_HEADER("Content-Type", "application/json");
_A_ADD_HEADER(request, "Content-Type", "application/json");
evhttp_send_reply(request, HTTP_OK, "OK", buf);
evbuffer_free(buf);
}
static void _http_callback_snapshot(struct evhttp_request *request, void *v_server) {
us_server_s *const server = (us_server_s *)v_server;
us_server_exposed_s *const ex = server->run->exposed;
us_server_s *const server = v_server;
PREPROCESS_REQUEST;
struct evbuffer *buf;
_A_EVBUFFER_NEW(buf);
_A_EVBUFFER_ADD(buf, (const void*)ex->frame->data, ex->frame->used);
us_snapshot_client_s *client;
US_CALLOC(client, 1);
client->server = server;
client->request = request;
client->request_ts = us_get_now_monotonic();
ADD_HEADER("Cache-Control", "no-store, no-cache, must-revalidate, proxy-revalidate, pre-check=0, post-check=0, max-age=0");
ADD_HEADER("Pragma", "no-cache");
ADD_HEADER("Expires", "Mon, 3 Jan 2000 12:34:56 GMT");
char header_buf[256];
# define ADD_TIME_HEADER(x_key, x_value) { \
US_SNPRINTF(header_buf, 255, "%.06Lf", x_value); \
ADD_HEADER(x_key, header_buf); \
}
# define ADD_UNSIGNED_HEADER(x_key, x_value) { \
US_SNPRINTF(header_buf, 255, "%u", x_value); \
ADD_HEADER(x_key, header_buf); \
}
ADD_TIME_HEADER("X-Timestamp", us_get_now_real());
ADD_HEADER("X-UStreamer-Online", us_bool_to_string(ex->frame->online));
ADD_UNSIGNED_HEADER("X-UStreamer-Dropped", ex->dropped);
ADD_UNSIGNED_HEADER("X-UStreamer-Width", ex->frame->width);
ADD_UNSIGNED_HEADER("X-UStreamer-Height", ex->frame->height);
ADD_TIME_HEADER("X-UStreamer-Grab-Timestamp", ex->frame->grab_ts);
ADD_TIME_HEADER("X-UStreamer-Encode-Begin-Timestamp", ex->frame->encode_begin_ts);
ADD_TIME_HEADER("X-UStreamer-Encode-End-Timestamp", ex->frame->encode_end_ts);
ADD_TIME_HEADER("X-UStreamer-Expose-Begin-Timestamp", ex->expose_begin_ts);
ADD_TIME_HEADER("X-UStreamer-Expose-Cmp-Timestamp", ex->expose_cmp_ts);
ADD_TIME_HEADER("X-UStreamer-Expose-End-Timestamp", ex->expose_end_ts);
ADD_TIME_HEADER("X-UStreamer-Send-Timestamp", us_get_now_monotonic());
# undef ADD_UNSIGNED_HEADER
# undef ADD_TIME_HEADER
ADD_HEADER("Content-Type", "image/jpeg");
evhttp_send_reply(request, HTTP_OK, "OK", buf);
evbuffer_free(buf);
atomic_fetch_add(&server->stream->run->http_snapshot_requested, 1);
US_LIST_APPEND(server->run->snapshot_clients, client);
}
#undef ADD_HEADER
static void _http_callback_stream(struct evhttp_request *request, void *v_server) {
// https://github.com/libevent/libevent/blob/29cc8386a2f7911eaa9336692a2c5544d8b4734f/http.c#L2814
// https://github.com/libevent/libevent/blob/29cc8386a2f7911eaa9336692a2c5544d8b4734f/http.c#L2789
@@ -543,7 +562,7 @@ static void _http_callback_stream(struct evhttp_request *request, void *v_server
// https://github.com/libevent/libevent/blob/29cc8386a2f7911eaa9336692a2c5544d8b4734f/http.c#L791
// https://github.com/libevent/libevent/blob/29cc8386a2f7911eaa9336692a2c5544d8b4734f/http.c#L1458
us_server_s *const server = (us_server_s *)v_server;
us_server_s *const server = v_server;
us_server_runtime_s *const run = server->run;
PREPROCESS_REQUEST;
@@ -580,20 +599,20 @@ static void _http_callback_stream(struct evhttp_request *request, void *v_server
# endif
}
US_LOG_INFO("HTTP: NEW client (now=%u): %s, id=%" PRIx64,
_S_LOG_INFO("NEW client (now=%u): %s, id=%" PRIx64,
run->stream_clients_count, client->hostport, client->id);
struct bufferevent *const buf_event = evhttp_connection_get_bufferevent(conn);
if (server->tcp_nodelay && run->ext_fd >= 0) {
US_LOG_DEBUG("HTTP: Setting up TCP_NODELAY to the client %s ...", client->hostport);
_S_LOG_DEBUG("Setting up TCP_NODELAY to the client %s ...", client->hostport);
const evutil_socket_t fd = bufferevent_getfd(buf_event);
assert(fd >= 0);
int on = 1;
if (setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, (void *)&on, sizeof(on)) != 0) {
US_LOG_PERROR("HTTP: Can't set TCP_NODELAY to the client %s", client->hostport);
if (setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, (void*)&on, sizeof(on)) != 0) {
_S_LOG_PERROR("Can't set TCP_NODELAY to the client %s", client->hostport);
}
}
bufferevent_setcb(buf_event, NULL, NULL, _http_callback_stream_error, (void *)client);
bufferevent_setcb(buf_event, NULL, NULL, _http_callback_stream_error, (void*)client);
bufferevent_enable(buf_event, EV_READ);
} else {
evhttp_request_free(request);
@@ -603,17 +622,17 @@ static void _http_callback_stream(struct evhttp_request *request, void *v_server
#undef PREPROCESS_REQUEST
static void _http_callback_stream_write(struct bufferevent *buf_event, void *v_client) {
us_stream_client_s *const client = (us_stream_client_s *)v_client;
us_stream_client_s *const client = v_client;
us_server_s *const server = client->server;
us_server_exposed_s *const ex = server->run->exposed;
const long double now = us_get_now_monotonic();
const long long now_second = us_floor_ms(now);
const ldf now_ts = us_get_now_monotonic();
const sll now_sec_ts = us_floor_ms(now_ts);
if (now_second != client->fps_accum_second) {
if (now_sec_ts != client->fps_ts) {
client->fps = client->fps_accum;
client->fps_accum = 0;
client->fps_accum_second = now_second;
client->fps_ts = now_sec_ts;
}
client->fps_accum += 1;
@@ -724,8 +743,8 @@ static void _http_callback_stream_write(struct bufferevent *buf_event, void *v_c
ex->expose_begin_ts,
ex->expose_cmp_ts,
ex->expose_end_ts,
now,
now - ex->frame->grab_ts
now_ts,
now_ts - ex->frame->grab_ts
);
}
}
@@ -742,7 +761,7 @@ static void _http_callback_stream_write(struct bufferevent *buf_event, void *v_c
assert(!bufferevent_write_buffer(buf_event, buf));
evbuffer_free(buf);
bufferevent_setcb(buf_event, NULL, NULL, _http_callback_stream_error, (void *)client);
bufferevent_setcb(buf_event, NULL, NULL, _http_callback_stream_error, (void*)client);
bufferevent_enable(buf_event, EV_READ);
# undef ADD_ADVANCE_HEADERS
@@ -753,7 +772,7 @@ static void _http_callback_stream_error(struct bufferevent *buf_event, short wha
(void)buf_event;
(void)what;
us_stream_client_s *const client = (us_stream_client_s *)v_client;
us_stream_client_s *const client = v_client;
us_server_s *const server = client->server;
us_server_runtime_s *const run = server->run;
@@ -767,7 +786,7 @@ static void _http_callback_stream_error(struct bufferevent *buf_event, short wha
}
char *const reason = us_bufferevent_format_reason(what);
US_LOG_INFO("HTTP: DEL client (now=%u): %s, id=%" PRIx64 ", %s",
_S_LOG_INFO("DEL client (now=%u): %s, id=%" PRIx64 ", %s",
run->stream_clients_count, client->hostport, client->id, reason);
free(reason);
@@ -779,7 +798,7 @@ static void _http_callback_stream_error(struct bufferevent *buf_event, short wha
free(client);
}
static void _http_queue_send_stream(us_server_s *server, bool stream_updated, bool frame_updated) {
static void _http_send_stream(us_server_s *server, bool stream_updated, bool frame_updated) {
us_server_runtime_s *const run = server->run;
us_server_exposed_s *const ex = run->exposed;
@@ -805,7 +824,7 @@ static void _http_queue_send_stream(us_server_s *server, bool stream_updated, bo
if (dual_update || frame_updated || client->need_first_frame) {
struct bufferevent *const buf_event = evhttp_connection_get_bufferevent(conn);
bufferevent_setcb(buf_event, NULL, _http_callback_stream_write, _http_callback_stream_error, (void *)client);
bufferevent_setcb(buf_event, NULL, _http_callback_stream_write, _http_callback_stream_error, (void*)client);
bufferevent_enable(buf_event, EV_READ|EV_WRITE);
client->need_first_frame = false;
@@ -820,13 +839,13 @@ static void _http_queue_send_stream(us_server_s *server, bool stream_updated, bo
});
if (queued) {
static unsigned queued_fps_accum = 0;
static long long queued_fps_second = 0;
const long long now = us_floor_ms(us_get_now_monotonic());
if (now != queued_fps_second) {
static uint queued_fps_accum = 0;
static sll queued_fps_ts = 0;
const sll now_sec_ts = us_floor_ms(us_get_now_monotonic());
if (now_sec_ts != queued_fps_ts) {
ex->queued_fps = queued_fps_accum;
queued_fps_accum = 0;
queued_fps_second = now;
queued_fps_ts = now_sec_ts;
}
queued_fps_accum += 1;
} else if (!has_clients) {
@@ -834,29 +853,82 @@ static void _http_queue_send_stream(us_server_s *server, bool stream_updated, bo
}
}
static void _http_request_watcher(int fd, short what, void *v_server) {
(void)fd;
(void)what;
static void _http_send_snapshot(us_server_s *server) {
us_server_exposed_s *const ex = server->run->exposed;
us_blank_s *blank = NULL;
us_server_s *const server = (us_server_s *)v_server;
us_server_runtime_s *const run = server->run;
const long double now = us_get_now_monotonic();
# define ADD_TIME_HEADER(x_key, x_value) { \
US_SNPRINTF(header_buf, 255, "%.06Lf", x_value); \
_A_ADD_HEADER(request, x_key, header_buf); \
}
if (us_stream_has_clients(server->stream)) {
run->last_request_ts = now;
} else if (run->last_request_ts + server->exit_on_no_clients < now) {
US_LOG_INFO("HTTP: No requests or HTTP/sink clients found in last %u seconds, exiting ...",
server->exit_on_no_clients);
us_process_suicide();
run->last_request_ts = now;
}
# define ADD_UNSIGNED_HEADER(x_key, x_value) { \
US_SNPRINTF(header_buf, 255, "%u", x_value); \
_A_ADD_HEADER(request, x_key, header_buf); \
}
uint width;
uint height;
uint captured_fps; // Unused
bool online;
us_stream_get_capture_state(server->stream, &width, &height, &online, &captured_fps);
US_LIST_ITERATE(server->run->snapshot_clients, client, { // cppcheck-suppress constStatement
struct evhttp_request *request = client->request;
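// Answer a queued client either once no fresh snapshot is pending anymore,
// or once it has been waiting longer than the timeout.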
const bool has_fresh_snapshot = (atomic_load(&server->stream->run->http_snapshot_requested) == 0);
const bool timed_out = (client->request_ts + US_MAX((uint)1, server->stream->error_delay * 3) < us_get_now_monotonic());
if (has_fresh_snapshot || timed_out) {
us_frame_s *frame = ex->frame;
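// If the capture is offline, lazily render a "< NO SIGNAL >" blank
// and serve it instead of the stale exposed frame.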
if (!online) {
if (blank == NULL) {
blank = us_blank_init();
us_blank_draw(blank, "< NO SIGNAL >", width, height);
}
frame = blank->jpeg;
}
struct evbuffer *buf;
_A_EVBUFFER_NEW(buf);
_A_EVBUFFER_ADD(buf, (const void*)frame->data, frame->used);
_A_ADD_HEADER(request, "Cache-Control", "no-store, no-cache, must-revalidate, proxy-revalidate, pre-check=0, post-check=0, max-age=0");
_A_ADD_HEADER(request, "Pragma", "no-cache");
_A_ADD_HEADER(request, "Expires", "Mon, 3 Jan 2000 12:34:56 GMT");
char header_buf[256];
ADD_TIME_HEADER("X-Timestamp", us_get_now_real());
_A_ADD_HEADER(request, "X-UStreamer-Online", us_bool_to_string(frame->online));
ADD_UNSIGNED_HEADER("X-UStreamer-Width", frame->width);
ADD_UNSIGNED_HEADER("X-UStreamer-Height", frame->height);
ADD_TIME_HEADER("X-UStreamer-Grab-Timestamp", frame->grab_ts);
ADD_TIME_HEADER("X-UStreamer-Encode-Begin-Timestamp", frame->encode_begin_ts);
ADD_TIME_HEADER("X-UStreamer-Encode-End-Timestamp", frame->encode_end_ts);
ADD_TIME_HEADER("X-UStreamer-Send-Timestamp", us_get_now_monotonic());
_A_ADD_HEADER(request, "Content-Type", "image/jpeg");
evhttp_send_reply(request, HTTP_OK, "OK", buf);
evbuffer_free(buf);
US_LIST_REMOVE(server->run->snapshot_clients, client);
free(client);
}
});
# undef ADD_UNSIGNED_HEADER
# undef ADD_TIME_HEADER
US_DELETE(blank, us_blank_destroy);
}
static void _http_refresher(int fd, short what, void *v_server) {
(void)fd;
(void)what;
us_server_s *server = (us_server_s *)v_server;
us_server_s *server = v_server;
us_server_exposed_s *ex = server->run->exposed;
us_ring_s *const ring = server->stream->run->http_jpeg_ring;
@@ -870,7 +942,7 @@ static void _http_refresher(int fd, short what, void *v_server) {
stream_updated = true;
us_ring_consumer_release(ring, ri);
} else if (ex->expose_end_ts + 1 < us_get_now_monotonic()) {
US_LOG_DEBUG("HTTP: Repeating exposed ...");
_S_LOG_DEBUG("Repeating exposed ...");
ex->expose_begin_ts = us_get_now_monotonic();
ex->expose_cmp_ts = ex->expose_begin_ts;
ex->expose_end_ts = ex->expose_begin_ts;
@@ -878,7 +950,8 @@ static void _http_refresher(int fd, short what, void *v_server) {
stream_updated = true;
}
_http_queue_send_stream(server, stream_updated, frame_updated);
_http_send_stream(server, stream_updated, frame_updated);
_http_send_snapshot(server);
if (
frame_updated
@@ -899,7 +972,7 @@ static void _http_refresher(int fd, short what, void *v_server) {
static bool _expose_frame(us_server_s *server, const us_frame_s *frame) {
us_server_exposed_s *const ex = server->run->exposed;
US_LOG_DEBUG("HTTP: Updating exposed frame (online=%d) ...", frame->online);
_S_LOG_DEBUG("Updating exposed frame (online=%d) ...", frame->online);
ex->expose_begin_ts = us_get_now_monotonic();
if (server->drop_same_frames && frame->online) {
@@ -911,13 +984,13 @@ static bool _expose_frame(us_server_s *server, const us_frame_s *frame) {
) {
ex->expose_cmp_ts = us_get_now_monotonic();
ex->expose_end_ts = ex->expose_cmp_ts;
US_LOG_VERBOSE("HTTP: Dropped same frame number %u; cmp_time=%.06Lf",
_S_LOG_VERBOSE("Dropped same frame number %u; cmp_time=%.06Lf",
ex->dropped, (ex->expose_cmp_ts - ex->expose_begin_ts));
ex->dropped += 1;
return false; // Not updated
} else {
ex->expose_cmp_ts = us_get_now_monotonic();
US_LOG_VERBOSE("HTTP: Passed same frame check (need_drop=%d, maybe_same=%d); cmp_time=%.06Lf",
_S_LOG_VERBOSE("Passed same frame check (need_drop=%d, maybe_same=%d); cmp_time=%.06Lf",
need_drop, maybe_same, (ex->expose_cmp_ts - ex->expose_begin_ts));
}
}
@@ -934,7 +1007,7 @@ static bool _expose_frame(us_server_s *server, const us_frame_s *frame) {
ex->expose_cmp_ts = ex->expose_begin_ts;
ex->expose_end_ts = us_get_now_monotonic();
US_LOG_VERBOSE("HTTP: Exposed frame: online=%d, exp_time=%.06Lf",
_S_LOG_VERBOSE("Exposed frame: online=%d, exp_time=%.06Lf",
ex->frame->online, (ex->expose_end_ts - ex->expose_begin_ts));
return true; // Updated
}
@@ -957,7 +1030,7 @@ static char *_http_get_client_hostport(struct evhttp_request *request) {
if (xff != NULL) {
US_DELETE(addr, free);
assert((addr = strndup(xff, 1024)) != NULL);
for (unsigned index = 0; addr[index]; ++index) {
for (uint index = 0; addr[index]; ++index) {
if (addr[index] == ',') {
addr[index] = '\0';
break;

View File

@@ -22,95 +22,61 @@
#pragma once
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stdbool.h>
#include <stdatomic.h>
#include <string.h>
#include <inttypes.h>
#include <unistd.h>
#include <fcntl.h>
#include <assert.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <netinet/tcp.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <event2/util.h>
#include <event2/event.h>
#include <event2/thread.h>
#include <event2/http.h>
#include <event2/buffer.h>
#include <event2/bufferevent.h>
#include <event2/keyvalq_struct.h>
#ifndef EVTHREAD_USE_PTHREADS_IMPLEMENTED
# error Required libevent-pthreads support
#endif
#include "../../libs/tools.h"
#include "../../libs/threading.h"
#include "../../libs/logging.h"
#include "../../libs/process.h"
#include "../../libs/types.h"
#include "../../libs/frame.h"
#include "../../libs/base64.h"
#include "../../libs/list.h"
#include "../data/index_html.h"
#include "../data/favicon_ico.h"
#include "../encoder.h"
#include "../stream.h"
#ifdef WITH_GPIO
# include "../gpio/gpio.h"
#endif
#include "bev.h"
#include "unix.h"
#include "uri.h"
#include "mime.h"
#include "static.h"
#ifdef WITH_SYSTEMD
# include "systemd/systemd.h"
#endif
typedef struct us_stream_client_sx {
struct us_server_sx *server;
struct evhttp_request *request;
char *key;
bool extra_headers;
bool advance_headers;
bool dual_final_frames;
bool zero_data;
char *key;
bool extra_headers;
bool advance_headers;
bool dual_final_frames;
bool zero_data;
char *hostport;
uint64_t id;
bool need_initial;
bool need_first_frame;
bool updated_prev;
unsigned fps;
unsigned fps_accum;
long long fps_accum_second;
char *hostport;
u64 id;
bool need_initial;
bool need_first_frame;
bool updated_prev;
uint fps_accum;
sll fps_ts;
uint fps;
US_LIST_STRUCT(struct us_stream_client_sx);
} us_stream_client_s;
typedef struct {
us_frame_s *frame;
unsigned captured_fps;
unsigned queued_fps;
unsigned dropped;
long double expose_begin_ts;
long double expose_cmp_ts;
long double expose_end_ts;
typedef struct us_snapshot_client_sx {
struct us_server_sx *server;
struct evhttp_request *request;
ldf request_ts;
bool notify_last_online;
unsigned notify_last_width;
unsigned notify_last_height;
US_LIST_STRUCT(struct us_snapshot_client_sx);
} us_snapshot_client_s;
typedef struct {
us_frame_s *frame;
uint captured_fps;
uint queued_fps;
uint dropped;
ldf expose_begin_ts;
ldf expose_cmp_ts;
ldf expose_end_ts;
bool notify_last_online;
uint notify_last_width;
uint notify_last_height;
} us_server_exposed_s;
typedef struct {
@@ -120,45 +86,43 @@ typedef struct {
char *auth_token;
struct event *request_watcher;
long double last_request_ts;
struct event *refresher;
us_server_exposed_s *exposed;
us_stream_client_s *stream_clients;
unsigned stream_clients_count;
uint stream_clients_count;
us_snapshot_client_s *snapshot_clients;
} us_server_runtime_s;
typedef struct us_server_sx {
char *host;
unsigned port;
us_stream_s *stream;
char *unix_path;
bool unix_rm;
mode_t unix_mode;
char *host;
uint port;
char *unix_path;
bool unix_rm;
mode_t unix_mode;
# ifdef WITH_SYSTEMD
bool systemd;
bool systemd;
# endif
bool tcp_nodelay;
unsigned timeout;
bool tcp_nodelay;
uint timeout;
char *user;
char *passwd;
char *static_path;
char *allow_origin;
char *instance_id;
char *user;
char *passwd;
char *static_path;
char *allow_origin;
char *instance_id;
unsigned drop_same_frames;
unsigned fake_width;
unsigned fake_height;
uint drop_same_frames;
uint fake_width;
uint fake_height;
bool notify_parent;
unsigned exit_on_no_clients;
us_stream_s *stream;
bool notify_parent;
us_server_runtime_s *run;
} us_server_s;

View File

@@ -22,6 +22,19 @@
#include "static.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <assert.h>
#include <sys/stat.h>
#include "../../libs/tools.h"
#include "../../libs/logging.h"
#include "path.h"
char *us_find_static_file_path(const char *root_path, const char *request_path) {
char *path = NULL;

View File

@@ -22,18 +22,5 @@
#pragma once
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <assert.h>
#include <sys/stat.h>
#include "../../libs/tools.h"
#include "../../libs/logging.h"
#include "path.h"
char *us_find_static_file_path(const char *root_path, const char *request_path);

View File

@@ -22,11 +22,22 @@
#include "systemd.h"
#include <unistd.h>
#include <assert.h>
#include <event2/http.h>
#include <event2/util.h>
#include <systemd/sd-daemon.h>
#include "../../../libs/tools.h"
#include "../../../libs/logging.h"
evutil_socket_t us_evhttp_bind_systemd(struct evhttp *http) {
const int fds = sd_listen_fds(1);
if (fds < 1) {
US_LOG_ERROR("No available systemd sockets");
US_LOG_ERROR("HTTP: No available systemd sockets");
return -1;
}
@@ -39,7 +50,7 @@ evutil_socket_t us_evhttp_bind_systemd(struct evhttp *http) {
assert(!evutil_make_socket_nonblocking(fd));
if (evhttp_accept_socket(http, fd) < 0) {
US_LOG_PERROR("Can't evhttp_accept_socket() systemd socket");
US_LOG_PERROR("HTTP: Can't evhttp_accept_socket() systemd socket");
return -1;
}
return fd;

View File

@@ -22,16 +22,8 @@
#pragma once
#include <unistd.h>
#include <assert.h>
#include <event2/http.h>
#include <event2/util.h>
#include <systemd/sd-daemon.h>
#include "../../../libs/tools.h"
#include "../../../libs/logging.h"
evutil_socket_t us_evhttp_bind_systemd(struct evhttp *http);

View File

@@ -22,13 +22,29 @@
#include "unix.h"
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <assert.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <sys/stat.h>
#include <event2/http.h>
#include <event2/util.h>
#include "../../libs/types.h"
#include "../../libs/tools.h"
#include "../../libs/logging.h"
evutil_socket_t us_evhttp_bind_unix(struct evhttp *http, const char *path, bool rm, mode_t mode) {
struct sockaddr_un addr = {0};
const size_t max_sun_path = sizeof(addr.sun_path) - 1;
const uz max_sun_path = sizeof(addr.sun_path) - 1;
if (strlen(path) > max_sun_path) {
US_LOG_ERROR("UNIX socket path is too long; max=%zu", max_sun_path);
US_LOG_ERROR("HTTP: UNIX socket path is too long; max=%zu", max_sun_path);
return -1;
}
@@ -41,24 +57,24 @@ evutil_socket_t us_evhttp_bind_unix(struct evhttp *http, const char *path, bool
if (rm && unlink(path) < 0) {
if (errno != ENOENT) {
US_LOG_PERROR("Can't remove old UNIX socket '%s'", path);
US_LOG_PERROR("HTTP: Can't remove old UNIX socket '%s'", path);
return -1;
}
}
if (bind(fd, (struct sockaddr *)&addr, sizeof(struct sockaddr_un)) < 0) {
US_LOG_PERROR("Can't bind HTTP to UNIX socket '%s'", path);
if (bind(fd, (struct sockaddr*)&addr, sizeof(struct sockaddr_un)) < 0) {
US_LOG_PERROR("HTTP: Can't bind HTTP to UNIX socket '%s'", path);
return -1;
}
if (mode && chmod(path, mode) < 0) {
US_LOG_PERROR("Can't set permissions %o to UNIX socket '%s'", mode, path);
US_LOG_PERROR("HTTP: Can't set permissions %o to UNIX socket '%s'", mode, path);
return -1;
}
if (listen(fd, 128) < 0) {
US_LOG_PERROR("Can't listen UNIX socket '%s'", path);
US_LOG_PERROR("HTTP: Can't listen UNIX socket '%s'", path);
return -1;
}
if (evhttp_accept_socket(http, fd) < 0) {
US_LOG_PERROR("Can't evhttp_accept_socket() UNIX socket '%s'", path);
US_LOG_PERROR("HTTP: Can't evhttp_accept_socket() UNIX socket '%s'", path);
return -1;
}
return fd;

View File

@@ -22,21 +22,12 @@
#pragma once
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <assert.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <sys/stat.h>
#include <event2/http.h>
#include <event2/util.h>
#include "../../libs/tools.h"
#include "../../libs/logging.h"
#include "../../libs/types.h"
evutil_socket_t us_evhttp_bind_unix(struct evhttp *http, const char *path, bool rm, mode_t mode);

View File

@@ -22,6 +22,12 @@
#include "uri.h"
#include <event2/util.h>
#include <event2/http.h>
#include <event2/keyvalq_struct.h>
#include "../../libs/types.h"
bool us_uri_get_true(struct evkeyvalq *params, const char *key) {
const char *value_str = evhttp_find_header(params, key);

View File

@@ -22,12 +22,10 @@
#pragma once
#include <stdbool.h>
#include <event2/util.h>
#include <event2/http.h>
#include <event2/keyvalq_struct.h>
#include "../../libs/types.h"
bool us_uri_get_true(struct evkeyvalq *params, const char *key);
char *us_uri_get_string(struct evkeyvalq *params, const char *key);

View File

@@ -22,16 +22,34 @@
#include "m2m.h"
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <fcntl.h>
#include <poll.h>
#include <errno.h>
#include <assert.h>
#include <sys/mman.h>
#include <linux/videodev2.h>
#include "../libs/types.h"
#include "../libs/tools.h"
#include "../libs/logging.h"
#include "../libs/frame.h"
#include "../libs/xioctl.h"
static us_m2m_encoder_s *_m2m_encoder_init(
const char *name, const char *path, unsigned output_format,
unsigned fps, unsigned bitrate, unsigned gop, unsigned quality, bool allow_dma);
const char *name, const char *path, uint output_format,
uint bitrate, uint gop, uint quality, bool allow_dma);
static void _m2m_encoder_prepare(us_m2m_encoder_s *enc, const us_frame_s *frame);
static void _m2m_encoder_ensure(us_m2m_encoder_s *enc, const us_frame_s *frame);
static int _m2m_encoder_init_buffers(
us_m2m_encoder_s *enc, const char *name, enum v4l2_buf_type type,
us_m2m_buffer_s **bufs_ptr, unsigned *n_bufs_ptr, bool dma);
us_m2m_buffer_s **bufs_ptr, uint *n_bufs_ptr, bool dma);
static void _m2m_encoder_cleanup(us_m2m_encoder_s *enc);
@@ -44,18 +62,13 @@ static int _m2m_encoder_compress_raw(us_m2m_encoder_s *enc, const us_frame_s *sr
#define _E_LOG_VERBOSE(x_msg, ...) US_LOG_VERBOSE("%s: " x_msg, enc->name, ##__VA_ARGS__)
#define _E_LOG_DEBUG(x_msg, ...) US_LOG_DEBUG("%s: " x_msg, enc->name, ##__VA_ARGS__)
#define _RUN(x_next) enc->run->x_next
us_m2m_encoder_s *us_m2m_h264_encoder_init(const char *name, const char *path, unsigned bitrate, unsigned gop) {
// FIXME: 30 or 0? https://github.com/6by9/yavta/blob/master/yavta.c#L2100
// Logically 0 should be the correct value, but for some reason at low resolutions like 640x480
// the encoder stops producing valid frames after a few seconds.
us_m2m_encoder_s *us_m2m_h264_encoder_init(const char *name, const char *path, uint bitrate, uint gop) {
bitrate *= 1000; // From Kbps
return _m2m_encoder_init(name, path, V4L2_PIX_FMT_H264, 30, bitrate, gop, 0, true);
return _m2m_encoder_init(name, path, V4L2_PIX_FMT_H264, bitrate, gop, 0, true);
}
us_m2m_encoder_s *us_m2m_mjpeg_encoder_init(const char *name, const char *path, unsigned quality) {
us_m2m_encoder_s *us_m2m_mjpeg_encoder_init(const char *name, const char *path, uint quality) {
const double b_min = 25;
const double b_max = 20000;
const double step = 25;
@@ -63,13 +76,12 @@ us_m2m_encoder_s *us_m2m_mjpeg_encoder_init(const char *name, const char *path,
bitrate = step * round(bitrate / step);
bitrate *= 1000; // From Kbps
assert(bitrate > 0);
// FIXME: Same question about 30 or 0, but this hasn't even been tested at low resolutions yet
return _m2m_encoder_init(name, path, V4L2_PIX_FMT_MJPEG, 30, bitrate, 0, 0, true);
return _m2m_encoder_init(name, path, V4L2_PIX_FMT_MJPEG, bitrate, 0, 0, true);
}
us_m2m_encoder_s *us_m2m_jpeg_encoder_init(const char *name, const char *path, unsigned quality) {
us_m2m_encoder_s *us_m2m_jpeg_encoder_init(const char *name, const char *path, uint quality) {
// FIXME: DMA doesn't work
return _m2m_encoder_init(name, path, V4L2_PIX_FMT_JPEG, 30, 0, 0, quality, false);
return _m2m_encoder_init(name, path, V4L2_PIX_FMT_JPEG, 0, 0, quality, false);
}
void us_m2m_encoder_destroy(us_m2m_encoder_s *enc) {
@@ -81,22 +93,18 @@ void us_m2m_encoder_destroy(us_m2m_encoder_s *enc) {
}
int us_m2m_encoder_compress(us_m2m_encoder_s *enc, const us_frame_s *src, us_frame_s *dest, bool force_key) {
us_m2m_encoder_runtime_s *const run = enc->run;
us_frame_encoding_begin(src, dest, (enc->output_format == V4L2_PIX_FMT_MJPEG ? V4L2_PIX_FMT_JPEG : enc->output_format));
if (
_RUN(width) != src->width
|| _RUN(height) != src->height
|| _RUN(input_format) != src->format
|| _RUN(stride) != src->stride
|| _RUN(dma) != (enc->allow_dma && src->dma_fd >= 0)
) {
_m2m_encoder_prepare(enc, src);
}
if (!_RUN(ready)) { // Already prepared but failed
_m2m_encoder_ensure(enc, src);
if (!run->ready) { // Already prepared but failed
return -1;
}
force_key = (enc->output_format == V4L2_PIX_FMT_H264 && (force_key || _RUN(last_online) != src->online));
force_key = (enc->output_format == V4L2_PIX_FMT_H264 && (force_key || run->last_online != src->online));
_E_LOG_DEBUG("Compressing new frame; force_key=%d ...", force_key);
if (_m2m_encoder_compress_raw(enc, src, dest, force_key) < 0) {
_m2m_encoder_cleanup(enc);
@@ -109,13 +117,13 @@ int us_m2m_encoder_compress(us_m2m_encoder_s *enc, const us_frame_s *src, us_fra
_E_LOG_VERBOSE("Compressed new frame: size=%zu, time=%0.3Lf, force_key=%d",
dest->used, dest->encode_end_ts - dest->encode_begin_ts, force_key);
_RUN(last_online) = src->online;
run->last_online = src->online;
return 0;
}
static us_m2m_encoder_s *_m2m_encoder_init(
const char *name, const char *path, unsigned output_format,
unsigned fps, unsigned bitrate, unsigned gop, unsigned quality, bool allow_dma) {
const char *name, const char *path, uint output_format,
uint bitrate, uint gop, uint quality, bool allow_dma) {
US_LOG_INFO("%s: Initializing encoder ...", name);
@@ -133,7 +141,6 @@ static us_m2m_encoder_s *_m2m_encoder_init(
enc->path = us_strdup(path);
}
enc->output_format = output_format;
enc->fps = fps;
enc->bitrate = bitrate;
enc->gop = gop;
enc->quality = quality;
@@ -143,30 +150,49 @@ static us_m2m_encoder_s *_m2m_encoder_init(
}
#define _E_XIOCTL(x_request, x_value, x_msg, ...) { \
if (us_xioctl(_RUN(fd), x_request, x_value) < 0) { \
if (us_xioctl(run->fd, x_request, x_value) < 0) { \
_E_LOG_PERROR(x_msg, ##__VA_ARGS__); \
goto error; \
} \
}
static void _m2m_encoder_prepare(us_m2m_encoder_s *enc, const us_frame_s *frame) {
static void _m2m_encoder_ensure(us_m2m_encoder_s *enc, const us_frame_s *frame) {
us_m2m_encoder_runtime_s *const run = enc->run;
const bool dma = (enc->allow_dma && frame->dma_fd >= 0);
if (
run->p_width == frame->width
&& run->p_height == frame->height
&& run->p_input_format == frame->format
&& run->p_stride == frame->stride
&& run->p_dma == dma
) {
return; // Configured already
}
_E_LOG_INFO("Configuring encoder: DMA=%d ...", dma);
_E_LOG_DEBUG("Encoder changes: width=%u->%u, height=%u->%u, input_format=%u->%u, stride=%u->%u, dma=%u->%u",
run->p_width, frame->width,
run->p_height, frame->height,
run->p_input_format, frame->format,
run->p_stride, frame->stride,
run->p_dma, dma);
_m2m_encoder_cleanup(enc);
_RUN(width) = frame->width;
_RUN(height) = frame->height;
_RUN(input_format) = frame->format;
_RUN(stride) = frame->stride;
_RUN(dma) = dma;
run->p_width = frame->width;
run->p_height = frame->height;
run->p_input_format = frame->format;
run->p_stride = frame->stride;
run->p_dma = dma;
if ((_RUN(fd) = open(enc->path, O_RDWR)) < 0) {
_E_LOG_DEBUG("Opening encoder device ...");
if ((run->fd = open(enc->path, O_RDWR)) < 0) {
_E_LOG_PERROR("Can't open encoder device");
goto error;
}
_E_LOG_DEBUG("Encoder device fd=%d opened", _RUN(fd));
_E_LOG_DEBUG("Encoder device fd=%d opened", run->fd);
# define SET_OPTION(x_cid, x_value) { \
struct v4l2_control m_ctl = {0}; \
@@ -175,12 +201,11 @@ static void _m2m_encoder_prepare(us_m2m_encoder_s *enc, const us_frame_s *frame)
_E_LOG_DEBUG("Configuring option " #x_cid " ..."); \
_E_XIOCTL(VIDIOC_S_CTRL, &m_ctl, "Can't set option " #x_cid); \
}
if (enc->output_format == V4L2_PIX_FMT_H264) {
SET_OPTION(V4L2_CID_MPEG_VIDEO_BITRATE, enc->bitrate);
SET_OPTION(V4L2_CID_MPEG_VIDEO_H264_I_PERIOD, enc->gop);
SET_OPTION(V4L2_CID_MPEG_VIDEO_H264_PROFILE, V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE);
if (_RUN(width) * _RUN(height) <= 1920 * 1080) { // https://forums.raspberrypi.com/viewtopic.php?t=291447#p1762296
if (run->p_width * run->p_height <= 1920 * 1080) { // https://forums.raspberrypi.com/viewtopic.php?t=291447#p1762296
SET_OPTION(V4L2_CID_MPEG_VIDEO_H264_LEVEL, V4L2_MPEG_VIDEO_H264_LEVEL_4_0);
} else {
SET_OPTION(V4L2_CID_MPEG_VIDEO_H264_LEVEL, V4L2_MPEG_VIDEO_H264_LEVEL_5_1);
@@ -193,19 +218,18 @@ static void _m2m_encoder_prepare(us_m2m_encoder_s *enc, const us_frame_s *frame)
} else if (enc->output_format == V4L2_PIX_FMT_JPEG) {
SET_OPTION(V4L2_CID_JPEG_COMPRESSION_QUALITY, enc->quality);
}
# undef SET_OPTION
{
struct v4l2_format fmt = {0};
fmt.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
fmt.fmt.pix_mp.width = _RUN(width);
fmt.fmt.pix_mp.height = _RUN(height);
fmt.fmt.pix_mp.pixelformat = _RUN(input_format);
fmt.fmt.pix_mp.width = run->p_width;
fmt.fmt.pix_mp.height = run->p_height;
fmt.fmt.pix_mp.pixelformat = run->p_input_format;
fmt.fmt.pix_mp.field = V4L2_FIELD_ANY;
fmt.fmt.pix_mp.colorspace = V4L2_COLORSPACE_JPEG; // libcamera currently has no means to request the right colour space
fmt.fmt.pix_mp.num_planes = 1;
// fmt.fmt.pix_mp.plane_fmt[0].bytesperline = _RUN(stride);
// fmt.fmt.pix_mp.plane_fmt[0].bytesperline = run->p_stride;
_E_LOG_DEBUG("Configuring INPUT format ...");
_E_XIOCTL(VIDIOC_S_FMT, &fmt, "Can't set INPUT format");
}
@@ -213,8 +237,8 @@ static void _m2m_encoder_prepare(us_m2m_encoder_s *enc, const us_frame_s *frame)
{
struct v4l2_format fmt = {0};
fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
fmt.fmt.pix_mp.width = _RUN(width);
fmt.fmt.pix_mp.height = _RUN(height);
fmt.fmt.pix_mp.width = run->p_width;
fmt.fmt.pix_mp.height = run->p_height;
fmt.fmt.pix_mp.pixelformat = enc->output_format;
fmt.fmt.pix_mp.field = V4L2_FIELD_ANY;
fmt.fmt.pix_mp.colorspace = V4L2_COLORSPACE_DEFAULT;
@@ -236,21 +260,37 @@ static void _m2m_encoder_prepare(us_m2m_encoder_s *enc, const us_frame_s *frame)
}
}
if (enc->fps > 0) { // TODO: Check this for MJPEG
if (run->p_width * run->p_height <= 1280 * 720) {
// H264 needs some kind of limit. More than 30 is not supported, and with 0
// the encoder starts producing broken frames after a while.
// Exceeding the FPS limit, on the other hand, sharply increases the encoding time.
run->fps_limit = 60;
} else {
run->fps_limit = 30;
}
// H264: 30 or 0? https://github.com/6by9/yavta/blob/master/yavta.c#L2100
// Logically 0 should be the correct value, but for some reason at low resolutions like 640x480
// the encoder stops producing valid frames after a few seconds.
// JPEG: Same question about 30 or 0, but this hasn't even been tested at low resolutions yet.
{
struct v4l2_streamparm setfps = {0};
setfps.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
setfps.parm.output.timeperframe.numerator = 1;
setfps.parm.output.timeperframe.denominator = enc->fps;
setfps.parm.output.timeperframe.denominator = run->fps_limit;
_E_LOG_DEBUG("Configuring INPUT FPS ...");
_E_XIOCTL(VIDIOC_S_PARM, &setfps, "Can't set INPUT FPS");
}
if (_m2m_encoder_init_buffers(enc, (dma ? "INPUT-DMA" : "INPUT"), V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
&_RUN(input_bufs), &_RUN(n_input_bufs), dma) < 0) {
if (_m2m_encoder_init_buffers(
enc, (dma ? "INPUT-DMA" : "INPUT"), V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
&run->input_bufs, &run->n_input_bufs, dma
) < 0) {
goto error;
}
if (_m2m_encoder_init_buffers(enc, "OUTPUT", V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
&_RUN(output_bufs), &_RUN(n_output_bufs), false) < 0) {
if (_m2m_encoder_init_buffers(
enc, "OUTPUT", V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
&run->output_bufs, &run->n_output_bufs, false
) < 0) {
goto error;
}
@@ -264,18 +304,20 @@ static void _m2m_encoder_prepare(us_m2m_encoder_s *enc, const us_frame_s *frame)
_E_XIOCTL(VIDIOC_STREAMON, &type, "Can't start OUTPUT");
}
_RUN(ready) = true;
_E_LOG_DEBUG("Encoder state: *** READY ***");
run->ready = true;
_E_LOG_INFO("Encoder is ready");
return;
error:
_m2m_encoder_cleanup(enc);
_E_LOG_ERROR("Encoder destroyed due an error (prepare)");
error:
_m2m_encoder_cleanup(enc);
_E_LOG_ERROR("Encoder destroyed due an error (prepare)");
}
static int _m2m_encoder_init_buffers(
us_m2m_encoder_s *enc, const char *name, enum v4l2_buf_type type,
us_m2m_buffer_s **bufs_ptr, unsigned *n_bufs_ptr, bool dma) {
us_m2m_buffer_s **bufs_ptr, uint *n_bufs_ptr, bool dma) {
us_m2m_encoder_runtime_s *const run = enc->run;
_E_LOG_DEBUG("Initializing %s buffers ...", name);
@@ -294,98 +336,102 @@ static int _m2m_encoder_init_buffers(
if (dma) {
*n_bufs_ptr = req.count;
} else {
US_CALLOC(*bufs_ptr, req.count);
for (*n_bufs_ptr = 0; *n_bufs_ptr < req.count; ++(*n_bufs_ptr)) {
struct v4l2_buffer buf = {0};
struct v4l2_plane plane = {0};
buf.type = type;
buf.memory = V4L2_MEMORY_MMAP;
buf.index = *n_bufs_ptr;
buf.length = 1;
buf.m.planes = &plane;
_E_LOG_DEBUG("Querying %s buffer=%u ...", name, *n_bufs_ptr);
_E_XIOCTL(VIDIOC_QUERYBUF, &buf, "Can't query %s buffer=%u", name, *n_bufs_ptr);
_E_LOG_DEBUG("Mapping %s buffer=%u ...", name, *n_bufs_ptr);
if (((*bufs_ptr)[*n_bufs_ptr].data = mmap(
NULL,
plane.length,
PROT_READ | PROT_WRITE,
MAP_SHARED,
_RUN(fd),
plane.m.mem_offset
)) == MAP_FAILED) {
_E_LOG_PERROR("Can't map %s buffer=%u", name, *n_bufs_ptr);
goto error;
}
assert((*bufs_ptr)[*n_bufs_ptr].data != NULL);
(*bufs_ptr)[*n_bufs_ptr].allocated = plane.length;
_E_LOG_DEBUG("Queuing %s buffer=%u ...", name, *n_bufs_ptr);
_E_XIOCTL(VIDIOC_QBUF, &buf, "Can't queue %s buffer=%u", name, *n_bufs_ptr);
}
return 0;
}
US_CALLOC(*bufs_ptr, req.count);
for (*n_bufs_ptr = 0; *n_bufs_ptr < req.count; ++(*n_bufs_ptr)) {
struct v4l2_buffer buf = {0};
struct v4l2_plane plane = {0};
buf.type = type;
buf.memory = V4L2_MEMORY_MMAP;
buf.index = *n_bufs_ptr;
buf.length = 1;
buf.m.planes = &plane;
_E_LOG_DEBUG("Querying %s buffer=%u ...", name, *n_bufs_ptr);
_E_XIOCTL(VIDIOC_QUERYBUF, &buf, "Can't query %s buffer=%u", name, *n_bufs_ptr);
_E_LOG_DEBUG("Mapping %s buffer=%u ...", name, *n_bufs_ptr);
if (((*bufs_ptr)[*n_bufs_ptr].data = mmap(
NULL, plane.length,
PROT_READ | PROT_WRITE, MAP_SHARED,
run->fd, plane.m.mem_offset
)) == MAP_FAILED) {
_E_LOG_PERROR("Can't map %s buffer=%u", name, *n_bufs_ptr);
goto error;
}
assert((*bufs_ptr)[*n_bufs_ptr].data != NULL);
(*bufs_ptr)[*n_bufs_ptr].allocated = plane.length;
_E_LOG_DEBUG("Queuing %s buffer=%u ...", name, *n_bufs_ptr);
_E_XIOCTL(VIDIOC_QBUF, &buf, "Can't queue %s buffer=%u", name, *n_bufs_ptr);
}
_E_LOG_DEBUG("All %s buffers are ready", name);
return 0;
error:
return -1;
error: // Mostly for _E_XIOCTL
return -1;
}
static void _m2m_encoder_cleanup(us_m2m_encoder_s *enc) {
if (_RUN(ready)) {
us_m2m_encoder_runtime_s *const run = enc->run;
bool say = false;
if (run->ready) {
say = true;
# define STOP_STREAM(x_name, x_type) { \
enum v4l2_buf_type m_type_var = x_type; \
_E_LOG_DEBUG("Stopping %s ...", x_name); \
if (us_xioctl(_RUN(fd), VIDIOC_STREAMOFF, &m_type_var) < 0) { \
if (us_xioctl(run->fd, VIDIOC_STREAMOFF, &m_type_var) < 0) { \
_E_LOG_PERROR("Can't stop %s", x_name); \
} \
}
STOP_STREAM("OUTPUT", V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
STOP_STREAM("INPUT", V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
# undef STOP_STREAM
}
# define DESTROY_BUFFERS(x_name, x_target) { \
if (_RUN(x_target##_bufs) != NULL) { \
for (unsigned m_index = 0; m_index < _RUN(n_##x_target##_bufs); ++m_index) { \
if (_RUN(x_target##_bufs[m_index].allocated) > 0 && _RUN(x_target##_bufs[m_index].data) != NULL) { \
if (munmap(_RUN(x_target##_bufs[m_index].data), _RUN(x_target##_bufs[m_index].allocated)) < 0) { \
# define DELETE_BUFFERS(x_name, x_target) { \
if (run->x_target##_bufs != NULL) { \
say = true; \
for (uint m_index = 0; m_index < run->n_##x_target##_bufs; ++m_index) { \
us_m2m_buffer_s *m_buf = &run->x_target##_bufs[m_index]; \
if (m_buf->allocated > 0 && m_buf->data != NULL) { \
if (munmap(m_buf->data, m_buf->allocated) < 0) { \
_E_LOG_PERROR("Can't unmap %s buffer=%u", #x_name, m_index); \
} \
} \
} \
free(_RUN(x_target##_bufs)); \
_RUN(x_target##_bufs) = NULL; \
US_DELETE(run->x_target##_bufs, free); \
} \
_RUN(n_##x_target##_bufs) = 0; \
run->n_##x_target##_bufs = 0; \
}
DELETE_BUFFERS("OUTPUT", output);
DELETE_BUFFERS("INPUT", input);
# undef DELETE_BUFFERS
DESTROY_BUFFERS("OUTPUT", output);
DESTROY_BUFFERS("INPUT", input);
# undef DESTROY_BUFFERS
if (_RUN(fd) >= 0) {
if (close(_RUN(fd)) < 0) {
if (run->fd >= 0) {
say = true;
if (close(run->fd) < 0) {
_E_LOG_PERROR("Can't close encoder device");
}
_RUN(fd) = -1;
run->fd = -1;
}
_RUN(last_online) = -1;
_RUN(ready) = false;
run->last_online = -1;
run->ready = false;
_E_LOG_DEBUG("Encoder state: ~~~ NOT READY ~~~");
if (say) {
_E_LOG_INFO("Encoder closed");
}
}
static int _m2m_encoder_compress_raw(us_m2m_encoder_s *enc, const us_frame_s *src, us_frame_s *dest, bool force_key) {
assert(_RUN(ready));
us_m2m_encoder_runtime_s *const run = enc->run;
_E_LOG_DEBUG("Compressing new frame; force_key=%d ...", force_key);
assert(run->ready);
if (force_key) {
struct v4l2_control ctl = {0};
@@ -401,7 +447,7 @@ static int _m2m_encoder_compress_raw(us_m2m_encoder_s *enc, const us_frame_s *sr
input_buf.length = 1;
input_buf.m.planes = &input_plane;
if (_RUN(dma)) {
if (run->p_dma) {
input_buf.index = 0;
input_buf.memory = V4L2_MEMORY_DMABUF;
input_buf.field = V4L2_FIELD_NONE;
@@ -411,38 +457,38 @@ static int _m2m_encoder_compress_raw(us_m2m_encoder_s *enc, const us_frame_s *sr
input_buf.memory = V4L2_MEMORY_MMAP;
_E_LOG_DEBUG("Grabbing INPUT buffer ...");
_E_XIOCTL(VIDIOC_DQBUF, &input_buf, "Can't grab INPUT buffer");
if (input_buf.index >= _RUN(n_input_bufs)) {
if (input_buf.index >= run->n_input_bufs) {
_E_LOG_ERROR("V4L2 error: grabbed invalid INPUT: buffer=%u, n_bufs=%u",
input_buf.index, _RUN(n_input_bufs));
input_buf.index, run->n_input_bufs);
goto error;
}
_E_LOG_DEBUG("Grabbed INPUT buffer=%u", input_buf.index);
}
const uint64_t now = us_get_now_monotonic_u64();
const u64 now_ts = us_get_now_monotonic_u64();
struct timeval ts = {
.tv_sec = now / 1000000,
.tv_usec = now % 1000000,
.tv_sec = now_ts / 1000000,
.tv_usec = now_ts % 1000000,
};
input_buf.timestamp.tv_sec = ts.tv_sec;
input_buf.timestamp.tv_usec = ts.tv_usec;
input_plane.bytesused = src->used;
input_plane.length = src->used;
if (!_RUN(dma)) {
memcpy(_RUN(input_bufs[input_buf.index].data), src->data, src->used);
if (!run->p_dma) {
memcpy(run->input_bufs[input_buf.index].data, src->data, src->used);
}
const char *input_name = (_RUN(dma) ? "INPUT-DMA" : "INPUT");
const char *input_name = (run->p_dma ? "INPUT-DMA" : "INPUT");
_E_LOG_DEBUG("Sending%s %s buffer ...", (!_RUN(dma) ? " (releasing)" : ""), input_name);
_E_LOG_DEBUG("Sending%s %s buffer ...", (!run->p_dma ? " (releasing)" : ""), input_name);
_E_XIOCTL(VIDIOC_QBUF, &input_buf, "Can't send %s buffer", input_name);
// For non-DMA, queuing the buffer effectively releases it
bool input_released = !_RUN(dma);
bool input_released = !run->p_dma;
while (true) {
struct pollfd enc_poll = {_RUN(fd), POLLIN, 0};
struct pollfd enc_poll = {run->fd, POLLIN, 0};
_E_LOG_DEBUG("Polling encoder ...");
if (poll(&enc_poll, 1, 1000) < 0 && errno != EINTR) {
@@ -474,7 +520,7 @@ static int _m2m_encoder_compress_raw(us_m2m_encoder_s *enc, const us_frame_s *sr
// to the input one (with the same timestamp).
_E_LOG_DEBUG("Need to retry OUTPUT buffer due timestamp mismatch");
} else {
us_frame_set_data(dest, _RUN(output_bufs[output_buf.index].data), output_plane.bytesused);
us_frame_set_data(dest, run->output_bufs[output_buf.index].data, output_plane.bytesused);
dest->key = output_buf.flags & V4L2_BUF_FLAG_KEYFRAME;
dest->gop = enc->gop;
done = true;
@@ -488,10 +534,10 @@ static int _m2m_encoder_compress_raw(us_m2m_encoder_s *enc, const us_frame_s *sr
}
}
}
return 0;
error:
return -1;
error: // Mostly for _E_XIOCTL
return -1;
}
#undef _E_XIOCTL

View File

@@ -22,65 +22,49 @@
#pragma once
#include <stdlib.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <math.h>
#include <fcntl.h>
#include <poll.h>
#include <errno.h>
#include <assert.h>
#include <sys/mman.h>
#include <linux/videodev2.h>
#include "../libs/tools.h"
#include "../libs/logging.h"
#include "../libs/types.h"
#include "../libs/frame.h"
#include "../libs/xioctl.h"
typedef struct {
uint8_t *data;
size_t allocated;
u8 *data;
uz allocated;
} us_m2m_buffer_s;
typedef struct {
int fd;
uint fps_limit;
us_m2m_buffer_s *input_bufs;
unsigned n_input_bufs;
uint n_input_bufs;
us_m2m_buffer_s *output_bufs;
unsigned n_output_bufs;
uint n_output_bufs;
unsigned width;
unsigned height;
unsigned input_format;
unsigned stride;
bool dma;
bool ready;
uint p_width;
uint p_height;
uint p_input_format;
uint p_stride;
bool p_dma;
int last_online;
bool ready;
int last_online;
} us_m2m_encoder_runtime_s;
typedef struct {
char *name;
char *path;
unsigned output_format;
unsigned fps;
unsigned bitrate;
unsigned gop;
unsigned quality;
bool allow_dma;
char *name;
char *path;
uint output_format;
uint bitrate;
uint gop;
uint quality;
bool allow_dma;
us_m2m_encoder_runtime_s *run;
} us_m2m_encoder_s;
us_m2m_encoder_s *us_m2m_h264_encoder_init(const char *name, const char *path, unsigned bitrate, unsigned gop);
us_m2m_encoder_s *us_m2m_mjpeg_encoder_init(const char *name, const char *path, unsigned quality);
us_m2m_encoder_s *us_m2m_jpeg_encoder_init(const char *name, const char *path, unsigned quality);
us_m2m_encoder_s *us_m2m_h264_encoder_init(const char *name, const char *path, uint bitrate, uint gop);
us_m2m_encoder_s *us_m2m_mjpeg_encoder_init(const char *name, const char *path, uint quality);
us_m2m_encoder_s *us_m2m_jpeg_encoder_init(const char *name, const char *path, uint quality);
void us_m2m_encoder_destroy(us_m2m_encoder_s *enc);
int us_m2m_encoder_compress(us_m2m_encoder_s *enc, const us_frame_s *src, us_frame_s *dest, bool force_key);
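The short integer aliases used throughout this diff (u8, uz, uint, u64, ull, sll, ldf) come from the new ../libs/types.h header, which is not included in this compare view. A minimal sketch of what those aliases presumably map to, inferred only from how they are used here:

// Hypothetical sketch of the ../libs/types.h aliases (assumed, not taken from this diff)
#include <stdint.h>
#include <stddef.h>
typedef uint8_t            u8;   // byte buffers, e.g. us_m2m_buffer_s.data
typedef uint64_t           u64;  // packed state words and microsecond timestamps
typedef unsigned           uint; // counters, dimensions, FPS values
typedef size_t             uz;   // allocation sizes, e.g. us_m2m_buffer_s.allocated
typedef long long          sll;  // signed whole-second timestamps (us_floor_ms results)
typedef unsigned long long ull;  // values stored in atomic_ullong fields
typedef long double        ldf;  // monotonic timestamps in seconds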

View File

@@ -22,7 +22,6 @@
#include <stdio.h>
#include <stdbool.h>
#include <signal.h>
#include <pthread.h>
@@ -30,6 +29,7 @@
#include "../libs/threading.h"
#include "../libs/logging.h"
#include "../libs/device.h"
#include "../libs/signal.h"
#include "options.h"
#include "encoder.h"
@@ -54,7 +54,7 @@ static void _block_thread_signals(void) {
static void *_stream_loop_thread(void *arg) {
(void)arg;
US_THREAD_RENAME("stream");
US_THREAD_SETTLE("stream");
_block_thread_signals();
us_stream_loop(_g_stream);
return NULL;
@@ -62,7 +62,7 @@ static void *_stream_loop_thread(void *arg) {
static void *_server_loop_thread(void *arg) {
(void)arg;
US_THREAD_RENAME("http");
US_THREAD_SETTLE("http");
_block_thread_signals();
us_server_loop(_g_server);
return NULL;
@@ -76,24 +76,6 @@ static void _signal_handler(int signum) {
us_server_loop_break(_g_server);
}
static void _install_signal_handlers(void) {
struct sigaction sig_act = {0};
assert(!sigemptyset(&sig_act.sa_mask));
sig_act.sa_handler = _signal_handler;
assert(!sigaddset(&sig_act.sa_mask, SIGINT));
assert(!sigaddset(&sig_act.sa_mask, SIGTERM));
US_LOG_DEBUG("Installing SIGINT handler ...");
assert(!sigaction(SIGINT, &sig_act, NULL));
US_LOG_DEBUG("Installing SIGTERM handler ...");
assert(!sigaction(SIGTERM, &sig_act, NULL));
US_LOG_DEBUG("Ignoring SIGPIPE ...");
assert(signal(SIGPIPE, SIG_IGN) != SIG_ERR);
}
int main(int argc, char *argv[]) {
assert(argc >= 0);
int exit_code = 0;
@@ -112,7 +94,7 @@ int main(int argc, char *argv[]) {
us_gpio_init();
# endif
_install_signal_handlers();
us_install_signals_handler(_signal_handler, true);
if ((exit_code = us_server_listen(_g_server)) == 0) {
# ifdef WITH_GPIO
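The removed per-binary handler installation is replaced by a single call into the new ../libs/signal.h. Its implementation is not part of this compare view; below is a minimal sketch under the assumption that the boolean argument selects between ignoring SIGPIPE (true, as here) and routing it to the handler (false, as in the v4p main further down), mirroring the two removed variants.

// Hypothetical sketch of us_install_signals_handler(); the real code lives in ../libs/signal.c
#include <stdbool.h>
#include <signal.h>
#include <assert.h>

void us_install_signals_handler(void (*handler)(int), bool ignore_sigpipe) {
    struct sigaction sig_act = {0};
    assert(!sigemptyset(&sig_act.sa_mask));
    sig_act.sa_handler = handler;
    assert(!sigaddset(&sig_act.sa_mask, SIGINT));
    assert(!sigaddset(&sig_act.sa_mask, SIGTERM));
    assert(!sigaction(SIGINT, &sig_act, NULL));
    assert(!sigaction(SIGTERM, &sig_act, NULL));
    if (ignore_sigpipe) {
        assert(signal(SIGPIPE, SIG_IGN) != SIG_ERR); // Broken pipes are handled by write() errors
    } else {
        assert(!sigaddset(&sig_act.sa_mask, SIGPIPE));
        assert(!sigaction(SIGPIPE, &sig_act, NULL)); // Treat SIGPIPE like SIGINT/SIGTERM
    }
}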

View File

@@ -250,7 +250,7 @@ us_options_s *us_options_init(unsigned argc, char *argv[]) {
}
void us_options_destroy(us_options_s *options) {
US_DELETE(options->sink, us_memsink_destroy);
US_DELETE(options->jpeg_sink, us_memsink_destroy);
US_DELETE(options->raw_sink, us_memsink_destroy);
US_DELETE(options->h264_sink, us_memsink_destroy);
@@ -296,11 +296,13 @@ int options_parse(us_options_s *options, us_device_s *dev, us_encoder_s *enc, us
break; \
}
# define OPT_PARSE(x_name, x_dest, x_func, x_invalid, x_available) { \
if ((x_dest = x_func(optarg)) == x_invalid) { \
# define OPT_PARSE_ENUM(x_name, x_dest, x_func, x_available) { \
const int m_value = x_func(optarg); \
if (m_value < 0) { \
printf("Unknown " x_name ": %s; available: %s\n", optarg, x_available); \
return -1; \
} \
x_dest = m_value; \
break; \
}
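For reference, the --format case below roughly expands to the following under the new macro; the us_device_parse_format contract (returning a negative value for unknown input) is implied by the m_value < 0 check rather than shown in this diff:

case _O_FORMAT: {
    const int m_value = us_device_parse_format(optarg);
    if (m_value < 0) {
        printf("Unknown pixel format: %s; available: %s\n", optarg, US_FORMATS_STR);
        return -1;
    }
    dev->format = m_value;
    break;
}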
@@ -336,7 +338,7 @@ int options_parse(us_options_s *options, us_device_s *dev, us_encoder_s *enc, us
bool x_prefix##_rm = false; \
unsigned x_prefix##_client_ttl = 10; \
unsigned x_prefix##_timeout = 1;
ADD_SINK(sink);
ADD_SINK(jpeg_sink);
ADD_SINK(raw_sink);
ADD_SINK(h264_sink);
# undef ADD_SINK
@@ -355,10 +357,10 @@ int options_parse(us_options_s *options, us_device_s *dev, us_encoder_s *enc, us
case _O_RESOLUTION: OPT_RESOLUTION("--resolution", dev->width, dev->height, true);
# pragma GCC diagnostic ignored "-Wsign-compare"
# pragma GCC diagnostic push
case _O_FORMAT: OPT_PARSE("pixel format", dev->format, us_device_parse_format, US_FORMAT_UNKNOWN, US_FORMATS_STR);
case _O_FORMAT: OPT_PARSE_ENUM("pixel format", dev->format, us_device_parse_format, US_FORMATS_STR);
# pragma GCC diagnostic pop
case _O_TV_STANDARD: OPT_PARSE("TV standard", dev->standard, us_device_parse_standard, US_STANDARD_UNKNOWN, US_STANDARDS_STR);
case _O_IO_METHOD: OPT_PARSE("IO method", dev->io_method, us_device_parse_io_method, US_IO_METHOD_UNKNOWN, US_IO_METHODS_STR);
case _O_TV_STANDARD: OPT_PARSE_ENUM("TV standard", dev->standard, us_device_parse_standard, US_STANDARDS_STR);
case _O_IO_METHOD: OPT_PARSE_ENUM("IO method", dev->io_method, us_device_parse_io_method, US_IO_METHODS_STR);
case _O_DESIRED_FPS: OPT_NUMBER("--desired-fps", dev->desired_fps, 0, US_VIDEO_MAX_FPS, 0);
case _O_MIN_FRAME_SIZE: OPT_NUMBER("--min-frame-size", dev->min_frame_size, 1, 8192, 0);
case _O_PERSISTENT: OPT_SET(dev->persistent, true);
@@ -366,10 +368,10 @@ int options_parse(us_options_s *options, us_device_s *dev, us_encoder_s *enc, us
case _O_BUFFERS: OPT_NUMBER("--buffers", dev->n_bufs, 1, 32, 0);
case _O_WORKERS: OPT_NUMBER("--workers", enc->n_workers, 1, 32, 0);
case _O_QUALITY: OPT_NUMBER("--quality", dev->jpeg_quality, 1, 100, 0);
case _O_ENCODER: OPT_PARSE("encoder type", enc->type, us_encoder_parse_type, US_ENCODER_TYPE_UNKNOWN, ENCODER_TYPES_STR);
case _O_ENCODER: OPT_PARSE_ENUM("encoder type", enc->type, us_encoder_parse_type, ENCODER_TYPES_STR);
case _O_GLITCHED_RESOLUTIONS: break; // Deprecated
case _O_BLANK: break; // Deprecated
case _O_LAST_AS_BLANK: OPT_NUMBER("--last-as-blank", stream->last_as_blank, 0, 86400, 0);
case _O_LAST_AS_BLANK: break; // Deprecated
case _O_SLOWDOWN: OPT_SET(stream->slowdown, true);
case _O_DEVICE_TIMEOUT: OPT_NUMBER("--device-timeout", dev->timeout, 1, 60, 0);
case _O_DEVICE_ERROR_DELAY: OPT_NUMBER("--device-error-delay", stream->error_delay, 1, 60, 0);
@@ -434,7 +436,7 @@ int options_parse(us_options_s *options, us_device_s *dev, us_encoder_s *enc, us
case _O_##x_up##_RM: OPT_SET(x_lp##_rm, true); \
case _O_##x_up##_CLIENT_TTL: OPT_NUMBER("--" #x_opt "sink-client-ttl", x_lp##_client_ttl, 1, 60, 0); \
case _O_##x_up##_TIMEOUT: OPT_NUMBER("--" #x_opt "sink-timeout", x_lp##_timeout, 1, 60, 0);
ADD_SINK("", sink, SINK)
ADD_SINK("", jpeg_sink, SINK)
ADD_SINK("raw-", raw_sink, RAW_SINK)
ADD_SINK("h264-", h264_sink, H264_SINK)
case _O_H264_BITRATE: OPT_NUMBER("--h264-bitrate", stream->h264_bitrate, 25, 20000, 0);
@@ -457,7 +459,7 @@ int options_parse(us_options_s *options, us_device_s *dev, us_encoder_s *enc, us
};
break;
# endif
case _O_EXIT_ON_NO_CLIENTS: OPT_NUMBER("--exit-on-no-clients", server->exit_on_no_clients, 0, 86400, 0);
case _O_EXIT_ON_NO_CLIENTS: OPT_NUMBER("--exit-on-no-clients", stream->exit_on_no_clients, 0, 86400, 0);
# ifdef WITH_SETPROCTITLE
case _O_PROCESS_NAME_PREFIX: OPT_SET(process_name_prefix, optarg);
# endif
@@ -495,7 +497,7 @@ int options_parse(us_options_s *options, us_device_s *dev, us_encoder_s *enc, us
} \
stream->x_prefix = options->x_prefix; \
}
ADD_SINK("JPEG", sink);
ADD_SINK("JPEG", jpeg_sink);
ADD_SINK("RAW", raw_sink);
ADD_SINK("H264", h264_sink);
# undef ADD_SINK
@@ -622,13 +624,8 @@ static void _help(FILE *fp, const us_device_s *dev, const us_encoder_s *enc, con
SAY(" * M2M-IMAGE ── GPU-accelerated JPEG encoding using V4L2 M2M image interface;");
SAY(" * NOOP ─────── Don't compress MJPEG stream (do nothing).\n");
SAY(" -g|--glitched-resolutions <WxH,...> ─ It doesn't do anything. Still here for compatibility.\n");
SAY(" -k|--blank <path> ─────────────────── It doesn't do anything. Still here for compatibility..\n");
SAY(" during the streaming. Default: black screen 640x480 with 'NO SIGNAL'.\n");
SAY(" -K|--last-as-blank <sec> ──────────── Show the last frame received from the camera after it was disconnected,");
SAY(" but no more than specified time (or endlessly if 0 is specified).");
SAY(" If the device has not yet been online, display some error text.");
SAY(" Default: disabled.");
SAY(" Note: currently this option has no effect on memory sinks.\n");
SAY(" -k|--blank <path> ─────────────────── It doesn't do anything. Still here for compatibility.\n");
SAY(" -K|--last-as-blank <sec> ──────────── It doesn't do anything. Still here for compatibility.\n");
SAY(" -l|--slowdown ─────────────────────── Slowdown capturing to 1 FPS or less when no stream or sink clients");
SAY(" are connected. Useful to reduce CPU consumption. Default: disabled.\n");
SAY(" --device-timeout <sec> ────────────── Timeout for device querying. Default: %u.\n", dev->timeout);

View File

@@ -53,7 +53,7 @@ typedef struct {
unsigned argc;
char **argv;
char **argv_copy;
us_memsink_s *sink;
us_memsink_s *jpeg_sink;
us_memsink_s *raw_sink;
us_memsink_s *h264_sink;
} us_options_s;

View File

@@ -22,25 +22,64 @@
#include "stream.h"
#include <stdlib.h>
#include <stdatomic.h>
#include <unistd.h>
#include <errno.h>
#include <assert.h>
static us_workers_pool_s *_stream_init_loop(us_stream_s *stream);
static void _stream_expose_frame(us_stream_s *stream, us_frame_s *frame);
#include <pthread.h>
#include "../libs/types.h"
#include "../libs/tools.h"
#include "../libs/threading.h"
#include "../libs/process.h"
#include "../libs/logging.h"
#include "../libs/ring.h"
#include "../libs/frame.h"
#include "../libs/memsink.h"
#include "../libs/device.h"
#include "blank.h"
#include "encoder.h"
#include "workers.h"
#include "h264.h"
#ifdef WITH_GPIO
# include "gpio/gpio.h"
#endif
#define _RUN(x_next) stream->run->x_next
typedef struct {
pthread_t tid;
us_device_s *dev;
us_queue_s *queue;
pthread_mutex_t *mutex;
atomic_bool *stop;
} _releaser_context_s;
#define _SINK_PUT(x_sink, x_frame) { \
if (stream->x_sink && us_memsink_server_check(stream->x_sink, x_frame)) {\
bool m_key_requested; /* Unused */ \
us_memsink_server_put(stream->x_sink, x_frame, &m_key_requested); \
} \
}
typedef struct {
pthread_t tid;
us_queue_s *queue;
us_stream_s *stream;
atomic_bool *stop;
} _worker_context_s;
#define _H264_PUT(x_frame, x_force_key) { \
if (_RUN(h264)) { \
us_h264_stream_process(_RUN(h264), x_frame, x_force_key); \
} \
}
static void _stream_set_capture_state(us_stream_s *stream, uint width, uint height, bool online, uint captured_fps);
static void *_releaser_thread(void *v_ctx);
static void *_jpeg_thread(void *v_ctx);
static void *_h264_thread(void *v_ctx);
static void *_raw_thread(void *v_ctx);
static us_hw_buffer_s *_get_latest_hw(us_queue_s *queue);
static bool _stream_has_jpeg_clients_cached(us_stream_s *stream);
static bool _stream_has_any_clients_cached(us_stream_s *stream);
static int _stream_init_loop(us_stream_s *stream);
static void _stream_expose_jpeg(us_stream_s *stream, const us_frame_s *frame);
static void _stream_expose_raw(us_stream_s *stream, const us_frame_s *frame);
static void _stream_check_suicide(us_stream_s *stream);
us_stream_s *us_stream_init(us_device_s *dev, us_encoder_s *enc) {
@@ -48,7 +87,9 @@ us_stream_s *us_stream_init(us_device_s *dev, us_encoder_s *enc) {
US_CALLOC(run, 1);
US_RING_INIT_WITH_ITEMS(run->http_jpeg_ring, 4, us_frame_init);
atomic_init(&run->http_has_clients, false);
atomic_init(&run->captured_fps, 0);
atomic_init(&run->http_snapshot_requested, 0);
atomic_init(&run->http_last_request_ts, 0);
atomic_init(&run->http_capture_state, 0);
atomic_init(&run->stop, false);
run->blank = us_blank_init();
@@ -56,11 +97,13 @@ us_stream_s *us_stream_init(us_device_s *dev, us_encoder_s *enc) {
US_CALLOC(stream, 1);
stream->dev = dev;
stream->enc = enc;
stream->last_as_blank = -1;
stream->error_delay = 1;
stream->h264_bitrate = 5000; // Kbps
stream->h264_gop = 30;
stream->run = run;
us_blank_draw(run->blank, "< NO SIGNAL >", dev->width, dev->height);
_stream_set_capture_state(stream, dev->width, dev->height, false, 0);
return stream;
}
@@ -72,257 +115,465 @@ void us_stream_destroy(us_stream_s *stream) {
}
void us_stream_loop(us_stream_s *stream) {
US_LOG_INFO("Using V4L2 device: %s", stream->dev->path);
US_LOG_INFO("Using desired FPS: %u", stream->dev->desired_fps);
us_stream_runtime_s *const run = stream->run;
us_device_s *const dev = stream->dev;
US_LOG_INFO("Using V4L2 device: %s", dev->path);
US_LOG_INFO("Using desired FPS: %u", dev->desired_fps);
atomic_store(&run->http_last_request_ts, us_get_now_monotonic());
if (stream->h264_sink != NULL) {
_RUN(h264) = us_h264_stream_init(stream->h264_sink, stream->h264_m2m_path, stream->h264_bitrate, stream->h264_gop);
run->h264 = us_h264_stream_init(stream->h264_sink, stream->h264_m2m_path, stream->h264_bitrate, stream->h264_gop);
}
for (us_workers_pool_s *pool; (pool = _stream_init_loop(stream)) != NULL;) {
long double grab_after = 0;
unsigned fluency_passed = 0;
unsigned captured_fps_accum = 0;
long long captured_fps_second = 0;
while (!_stream_init_loop(stream)) {
atomic_bool threads_stop;
atomic_init(&threads_stop, false);
pthread_mutex_t release_mutex;
US_MUTEX_INIT(release_mutex);
const uint n_releasers = dev->run->n_bufs;
_releaser_context_s *releasers;
US_CALLOC(releasers, n_releasers);
for (uint index = 0; index < n_releasers; ++index) {
_releaser_context_s *ctx = &releasers[index];
ctx->dev = dev;
ctx->queue = us_queue_init(1);
ctx->mutex = &release_mutex;
ctx->stop = &threads_stop;
US_THREAD_CREATE(ctx->tid, _releaser_thread, ctx);
}
_worker_context_s jpeg_ctx = {
.queue = us_queue_init(dev->run->n_bufs),
.stream = stream,
.stop = &threads_stop,
};
US_THREAD_CREATE(jpeg_ctx.tid, _jpeg_thread, &jpeg_ctx);
_worker_context_s h264_ctx;
if (run->h264 != NULL) {
h264_ctx.queue = us_queue_init(dev->run->n_bufs);
h264_ctx.stream = stream;
h264_ctx.stop = &threads_stop;
US_THREAD_CREATE(h264_ctx.tid, _h264_thread, &h264_ctx);
}
_worker_context_s raw_ctx;
if (stream->raw_sink != NULL) {
raw_ctx.queue = us_queue_init(2);
raw_ctx.stream = stream;
raw_ctx.stop = &threads_stop;
US_THREAD_CREATE(raw_ctx.tid, _raw_thread, &raw_ctx);
}
uint captured_fps_accum = 0;
sll captured_fps_ts = 0;
uint captured_fps = 0;
US_LOG_INFO("Capturing ...");
while (!atomic_load(&_RUN(stop))) {
US_SEP_DEBUG('-');
US_LOG_DEBUG("Waiting for worker ...");
us_worker_s *const ready_wr = us_workers_pool_wait(pool);
us_encoder_job_s *const ready_job = (us_encoder_job_s *)(ready_wr->job);
if (ready_job->hw != NULL) {
if (us_device_release_buffer(stream->dev, ready_job->hw) < 0) {
ready_wr->job_failed = true;
}
ready_job->hw = NULL;
if (!ready_wr->job_failed) {
if (ready_wr->job_timely) {
_stream_expose_frame(stream, ready_job->dest);
US_LOG_PERF("##### Encoded JPEG exposed; worker=%s, latency=%.3Lf",
ready_wr->name, us_get_now_monotonic() - ready_job->dest->grab_ts);
} else {
US_LOG_PERF("----- Encoded JPEG dropped; worker=%s", ready_wr->name);
}
} else {
break;
}
uint slowdown_count = 0;
while (!atomic_load(&run->stop) && !atomic_load(&threads_stop)) {
us_hw_buffer_s *hw;
const int buf_index = us_device_grab_buffer(dev, &hw);
switch (buf_index) {
case -2: continue; // Broken frame
case -1: goto close; // Error
}
assert(buf_index >= 0);
bool h264_force_key = false;
if (stream->slowdown) {
unsigned slc = 0;
for (; slc < 10 && !atomic_load(&_RUN(stop)) && !us_stream_has_clients(stream); ++slc) {
usleep(100000);
}
h264_force_key = (slc == 10);
const sll now_sec_ts = us_floor_ms(us_get_now_monotonic());
if (now_sec_ts != captured_fps_ts) {
captured_fps = captured_fps_accum;
captured_fps_accum = 0;
captured_fps_ts = now_sec_ts;
US_LOG_PERF_FPS("A new second has come; captured_fps=%u", captured_fps);
}
captured_fps_accum += 1;
if (atomic_load(&_RUN(stop))) {
break;
_stream_set_capture_state(stream, dev->run->width, dev->run->height, true, captured_fps);
# ifdef WITH_GPIO
us_gpio_set_stream_online(true);
# endif
us_device_buffer_incref(hw); // JPEG
us_queue_put(jpeg_ctx.queue, hw, 0);
if (run->h264 != NULL) {
us_device_buffer_incref(hw); // H264
us_queue_put(h264_ctx.queue, hw, 0);
}
if (stream->raw_sink != NULL) {
us_device_buffer_incref(hw); // RAW
us_queue_put(raw_ctx.queue, hw, 0);
}
us_queue_put(releasers[buf_index].queue, hw, 0); // Plan to release
bool has_read;
bool has_error;
const int selected = us_device_select(stream->dev, &has_read, &has_error);
if (selected < 0) {
if (errno != EINTR) {
US_LOG_PERROR("Mainloop select() error");
break;
}
} else if (selected == 0) { // Persistent timeout
# ifdef WITH_GPIO
us_gpio_set_stream_online(false);
# endif
} else {
if (has_read) {
# ifdef WITH_GPIO
us_gpio_set_stream_online(true);
# endif
const long double now = us_get_now_monotonic();
const long long now_second = us_floor_ms(now);
us_hw_buffer_s *hw;
const int buf_index = us_device_grab_buffer(stream->dev, &hw);
if (buf_index >= 0) {
if (now < grab_after) {
fluency_passed += 1;
US_LOG_VERBOSE("Passed %u frames for fluency: now=%.03Lf, grab_after=%.03Lf",
fluency_passed, now, grab_after);
if (us_device_release_buffer(stream->dev, hw) < 0) {
break;
}
} else {
fluency_passed = 0;
if (now_second != captured_fps_second) {
US_LOG_PERF_FPS("A new second has come; captured_fps=%u", captured_fps_accum);
atomic_store(&stream->run->captured_fps, captured_fps_accum);
captured_fps_accum = 0;
captured_fps_second = now_second;
}
captured_fps_accum += 1;
const long double fluency_delay = us_workers_pool_get_fluency_delay(pool, ready_wr);
grab_after = now + fluency_delay;
US_LOG_VERBOSE("Fluency: delay=%.03Lf, grab_after=%.03Lf", fluency_delay, grab_after);
ready_job->hw = hw;
us_workers_pool_assign(pool, ready_wr);
US_LOG_DEBUG("Assigned new frame in buffer=%d to worker=%s", buf_index, ready_wr->name);
_SINK_PUT(raw_sink, &hw->raw);
_H264_PUT(&hw->raw, h264_force_key);
}
} else if (buf_index != -2) { // -2 for broken frame
break;
}
}
if (has_error && us_device_consume_event(stream->dev) < 0) {
break;
// We don't update the sinks' state here because that happens inside the threads that serve them
_stream_check_suicide(stream);
if (stream->slowdown && !_stream_has_any_clients_cached(stream)) {
usleep(100 * 1000);
slowdown_count = (slowdown_count + 1) % 10;
if (slowdown_count > 0) {
continue;
}
}
}
us_workers_pool_destroy(pool);
us_device_close(stream->dev);
close:
atomic_store(&threads_stop, true);
# ifdef WITH_GPIO
us_gpio_set_stream_online(false);
# endif
if (stream->raw_sink != NULL) {
US_THREAD_JOIN(raw_ctx.tid);
us_queue_destroy(raw_ctx.queue);
}
if (run->h264 != NULL) {
US_THREAD_JOIN(h264_ctx.tid);
us_queue_destroy(h264_ctx.queue);
}
US_THREAD_JOIN(jpeg_ctx.tid);
us_queue_destroy(jpeg_ctx.queue);
for (uint index = 0; index < n_releasers; ++index) {
US_THREAD_JOIN(releasers[index].tid);
us_queue_destroy(releasers[index].queue);
}
free(releasers);
US_MUTEX_DESTROY(release_mutex);
atomic_store(&threads_stop, false);
us_encoder_close(stream->enc);
us_device_close(dev);
if (!atomic_load(&run->stop)) {
US_SEP_INFO('=');
}
}
US_DELETE(_RUN(h264), us_h264_stream_destroy);
US_DELETE(run->h264, us_h264_stream_destroy);
}
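To summarize the new threaded pipeline shown above (a recap of code present in this diff, not additional API): the capture loop fans each hardware buffer out to per-consumer queues via reference counting, and dedicated releaser threads return buffers to V4L2 once every consumer is done.

// Lifecycle of one captured buffer in us_stream_loop(), as seen in this diff:
//   1. us_device_grab_buffer(dev, &hw) in the main loop;
//   2. us_device_buffer_incref(hw) + us_queue_put() once per consumer (JPEG, H264, RAW);
//   3. us_queue_put(releasers[buf_index].queue, hw) to schedule the eventual release;
//   4. each consumer thread processes &hw->raw and then calls us_device_buffer_decref(hw);
//   5. the releaser thread waits for hw->refs to reach zero and calls
//      us_device_release_buffer() under release_mutex to requeue the buffer.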
void us_stream_loop_break(us_stream_s *stream) {
atomic_store(&_RUN(stop), true);
atomic_store(&stream->run->stop, true);
}
bool us_stream_has_clients(us_stream_s *stream) {
return (
atomic_load(&_RUN(http_has_clients))
// The sinks' has_clients flags are NOT updated in real time
|| (stream->sink != NULL && atomic_load(&stream->sink->has_clients))
|| (_RUN(h264) != NULL && /*_RUN(h264->sink) == NULL ||*/ atomic_load(&_RUN(h264->sink->has_clients)))
void us_stream_get_capture_state(us_stream_s *stream, uint *width, uint *height, bool *online, uint *captured_fps) {
const u64 state = atomic_load(&stream->run->http_capture_state);
*width = state & 0xFFFF;
*height = (state >> 16) & 0xFFFF;
*captured_fps = (state >> 32) & 0xFFFF;
*online = (state >> 48) & 1;
}
void _stream_set_capture_state(us_stream_s *stream, uint width, uint height, bool online, uint captured_fps) {
const u64 state = (
(u64)(width & 0xFFFF)
| ((u64)(height & 0xFFFF) << 16)
| ((u64)(captured_fps & 0xFFFF) << 32)
| ((u64)(online ? 1 : 0) << 48)
);
atomic_store(&stream->run->http_capture_state, state);
}
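The capture state is packed into a single 64-bit atomic so the HTTP side can read width, height, FPS and the online flag without taking a lock. As defined by the pair of functions above, the layout is: bits 0-15 width, 16-31 height, 32-47 captured FPS, bit 48 online. A standalone round-trip illustration of the same layout (hypothetical helper name, not part of the diff):

// Hypothetical standalone illustration of the packing scheme used above
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static uint64_t pack_state(unsigned width, unsigned height, bool online, unsigned fps) {
    return (uint64_t)(width & 0xFFFF)
        | ((uint64_t)(height & 0xFFFF) << 16)
        | ((uint64_t)(fps & 0xFFFF) << 32)
        | ((uint64_t)(online ? 1 : 0) << 48);
}

int main(void) {
    const uint64_t state = pack_state(1920, 1080, true, 30);
    assert((state & 0xFFFF) == 1920);          // width
    assert(((state >> 16) & 0xFFFF) == 1080);  // height
    assert(((state >> 32) & 0xFFFF) == 30);    // captured_fps
    assert(((state >> 48) & 1) == 1);          // online
    return 0;
}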
static us_workers_pool_s *_stream_init_loop(us_stream_s *stream) {
int access_errno = 0;
while (!atomic_load(&_RUN(stop))) {
atomic_store(&stream->run->captured_fps, 0);
_stream_expose_frame(stream, NULL);
static void *_releaser_thread(void *v_ctx) {
US_THREAD_SETTLE("str_rel")
_releaser_context_s *ctx = v_ctx;
if (access(stream->dev->path, R_OK|W_OK) < 0) {
if (access_errno != errno) {
US_SEP_INFO('=');
US_LOG_PERROR("Can't access device");
US_LOG_INFO("Waiting for the device access ...");
access_errno = errno;
while (!atomic_load(ctx->stop)) {
us_hw_buffer_s *hw;
if (us_queue_get(ctx->queue, (void**)&hw, 0.1) < 0) {
continue;
}
while (atomic_load(&hw->refs) > 0) {
if (atomic_load(ctx->stop)) {
goto done;
}
goto sleep_and_retry;
usleep(5 * 1000);
}
US_SEP_INFO('=');
access_errno = 0;
stream->dev->dma_export = (
stream->enc->type == US_ENCODER_TYPE_M2M_VIDEO
|| stream->enc->type == US_ENCODER_TYPE_M2M_IMAGE
|| _RUN(h264) != NULL
);
if (us_device_open(stream->dev) == 0) {
return us_encoder_workers_pool_init(stream->enc, stream->dev);
US_MUTEX_LOCK(*ctx->mutex);
const int released = us_device_release_buffer(ctx->dev, hw);
US_MUTEX_UNLOCK(*ctx->mutex);
if (released < 0) {
goto done;
}
US_LOG_INFO("Sleeping %u seconds before new stream init ...", stream->error_delay);
}
sleep_and_retry:
sleep(stream->error_delay);
done:
atomic_store(ctx->stop, true); // Stop all other guys on error
return NULL;
}
static void *_jpeg_thread(void *v_ctx) {
US_THREAD_SETTLE("str_jpeg")
_worker_context_s *ctx = v_ctx;
us_stream_s *stream = ctx->stream;
ldf grab_after_ts = 0;
uint fluency_passed = 0;
while (!atomic_load(ctx->stop)) {
us_worker_s *const ready_wr = us_workers_pool_wait(stream->enc->run->pool);
us_encoder_job_s *const ready_job = ready_wr->job;
if (ready_job->hw != NULL) {
us_device_buffer_decref(ready_job->hw);
ready_job->hw = NULL;
if (ready_wr->job_failed) {
// pass
} else if (ready_wr->job_timely) {
_stream_expose_jpeg(stream, ready_job->dest);
if (atomic_load(&stream->run->http_snapshot_requested) > 0) { // Process real snapshots
atomic_fetch_sub(&stream->run->http_snapshot_requested, 1);
}
US_LOG_PERF("JPEG: ##### Encoded JPEG exposed; worker=%s, latency=%.3Lf",
ready_wr->name, us_get_now_monotonic() - ready_job->dest->grab_ts);
} else {
US_LOG_PERF("JPEG: ----- Encoded JPEG dropped; worker=%s", ready_wr->name);
}
}
us_hw_buffer_s *hw = _get_latest_hw(ctx->queue);
if (hw == NULL) {
continue;
}
const bool update_required = (stream->jpeg_sink != NULL && us_memsink_server_check(stream->jpeg_sink, NULL));
if (!update_required && !_stream_has_jpeg_clients_cached(stream)) {
US_LOG_VERBOSE("JPEG: Passed encoding because nobody is watching");
us_device_buffer_decref(hw);
continue;
}
const ldf now_ts = us_get_now_monotonic();
if (now_ts < grab_after_ts) {
fluency_passed += 1;
US_LOG_VERBOSE("JPEG: Passed %u frames for fluency: now=%.03Lf, grab_after=%.03Lf",
fluency_passed, now_ts, grab_after_ts);
us_device_buffer_decref(hw);
continue;
}
fluency_passed = 0;
const ldf fluency_delay = us_workers_pool_get_fluency_delay(stream->enc->run->pool, ready_wr);
grab_after_ts = now_ts + fluency_delay;
US_LOG_VERBOSE("JPEG: Fluency: delay=%.03Lf, grab_after=%.03Lf", fluency_delay, grab_after_ts);
ready_job->hw = hw;
us_workers_pool_assign(stream->enc->run->pool, ready_wr);
US_LOG_DEBUG("JPEG: Assigned new frame in buffer=%d to worker=%s", hw->buf.index, ready_wr->name);
}
return NULL;
}
static void _stream_expose_frame(us_stream_s *stream, us_frame_s *frame) {
static void *_h264_thread(void *v_ctx) {
US_THREAD_SETTLE("str_h264");
_worker_context_s *ctx = v_ctx;
us_h264_stream_s *h264 = ctx->stream->run->h264;
ldf grab_after_ts = 0;
ldf last_encode_ts = us_get_now_monotonic();
while (!atomic_load(ctx->stop)) {
us_hw_buffer_s *hw = _get_latest_hw(ctx->queue);
if (hw == NULL) {
continue;
}
if (!us_memsink_server_check(h264->sink, NULL)) {
us_device_buffer_decref(hw);
US_LOG_VERBOSE("H264: Passed encoding because nobody is watching");
continue;
}
if (hw->raw.grab_ts < grab_after_ts) {
us_device_buffer_decref(hw);
US_LOG_VERBOSE("H264: Passed encoding for FPS limit: %u", h264->enc->run->fps_limit);
continue;
}
// Force a keyframe if the capture hasn't produced frames for a while
const ldf now_ts = us_get_now_monotonic();
const bool force_key = (last_encode_ts + 0.5 < now_ts);
us_h264_stream_process(h264, &hw->raw, force_key);
last_encode_ts = now_ts;
// The M2M encoder adds about 100 milliseconds of latency at 1080p if it is fed more than 30 FPS.
// So there are two modes: 60 FPS for small resolutions and 30 FPS for 1920x1080(1200).
// The next frame is grabbed no earlier than required by the FPS limit, minus a small
// tolerance (in case capture is uneven): a bit less than 1/60, and roughly a third of 1/30.
const ldf frame_interval = (ldf)1 / h264->enc->run->fps_limit;
grab_after_ts = hw->raw.grab_ts + frame_interval - 0.01;
us_device_buffer_decref(hw);
}
return NULL;
}
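A worked check of the limit above (arithmetic only, not code from the diff): with fps_limit = 30 the next grab threshold is grab_ts + 1/30 - 0.01 ≈ grab_ts + 0.023 s, so any frame captured less than about 23 ms after the previously encoded one is skipped; with fps_limit = 60 the margin is 1/60 - 0.01 ≈ 0.007 s, so almost every captured frame is encoded.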
static void *_raw_thread(void *v_ctx) {
US_THREAD_SETTLE("str_raw");
_worker_context_s *ctx = v_ctx;
while (!atomic_load(ctx->stop)) {
us_hw_buffer_s *hw = _get_latest_hw(ctx->queue);
if (hw == NULL) {
continue;
}
if (!us_memsink_server_check(ctx->stream->raw_sink, NULL)) {
us_device_buffer_decref(hw);
US_LOG_VERBOSE("RAW: Passed publishing because nobody is watching");
continue;
}
us_memsink_server_put(ctx->stream->raw_sink, &hw->raw, false);
us_device_buffer_decref(hw);
}
return NULL;
}
static us_hw_buffer_s *_get_latest_hw(us_queue_s *queue) {
us_hw_buffer_s *hw;
if (us_queue_get(queue, (void**)&hw, 0.1) < 0) {
return NULL;
}
while (!us_queue_is_empty(queue)) { // Keep only the most recent frame
us_device_buffer_decref(hw);
assert(!us_queue_get(queue, (void**)&hw, 0));
}
return hw;
}
static bool _stream_has_jpeg_clients_cached(us_stream_s *stream) {
const us_stream_runtime_s *const run = stream->run;
return (
atomic_load(&run->http_has_clients)
|| (atomic_load(&run->http_snapshot_requested) > 0)
|| (stream->jpeg_sink != NULL && atomic_load(&stream->jpeg_sink->has_clients))
);
}
static bool _stream_has_any_clients_cached(us_stream_s *stream) {
const us_stream_runtime_s *const run = stream->run;
return (
_stream_has_jpeg_clients_cached(stream)
|| (run->h264 != NULL && atomic_load(&run->h264->sink->has_clients))
|| (stream->raw_sink != NULL && atomic_load(&stream->raw_sink->has_clients))
);
}
static int _stream_init_loop(us_stream_s *stream) {
us_stream_runtime_s *const run = stream->run;
us_blank_s *const blank = run->blank;
us_frame_s *new = NULL;
bool waiting_reported = false;
while (!atomic_load(&stream->run->stop)) {
# ifdef WITH_GPIO
us_gpio_set_stream_online(false);
# endif
if (frame != NULL) {
new = frame;
_RUN(last_as_blank_ts) = 0; // Stop the timer
US_LOG_DEBUG("Exposed ALIVE video frame");
} else {
unsigned width = stream->dev->run->width;
unsigned height = stream->dev->run->height;
if (width == 0 || height == 0) {
width = stream->dev->width;
height = stream->dev->height;
// The sinks' has_clients flags don't update by themselves, so refresh them
// on every stream-start iteration. Once the stream is running, the worker threads handle this.
if (stream->jpeg_sink != NULL) {
us_memsink_server_check(stream->jpeg_sink, NULL);
}
if (stream->run->h264 != NULL) {
us_memsink_server_check(stream->run->h264->sink, NULL);
}
if (stream->raw_sink != NULL) {
us_memsink_server_check(stream->raw_sink, NULL);
}
us_blank_draw(blank, "< NO SIGNAL >", width, height);
if (run->last_online) { // If we're transitioning from online to offline
if (stream->last_as_blank < 0) { // If last_as_blank is disabled, just show the blank
new = blank->jpeg;
US_LOG_INFO("Changed video frame to BLANK");
} else if (stream->last_as_blank > 0) { // If a timer is needed, start it
_RUN(last_as_blank_ts) = us_get_now_monotonic() + stream->last_as_blank;
US_LOG_INFO("Freezed last ALIVE video frame for %d seconds", stream->last_as_blank);
} else { // last_as_blank == 0 means show the last frame forever
US_LOG_INFO("Freezed last ALIVE video frame forever");
_stream_check_suicide(stream);
stream->dev->dma_export = (
stream->enc->type == US_ENCODER_TYPE_M2M_VIDEO
|| stream->enc->type == US_ENCODER_TYPE_M2M_IMAGE
|| run->h264 != NULL
);
switch (us_device_open(stream->dev)) {
case -2:
if (!waiting_reported) {
waiting_reported = true;
US_LOG_INFO("Waiting for the capture device ...");
}
goto offline_and_retry;
case -1:
waiting_reported = false;
goto offline_and_retry;
default: break;
}
us_encoder_open(stream->enc, stream->dev);
return 0;
offline_and_retry:
for (uint count = 0; count < stream->error_delay * 10; ++count) {
if (atomic_load(&run->stop)) {
break;
}
} else if (stream->last_as_blank < 0) {
new = blank->jpeg;
// US_LOG_INFO("Changed video frame to BLANK");
}
if (count % 10 == 0) {
// Repeat the blank frame every second
uint width = stream->dev->run->width;
uint height = stream->dev->run->height;
if (width == 0 || height == 0) {
width = stream->dev->width;
height = stream->dev->height;
}
us_blank_draw(run->blank, "< NO SIGNAL >", width, height);
if ( // If already offline, the last_as_blank timer feature is enabled and the timer is running
stream->last_as_blank > 0
&& _RUN(last_as_blank_ts) != 0
&& _RUN(last_as_blank_ts) < us_get_now_monotonic()
) {
new = blank->jpeg;
_RUN(last_as_blank_ts) = 0; // Stop the timer
US_LOG_INFO("Changed last ALIVE video frame to BLANK");
_stream_set_capture_state(stream, width, height, false, 0);
_stream_expose_jpeg(stream, run->blank->jpeg);
if (run->h264 != NULL) {
us_h264_stream_process(run->h264, run->blank->raw, true);
}
_stream_expose_raw(stream, run->blank->raw);
}
usleep(100 * 1000);
}
}
return -1;
}
int ri = -1;
while (
!atomic_load(&_RUN(stop))
&& ((ri = us_ring_producer_acquire(run->http_jpeg_ring, 0)) < 0)
) {
US_LOG_ERROR("Can't push JPEG to HTTP ring (no free slots)");
static void _stream_expose_jpeg(us_stream_s *stream, const us_frame_s *frame) {
us_stream_runtime_s *const run = stream->run;
int ri;
while ((ri = us_ring_producer_acquire(run->http_jpeg_ring, 0)) < 0) {
if (atomic_load(&run->stop)) {
return;
}
}
if (ri < 0) {
return;
}
us_frame_s *const dest = run->http_jpeg_ring->items[ri];
if (new == NULL) {
dest->used = 0;
dest->online = false;
} else {
us_frame_copy(new, dest);
dest->online = true;
}
run->last_online = (frame != NULL);
us_frame_copy(frame, dest);
us_ring_producer_release(run->http_jpeg_ring, ri);
_SINK_PUT(sink, (frame != NULL ? frame : blank->jpeg));
if (frame == NULL) {
_SINK_PUT(raw_sink, blank->raw);
_H264_PUT(blank->raw, false);
if (stream->jpeg_sink != NULL) {
us_memsink_server_put(stream->jpeg_sink, dest, NULL);
}
}
static void _stream_expose_raw(us_stream_s *stream, const us_frame_s *frame) {
if (stream->raw_sink != NULL) {
us_memsink_server_put(stream->raw_sink, frame, NULL);
}
}
static void _stream_check_suicide(us_stream_s *stream) {
if (stream->exit_on_no_clients == 0) {
return;
}
us_stream_runtime_s *const run = stream->run;
const ldf now_ts = us_get_now_monotonic();
const ull http_last_request_ts = atomic_load(&run->http_last_request_ts); // Seconds
if (_stream_has_any_clients_cached(stream)) {
atomic_store(&run->http_last_request_ts, now_ts);
} else if (http_last_request_ts + stream->exit_on_no_clients < now_ts) {
US_LOG_INFO("No requests or HTTP/sink clients found in last %u seconds, exiting ...",
stream->exit_on_no_clients);
us_process_suicide();
atomic_store(&run->http_last_request_ts, now_ts);
}
}
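As a usage illustration (not part of the diff): starting ustreamer with --exit-on-no-clients 60 makes this check call us_process_suicide() once 60 seconds pass without HTTP requests, MJPEG/snapshot clients, or sink clients; the timestamp is refreshed whenever any client shows up, so the countdown restarts.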

View File

@@ -22,42 +22,29 @@
#pragma once
#include <stdlib.h>
#include <stdbool.h>
#include <stdatomic.h>
#include <unistd.h>
#include <errno.h>
#include <assert.h>
#include <pthread.h>
#include <linux/videodev2.h>
#include "../libs/tools.h"
#include "../libs/threading.h"
#include "../libs/logging.h"
#include "../libs/types.h"
#include "../libs/queue.h"
#include "../libs/ring.h"
#include "../libs/frame.h"
#include "../libs/memsink.h"
#include "../libs/device.h"
#include "blank.h"
#include "encoder.h"
#include "workers.h"
#include "h264.h"
#ifdef WITH_GPIO
# include "gpio/gpio.h"
#endif
typedef struct {
us_h264_stream_s *h264;
us_ring_s *http_jpeg_ring;
atomic_bool http_has_clients;
atomic_uint captured_fps;
bool last_online;
long double last_as_blank_ts;
us_h264_stream_s *h264;
atomic_uint http_snapshot_requested;
atomic_ullong http_last_request_ts; // Seconds
atomic_ullong http_capture_state; // Bits
us_blank_s *blank;
@@ -70,14 +57,15 @@ typedef struct {
int last_as_blank;
bool slowdown;
unsigned error_delay;
uint error_delay;
uint exit_on_no_clients;
us_memsink_s *sink;
us_memsink_s *jpeg_sink;
us_memsink_s *raw_sink;
us_memsink_s *h264_sink;
unsigned h264_bitrate;
unsigned h264_gop;
uint h264_bitrate;
uint h264_gop;
char *h264_m2m_path;
us_stream_runtime_s *run;
@@ -90,4 +78,4 @@ void us_stream_destroy(us_stream_s *stream);
void us_stream_loop(us_stream_s *stream);
void us_stream_loop_break(us_stream_s *stream);
bool us_stream_has_clients(us_stream_s *stream);
void us_stream_get_capture_state(us_stream_s *stream, uint *width, uint *height, bool *online, uint *captured_fps);

View File

@@ -178,7 +178,7 @@ long double us_workers_pool_get_fluency_delay(us_workers_pool_s *pool, const us_
static void *_worker_thread(void *v_worker) {
us_worker_s *wr = (us_worker_s *)v_worker;
US_THREAD_RENAME("%s", wr->name);
US_THREAD_SETTLE("%s", wr->name);
US_LOG_DEBUG("Hello! I am a worker %s ^_^", wr->name);
while (!atomic_load(&wr->pool->stop)) {

View File

@@ -75,7 +75,8 @@ us_drm_s *us_drm_init(void) {
us_drm_s *drm;
US_CALLOC(drm, 1);
drm->path = "/dev/dri/card0";
// drm->path = "/dev/dri/card0";
drm->path = "/dev/dri/by-path/platform-gpu-card";
drm->port = "HDMI-A-1";
drm->n_bufs = 4;
drm->timeout = 5;

View File

@@ -25,7 +25,6 @@
#include <stdatomic.h>
#include <string.h>
#include <unistd.h>
#include <signal.h>
#include <getopt.h>
#include <errno.h>
#include <assert.h>
@@ -41,6 +40,7 @@
#include "../libs/tools.h"
#include "../libs/logging.h"
#include "../libs/device.h"
#include "../libs/signal.h"
#include "../libs/options.h"
#include "drm.h"
@@ -82,7 +82,6 @@ atomic_bool _g_ustreamer_online = false;
static void _signal_handler(int signum);
static void _install_signal_handlers(void);
static void _main_loop();
static void *_follower_thread(void *v_unix_follow);
@@ -137,13 +136,13 @@ int main(int argc, char *argv[]) {
# undef OPT_NUMBER
# undef OPT_SET
_install_signal_handlers();
us_install_signals_handler(_signal_handler, false);
pthread_t follower_tid;
if (unix_follow != NULL) {
US_THREAD_CREATE(follower_tid, _follower_thread, unix_follow);
}
_main_loop(unix_follow);
_main_loop();
if (unix_follow != NULL) {
US_THREAD_JOIN(follower_tid);
}
@@ -159,25 +158,6 @@ static void _signal_handler(int signum) {
atomic_store(&_g_stop, true);
}
static void _install_signal_handlers(void) {
struct sigaction sig_act = {0};
assert(!sigemptyset(&sig_act.sa_mask));
sig_act.sa_handler = _signal_handler;
assert(!sigaddset(&sig_act.sa_mask, SIGINT));
assert(!sigaddset(&sig_act.sa_mask, SIGTERM));
assert(!sigaddset(&sig_act.sa_mask, SIGPIPE));
US_LOG_DEBUG("Installing SIGINT handler ...");
assert(!sigaction(SIGINT, &sig_act, NULL));
US_LOG_DEBUG("Installing SIGTERM handler ...");
assert(!sigaction(SIGTERM, &sig_act, NULL));
US_LOG_DEBUG("Installing SIGTERM handler ...");
assert(!sigaction(SIGPIPE, &sig_act, NULL));
}
static void _main_loop(void) {
us_drm_s *drm = us_drm_init();
drm->port = "HDMI-A-2";
@@ -203,6 +183,9 @@ static void _main_loop(void) {
}
if (us_device_open(dev) < 0) {
if (us_drm_wait_for_vsync(drm) == 0) {
us_drm_expose(drm, US_DRM_EXPOSE_NO_SIGNAL, NULL, 0);
}
goto close;
}
@@ -216,40 +199,21 @@ static void _main_loop(void) {
continue;
}
bool has_read;
bool has_error;
const int selected = us_device_select(dev, &has_read, &has_error);
us_hw_buffer_s *hw;
const int buf_index = us_device_grab_buffer(dev, &hw);
switch (buf_index) {
case -2: continue; // Broken frame
case -1: goto close; // Any error
}
assert(buf_index >= 0);
if (selected < 0) {
if (errno != EINTR) {
US_LOG_PERROR("Mainloop select() error");
goto close;
}
} else if (selected == 0) { // Persistent timeout
if (us_drm_expose(drm, US_DRM_EXPOSE_NO_SIGNAL, NULL, 0) < 0) {
_slowdown();
continue;
}
} else {
if (has_read) {
us_hw_buffer_s *hw;
const int buf_index = us_device_grab_buffer(dev, &hw);
if (buf_index >= 0) {
const int exposed = us_drm_expose(drm, US_DRM_EXPOSE_FRAME, &hw->raw, dev->run->hz);
if (us_device_release_buffer(dev, hw) < 0) {
goto close;
}
if (exposed < 0) {
_slowdown();
continue;
}
} else if (buf_index != -2) { // -2 for broken frame
goto close;
}
}
if (has_error && us_device_consume_event(dev) < 0) {
goto close;
}
const int exposed = us_drm_expose(drm, US_DRM_EXPOSE_FRAME, &hw->raw, dev->run->hz);
if (us_device_release_buffer(dev, hw) < 0) {
goto close;
}
if (exposed < 0) {
_slowdown();
continue;
}
}
@@ -263,11 +227,10 @@ static void _main_loop(void) {
}
static void *_follower_thread(void *v_unix_follow) {
US_THREAD_SETTLE("follower");
const char *path = v_unix_follow;
assert(path != NULL);
US_THREAD_RENAME("follower");
while (!atomic_load(&_g_stop)) {
int fd = socket(AF_UNIX, SOCK_STREAM, 0);
assert(fd >= 0);