Compare commits

...

83 Commits
v5.56 ... v6.5

Author SHA1 Message Date
Maxim Devaev
408157c82b Bump version: 6.4 → 6.5 2024-03-21 13:33:29 +02:00
Maxim Devaev
7356dea737 renamed options --sink* to --jpeg-sink* 2024-03-21 13:21:51 +02:00
Maxim Devaev
87a75a816a memsink: suffix-based memory limites 2024-03-20 22:55:37 +02:00
Maxim Devaev
b6a2332207 refactoring 2024-03-20 17:51:56 +02:00
Maxim Devaev
34c0dcb1ce refactoring 2024-03-19 19:33:55 +02:00
Maxim Devaev
283f31a5a6 Bump version: 6.3 → 6.4 2024-03-17 10:44:37 +02:00
Maxim Devaev
2f1264c916 janus: rtp orientation support 2024-03-17 10:42:52 +02:00
Maxim Devaev
69e7cbf746 refactoring 2024-03-16 23:20:38 +02:00
Maxim Devaev
05804e309f Bump version: 6.2 → 6.3 2024-03-14 16:36:20 +02:00
Maxim Devaev
1d8c93d3ad Issue #253: Added m2m encoder timeout 2024-03-14 16:06:32 +02:00
Maxim Devaev
f48695a04e refactoring 2024-03-13 13:09:27 +02:00
Maxim Devaev
8ac2fa201b Bump version: 6.1 → 6.2 2024-03-10 18:11:26 +02:00
Maxim Devaev
2d9e51a1ca v4p: turn off the display after timeout 2024-03-10 16:05:50 +00:00
Maxim Devaev
c4cf4f015b v4p: wait for dma_fd on close 2024-03-10 16:49:49 +02:00
Maxim Devaev
646afbffff refactoring 2024-03-10 12:30:22 +02:00
Maxim Devaev
6475eeef4c refactoring 2024-03-10 12:25:08 +02:00
Maxim Devaev
2e67a46eb8 refactoring 2024-03-10 12:10:09 +02:00
Maxim Devaev
c333e75dff v4p: dpms 2024-03-09 19:55:22 +00:00
Maxim Devaev
72285023cb refactoring 2024-03-09 12:13:17 +02:00
Maxim Devaev
b00a6ffd8d Bump version: 6.0 → 6.1 2024-03-09 04:33:44 +02:00
Maxim Devaev
ce935c431e v4p: changed logging levels 2024-03-09 04:32:12 +02:00
Maxim Devaev
ac0944ae1a v4p: added some checks and asserts 2024-03-09 04:29:50 +02:00
Maxim Devaev
66572806a2 fix 2024-03-09 03:28:13 +02:00
Maxim Devaev
a75d6487e3 font info 2024-03-09 03:28:13 +02:00
Maxim Devaev
897ad4951b v4p: dma support 2024-03-09 01:27:50 +00:00
Maxim Devaev
e1ef86146f Bump version: 5.59 → 6.0 2024-03-06 21:50:47 +02:00
Maxim Devaev
8f3a475a32 Bump version: 5.58 → 5.59 2024-03-06 20:56:38 +02:00
Maxim Devaev
be5f63d64d noted TC358743 errors 2024-03-06 01:02:52 +02:00
Maxim Devaev
40e17b05b3 memsink client: bump last_as_blank_ts on every flock() 2024-03-06 00:43:15 +02:00
Maxim Devaev
0b8940d93d limit fps by m2m hardware to reduce latency 2024-03-05 21:49:39 +02:00
Maxim Devaev
e92002c3d8 repeat blank every second on offline 2024-03-05 14:32:45 +02:00
Maxim Devaev
e558b0f1a1 Issue #264: Bring back BSD compatibility in strerror hacks 2024-03-05 14:19:55 +02:00
Maxim Devaev
b5784149b2 fix 2024-03-05 13:49:18 +02:00
Maxim Devaev
55b6a3e933 improved logging 2024-03-05 13:47:29 +02:00
Maxim Devaev
f7c2948477 improved memsink checks performance 2024-03-05 13:44:54 +02:00
Maxim Devaev
c55b6c4d7d lint fix 2024-03-04 17:39:39 +02:00
Maxim Devaev
442790486c fixed empty label for goto 2024-03-04 17:19:30 +02:00
Maxim Devaev
bbc7ceb110 fix 2024-03-04 08:32:09 +02:00
Maxim Devaev
2ffa561eb1 reduced snapshot timeout to error_delay*3 2024-03-04 07:47:14 +02:00
Maxim Devaev
490d833983 refactoring 2024-03-04 07:22:48 +02:00
Maxim Devaev
0b3a1eb963 deprecated --last-as-blank 2024-03-04 07:18:45 +02:00
Maxim Devaev
7fd5eb229f fixed offline state 2024-03-04 03:54:16 +02:00
Maxim Devaev
98b5e52a68 block signals in threads 2024-03-04 03:38:45 +02:00
Maxim Devaev
c8dc5119fe signal lib to reduce duplicating code 2024-03-04 03:12:14 +02:00
Maxim Devaev
b556dfb897 improved persistent logic 2024-03-04 01:50:34 +02:00
Maxim Devaev
06eda04180 always generate blanks for offline snapshots 2024-03-03 22:15:25 +02:00
Maxim Devaev
05bba86c63 refactoring 2024-03-03 21:28:25 +02:00
Maxim Devaev
6827a72097 failed if dv timings are not available 2024-03-03 20:04:47 +02:00
Maxim Devaev
299b3886af improved messages 2024-03-03 19:02:59 +02:00
Maxim Devaev
f9bc5666b8 refactoring 2024-03-03 18:44:13 +02:00
Maxim Devaev
c9cb0a416e fixed persistent timeout 2024-03-03 08:23:18 +02:00
Maxim Devaev
ffa68a86a6 refactoring 2024-03-03 08:10:33 +02:00
Maxim Devaev
8fe411aa8b compress only lastest frame 2024-03-03 07:05:00 +02:00
Maxim Devaev
36dd5d1533 pass encoders if there is no clients 2024-03-03 06:27:19 +02:00
Maxim Devaev
33b9bff0b9 atomic capture state for http 2024-03-03 06:04:48 +02:00
Maxim Devaev
c24d6338e2 Issue #228: Request fresh snapshot from jpeg encoder 2024-03-03 04:59:12 +02:00
Maxim Devaev
8cb6fc4e78 refactoring 2024-03-03 03:24:40 +02:00
Maxim Devaev
a9dfff84e6 fix 2024-03-03 02:55:13 +02:00
Maxim Devaev
988a91634a fixed force_key logic for slowdown 2024-03-03 02:51:53 +02:00
Maxim Devaev
8f6df3b455 Issue #263: -latomic is required now 2024-03-03 01:45:16 +02:00
Maxim Devaev
ef47fa4c74 jpeg in a separate thread 2024-03-03 01:04:06 +02:00
Maxim Devaev
f2f560a345 h264 encoder in separate thread 2024-03-02 23:06:06 +02:00
Maxim Devaev
6a0ee68692 renamed captured_fps to http_captured_fps 2024-03-02 21:33:55 +02:00
Maxim Devaev
72741b90f4 releaser threads 2024-03-02 21:07:46 +02:00
Maxim Devaev
0296ab60c3 added device timeout error message 2024-03-02 20:16:34 +02:00
Maxim Devaev
77a53347c3 refactoring 2024-03-02 19:37:50 +02:00
Maxim Devaev
c32ea286f2 Bump version: 5.57 → 5.58 2024-03-02 19:11:06 +02:00
Maxim Devaev
b2fb857f5b refactoring 2024-03-02 19:09:53 +02:00
Maxim Devaev
20cdabc8a4 lint fix 2024-03-02 10:42:30 +02:00
Maxim Devaev
e2f4c193e3 refactoring 2024-03-02 10:08:06 +02:00
Maxim Devaev
b4aa9593dc refactoring 2024-03-02 09:59:41 +02:00
Maxim Devaev
20c729893b refactoring 2024-03-02 09:56:44 +02:00
Maxim Devaev
a00f49331c wait (select) device in grab function 2024-03-02 09:33:45 +02:00
Maxim Devaev
85308e48fd stream: null hw buffer pointer after encoding 2024-03-02 07:54:38 +02:00
Maxim Devaev
ff08a0fb25 refactoring 2024-03-02 07:23:18 +02:00
Maxim Devaev
6145b69c97 refactoring 2024-03-02 02:09:54 +02:00
Maxim Devaev
cfc5ae1b94 v4p: using /dev/dri/by-path/platform-gpu-card instead of card0 2024-03-02 01:14:15 +02:00
Maxim Devaev
54b221aabd moved exit_on_no_clients logic from http to stream loop 2024-03-01 09:40:51 +02:00
Maxim Devaev
dabee9d47a gitignores ustreamer-v4p 2024-03-01 08:22:14 +02:00
Maxim Devaev
e30520d9f3 refactoring 2024-03-01 08:11:37 +02:00
Maxim Devaev
8f0acb2176 Bump version: 5.56 → 5.57 2024-03-01 04:33:13 +02:00
Maxim Devaev
8edeff8160 fixed makefile 2024-03-01 04:21:45 +02:00
Maxim Devaev
cacec0d25c refactoring 2024-03-01 04:09:16 +02:00
76 changed files with 2508 additions and 1705 deletions

View File

@@ -1,7 +1,7 @@
[bumpversion]
commit = True
tag = True
current_version = 5.56
current_version = 6.5
parse = (?P<major>\d+)\.(?P<minor>\d+)
serialize =
{major}.{minor}

2
.gitignore vendored
View File

@@ -7,7 +7,7 @@
/python/ustreamer.egg-info/
/janus/build/
/ustreamer
/ustreamer-dump
/ustreamer-*
/config.mk
vgcore.*
*.sock

View File

@@ -36,8 +36,8 @@ endif
apps:
$(MAKE) -C src
for i in src/ustreamer.bin src/ustreamer-*.bin; do \
test ! -x $$i || ln -sf $$i .; \
for i in src/*.bin; do \
test ! -x $$i || ln -sf $$i `basename $$i .bin`; \
done

View File

@@ -85,13 +85,13 @@ Without arguments, ```ustreamer``` will try to open ```/dev/video0``` with 640x4
:exclamation: Please note that since µStreamer v2.0 cross-domain requests were disabled by default for [security reasons](https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS). To enable the old behavior, use the option `--allow-origin=\*`.
The recommended way of running µStreamer with [Auvidea B101](https://www.raspberrypi.org/forums/viewtopic.php?f=38&t=120702&start=400#p1339178) on Raspberry Pi:
The recommended way of running µStreamer with [TC358743-based capture device](https://www.raspberrypi.org/forums/viewtopic.php?f=38&t=120702&start=400#p1339178) on Raspberry Pi:
```
$ ./ustreamer \
--format=uyvy \ # Device input format
--encoder=m2m-image \ # Hardware encoding on V4L2 M2M driver
--workers=3 \ # Workers number
--persistent \ # Don't re-initialize device on timeout (for example when HDMI cable was disconnected)
--persistent \ # Suppress repetitive signal source errors (for example when HDMI cable was disconnected)
--dv-timings \ # Use DV-timings
--drop-same-frames=30 # Save the traffic
```

View File

@@ -22,6 +22,23 @@
#include "audio.h"
#include <stdlib.h>
#include <stdatomic.h>
#include <assert.h>
#include <pthread.h>
#include <alsa/asoundlib.h>
#include <speex/speex_resampler.h>
#include <opus/opus.h>
#include "uslibs/types.h"
#include "uslibs/tools.h"
#include "uslibs/array.h"
#include "uslibs/ring.h"
#include "uslibs/threading.h"
#include "logging.h"
#define _JLOG_PERROR_ALSA(_err, _prefix, _msg, ...) US_JLOG_ERROR(_prefix, _msg ": %s", ##__VA_ARGS__, snd_strerror(_err))
#define _JLOG_PERROR_RES(_err, _prefix, _msg, ...) US_JLOG_ERROR(_prefix, _msg ": %s", ##__VA_ARGS__, speex_resampler_strerror(_err))
@@ -31,7 +48,7 @@
// - https://github.com/xiph/opus/blob/7b05f44/src/opus_demo.c#L368
#define _HZ_TO_FRAMES(_hz) (6 * (_hz) / 50) // 120ms
#define _HZ_TO_BUF16(_hz) (_HZ_TO_FRAMES(_hz) * 2) // One stereo frame = (16bit L) + (16bit R)
#define _HZ_TO_BUF8(_hz) (_HZ_TO_BUF16(_hz) * sizeof(int16_t))
#define _HZ_TO_BUF8(_hz) (_HZ_TO_BUF16(_hz) * sizeof(s16))
#define _MIN_PCM_HZ 8000
#define _MAX_PCM_HZ 192000
@@ -41,13 +58,13 @@
typedef struct {
int16_t data[_MAX_BUF16];
s16 data[_MAX_BUF16];
} _pcm_buffer_s;
typedef struct {
uint8_t data[_MAX_BUF8]; // Worst case
size_t used;
uint64_t pts;
u8 data[_MAX_BUF8]; // Worst case
uz used;
u64 pts;
} _enc_buffer_s;
@@ -71,7 +88,7 @@ bool us_audio_probe(const char *name) {
return true;
}
us_audio_s *us_audio_init(const char *name, unsigned pcm_hz) {
us_audio_s *us_audio_init(const char *name, uint pcm_hz) {
us_audio_s *audio;
US_CALLOC(audio, 1);
audio->pcm_hz = pcm_hz;
@@ -162,7 +179,7 @@ void us_audio_destroy(us_audio_s *audio) {
free(audio);
}
int us_audio_get_encoded(us_audio_s *audio, uint8_t *data, size_t *size, uint64_t *pts) {
int us_audio_get_encoded(us_audio_s *audio, u8 *data, uz *size, u64 *pts) {
if (atomic_load(&audio->stop)) {
return -1;
}
@@ -170,7 +187,7 @@ int us_audio_get_encoded(us_audio_s *audio, uint8_t *data, size_t *size, uint64_
if (ri < 0) {
return -2;
}
_enc_buffer_s *const buf = audio->enc_ring->items[ri];
const _enc_buffer_s *const buf = audio->enc_ring->items[ri];
if (*size < buf->used) {
us_ring_consumer_release(audio->enc_ring, ri);
return -3;
@@ -195,10 +212,10 @@ static _enc_buffer_s *_enc_buffer_init(void) {
}
static void *_pcm_thread(void *v_audio) {
US_THREAD_RENAME("us_a_pcm");
US_THREAD_SETTLE("us_a_pcm");
us_audio_s *const audio = (us_audio_s *)v_audio;
uint8_t in[_MAX_BUF8];
us_audio_s *const audio = v_audio;
u8 in[_MAX_BUF8];
while (!atomic_load(&audio->stop)) {
const int frames = snd_pcm_readi(audio->pcm, in, audio->pcm_frames);
@@ -225,10 +242,10 @@ static void *_pcm_thread(void *v_audio) {
}
static void *_encoder_thread(void *v_audio) {
US_THREAD_RENAME("us_a_enc");
US_THREAD_SETTLE("us_a_enc");
us_audio_s *const audio = (us_audio_s *)v_audio;
int16_t in_res[_MAX_BUF16];
us_audio_s *const audio = v_audio;
s16 in_res[_MAX_BUF16];
while (!atomic_load(&audio->stop)) {
const int in_ri = us_ring_consumer_acquire(audio->pcm_ring, 0.1);
@@ -237,11 +254,11 @@ static void *_encoder_thread(void *v_audio) {
}
_pcm_buffer_s *const in = audio->pcm_ring->items[in_ri];
int16_t *in_ptr;
s16 *in_ptr;
if (audio->res != NULL) {
assert(audio->pcm_hz != _ENCODER_INPUT_HZ);
uint32_t in_count = audio->pcm_frames;
uint32_t out_count = _HZ_TO_FRAMES(_ENCODER_INPUT_HZ);
u32 in_count = audio->pcm_frames;
u32 out_count = _HZ_TO_FRAMES(_ENCODER_INPUT_HZ);
speex_resampler_process_interleaved_int(audio->res, in->data, &in_count, in_res, &out_count);
in_ptr = in_res;
} else {

View File

@@ -22,50 +22,40 @@
#pragma once
#include <stdlib.h>
#include <stdint.h>
#include <stdbool.h>
#include <stdatomic.h>
#include <assert.h>
#include <sys/types.h>
#include <pthread.h>
#include <alsa/asoundlib.h>
#include <speex/speex_resampler.h>
#include <opus/opus.h>
#include "uslibs/tools.h"
#include "uslibs/array.h"
#include "uslibs/types.h"
#include "uslibs/ring.h"
#include "uslibs/threading.h"
#include "logging.h"
typedef struct {
snd_pcm_t *pcm;
unsigned pcm_hz;
unsigned pcm_frames;
size_t pcm_size;
uint pcm_hz;
uint pcm_frames;
uz pcm_size;
snd_pcm_hw_params_t *pcm_params;
SpeexResamplerState *res;
OpusEncoder *enc;
us_ring_s *pcm_ring;
us_ring_s *enc_ring;
uint32_t pts;
us_ring_s *pcm_ring;
us_ring_s *enc_ring;
u32 pts;
pthread_t pcm_tid;
pthread_t enc_tid;
bool tids_created;
atomic_bool stop;
pthread_t pcm_tid;
pthread_t enc_tid;
bool tids_created;
atomic_bool stop;
} us_audio_s;
bool us_audio_probe(const char *name);
us_audio_s *us_audio_init(const char *name, unsigned pcm_hz);
us_audio_s *us_audio_init(const char *name, uint pcm_hz);
void us_audio_destroy(us_audio_s *audio);
int us_audio_get_encoded(us_audio_s *audio, uint8_t *data, size_t *size, uint64_t *pts);
int us_audio_get_encoded(us_audio_s *audio, u8 *data, uz *size, u64 *pts);

View File

@@ -51,6 +51,7 @@ us_janus_client_s *us_janus_client_init(janus_callbacks *gw, janus_plugin_sessio
client->session = session;
atomic_init(&client->transmit, false);
atomic_init(&client->transmit_audio, false);
atomic_init(&client->video_orient, 0);
atomic_init(&client->stop, false);
@@ -93,15 +94,17 @@ void us_janus_client_send(us_janus_client_s *client, const us_rtp_s *rtp) {
}
static void *_video_thread(void *v_client) {
US_THREAD_SETTLE("us_c_video");
return _common_thread(v_client, true);
}
static void *_audio_thread(void *v_client) {
US_THREAD_SETTLE("us_c_audio");
return _common_thread(v_client, false);
}
static void *_common_thread(void *v_client, bool video) {
us_janus_client_s *const client = (us_janus_client_s *)v_client;
us_janus_client_s *const client = v_client;
us_ring_s *const ring = (video ? client->video_ring : client->audio_ring);
assert(ring != NULL); // Audio may be NULL
@@ -129,6 +132,7 @@ static void *_common_thread(void *v_client, bool video) {
# endif
};
janus_plugin_rtp_extensions_reset(&packet.extensions);
/*if (rtp->zero_playout_delay) {
// https://github.com/pikvm/pikvm/issues/784
packet.extensions.min_delay = 0;
@@ -140,6 +144,13 @@ static void *_common_thread(void *v_client, bool video) {
packet.extensions.max_delay = 300; // == 3s, i.e. 10ms granularity
}*/
if (rtp.video) {
const uint video_orient = atomic_load(&client->video_orient);
if (video_orient != 0) {
packet.extensions.video_rotation = video_orient;
}
}
client->gw->relay_rtp(client->session, &packet);
}
}

View File

@@ -39,6 +39,7 @@ typedef struct us_janus_client_sx {
janus_plugin_session *session;
atomic_bool transmit;
atomic_bool transmit_audio;
atomic_uint video_orient;
pthread_t video_tid;
pthread_t audio_tid;

View File

@@ -36,11 +36,3 @@
JANUS_LOG(LOG_ERR, "[%s/%-9s] " x_msg ": %s\n", US_PLUGIN_NAME, x_prefix, ##__VA_ARGS__, m_perror_str); \
free(m_perror_str); \
}
#define US_ONCE(...) { \
const int m_reported = __LINE__; \
if (m_reported != once) { \
__VA_ARGS__; \
once = m_reported; \
} \
}

View File

@@ -58,7 +58,7 @@ int us_memsink_fd_wait_frame(int fd, us_memsink_shared_s *mem, u64 last_id) {
}
int us_memsink_fd_get_frame(int fd, us_memsink_shared_s *mem, us_frame_s *frame, u64 *frame_id, bool key_required) {
us_frame_set_data(frame, mem->data, mem->used);
us_frame_set_data(frame, us_memsink_get_data(mem), mem->used);
US_FRAME_COPY_META(mem, frame);
*frame_id = mem->id;
mem->last_client_ts = us_get_now_monotonic();

View File

@@ -42,12 +42,12 @@
#include "uslibs/list.h"
#include "uslibs/ring.h"
#include "uslibs/memsinksh.h"
#include "uslibs/tc358743.h"
#include "const.h"
#include "logging.h"
#include "client.h"
#include "audio.h"
#include "tc358743.h"
#include "rtp.h"
#include "rtpv.h"
#include "rtpa.h"
@@ -100,14 +100,13 @@ janus_plugin *create(void);
static void *_video_rtp_thread(void *arg) {
(void)arg;
US_THREAD_RENAME("us_video_rtp");
US_THREAD_SETTLE("us_video_rtp");
atomic_store(&_g_video_rtp_tid_created, true);
while (!_STOP) {
const int ri = us_ring_consumer_acquire(_g_video_ring, 0.1);
if (ri >= 0) {
us_frame_s *frame = _g_video_ring->items[ri];
const us_frame_s *const frame = _g_video_ring->items[ri];
_LOCK_VIDEO;
const bool zero_playout_delay = (frame->gop == 0);
us_rtpv_wrap(_g_rtpv, frame, zero_playout_delay);
@@ -120,8 +119,7 @@ static void *_video_rtp_thread(void *arg) {
static void *_video_sink_thread(void *arg) {
(void)arg;
US_THREAD_RENAME("us_video_sink");
US_THREAD_SETTLE("us_video_sink");
atomic_store(&_g_video_sink_tid_created, true);
us_frame_s *drop = us_frame_init();
@@ -138,12 +136,18 @@ static void *_video_sink_thread(void *arg) {
int fd = -1;
us_memsink_shared_s *mem = NULL;
const uz data_size = us_memsink_calculate_size(_g_config->video_sink_name);
if (data_size == 0) {
US_ONCE({ US_JLOG_ERROR("video", "Invalid memsink object suffix"); });
goto close_memsink;
}
if ((fd = shm_open(_g_config->video_sink_name, O_RDWR, 0)) <= 0) {
US_ONCE({ US_JLOG_PERROR("video", "Can't open memsink"); });
goto close_memsink;
}
if ((mem = us_memsink_shared_map(fd)) == NULL) {
if ((mem = us_memsink_shared_map(fd, data_size)) == NULL) {
US_ONCE({ US_JLOG_PERROR("video", "Can't map memsink"); });
goto close_memsink;
}
@@ -180,20 +184,40 @@ static void *_video_sink_thread(void *arg) {
}
close_memsink:
US_DELETE(mem, us_memsink_shared_unmap);
if (mem != NULL) {
us_memsink_shared_unmap(mem, data_size);
mem = NULL;
}
US_CLOSE_FD(fd);
US_JLOG_INFO("video", "Memsink closed");
sleep(1); // error_delay
}
us_frame_destroy(drop);
return NULL;
}
static int _check_tc358743_audio(uint *audio_hz) {
int fd;
if ((fd = open(_g_config->tc358743_dev_path, O_RDWR)) < 0) {
US_JLOG_PERROR("audio", "Can't open TC358743 V4L2 device");
return -1;
}
const int checked = us_tc358743_xioctl_get_audio_hz(fd, audio_hz);
if (checked < 0) {
US_JLOG_PERROR("audio", "Can't check TC358743 audio state (%d)", checked);
close(fd);
return -1;
}
close(fd);
return 0;
}
static void *_audio_thread(void *arg) {
(void)arg;
US_THREAD_RENAME("us_audio");
US_THREAD_SETTLE("us_audio");
atomic_store(&_g_audio_tid_created, true);
assert(_g_config->audio_dev_name != NULL);
assert(_g_config->tc358743_dev_path != NULL);
@@ -205,32 +229,27 @@ static void *_audio_thread(void *arg) {
continue;
}
us_tc358743_info_s info = {0};
uint audio_hz = 0;
us_audio_s *audio = NULL;
if (us_tc358743_read_info(_g_config->tc358743_dev_path, &info) < 0) {
if (_check_tc358743_audio(&audio_hz) < 0) {
goto close_audio;
}
if (!info.has_audio) {
if (audio_hz == 0) {
US_ONCE({ US_JLOG_INFO("audio", "No audio presented from the host"); });
goto close_audio;
}
US_ONCE({ US_JLOG_INFO("audio", "Detected host audio"); });
if ((audio = us_audio_init(_g_config->audio_dev_name, info.audio_hz)) == NULL) {
if ((audio = us_audio_init(_g_config->audio_dev_name, audio_hz)) == NULL) {
goto close_audio;
}
once = 0;
while (!_STOP && _HAS_WATCHERS && _HAS_LISTENERS) {
if (
us_tc358743_read_info(_g_config->tc358743_dev_path, &info) < 0
|| !info.has_audio
|| audio->pcm_hz != info.audio_hz
) {
if (_check_tc358743_audio(&audio_hz) < 0 || audio->pcm_hz != audio_hz) {
goto close_audio;
}
uz size = US_RTP_DATAGRAM_SIZE - US_RTP_HEADER_SIZE;
u8 data[size];
u64 pts;
@@ -443,12 +462,25 @@ static struct janus_plugin_result *_plugin_handle_message(
} else if (!strcmp(request_str, "watch")) {
bool with_audio = false;
uint video_orient = 0;
{
json_t *const params = json_object_get(msg, "params");
if (params != NULL) {
json_t *const audio = json_object_get(params, "audio");
if (audio != NULL && json_is_boolean(audio)) {
with_audio = (_g_rtpa != NULL && json_boolean_value(audio));
{
json_t *const obj = json_object_get(params, "audio");
if (obj != NULL && json_is_boolean(obj)) {
with_audio = (_g_rtpa != NULL && json_boolean_value(obj));
}
}
{
json_t *const obj = json_object_get(params, "orientation");
if (obj != NULL && json_is_integer(obj)) {
video_orient = json_integer_value(obj);
switch (video_orient) {
case 90: case 180: case 270: break;
default: video_orient = 0; break;
}
}
}
}
}
@@ -488,6 +520,7 @@ static struct janus_plugin_result *_plugin_handle_message(
US_LIST_ITERATE(_g_clients, client, {
if (client->session == session) {
atomic_store(&client->transmit_audio, with_audio);
atomic_store(&client->video_orient, video_orient);
}
has_listeners = (has_listeners || atomic_load(&client->transmit_audio));
});

View File

@@ -57,7 +57,7 @@ void us_rtp_write_header(us_rtp_s *rtp, u32 pts, bool marked) {
++rtp->seq;
# define WRITE_BE_U32(x_offset, x_value) \
*((u32 *)(rtp->datagram + x_offset)) = __builtin_bswap32(x_value)
*((u32*)(rtp->datagram + x_offset)) = __builtin_bswap32(x_value)
WRITE_BE_U32(0, word0);
WRITE_BE_U32(4, pts);
WRITE_BE_U32(8, rtp->ssrc);

View File

@@ -71,6 +71,7 @@ char *us_rtpv_make_sdp(us_rtpv_s *rtpv) {
"a=rtcp-fb:%u goog-remb" RN
"a=ssrc:%" PRIu32 " cname:ustreamer" RN
"a=extmap:1 http://www.webrtc.org/experiments/rtp-hdrext/playout-delay" RN
"a=extmap:2 urn:3gpp:video-orientation" RN
"a=sendonly" RN,
pl, pl, pl, pl,
pl, pl, pl,

View File

@@ -0,0 +1 @@
../../../src/libs/memsinksh.c

1
janus/src/uslibs/tc358743.c Symbolic link
View File

@@ -0,0 +1 @@
../../../src/libs/tc358743.c

1
janus/src/uslibs/tc358743.h Symbolic link
View File

@@ -0,0 +1 @@
../../../src/libs/tc358743.h

View File

@@ -1,6 +1,6 @@
.\" Manpage for ustreamer-dump.
.\" Open an issue or pull request to https://github.com/pikvm/ustreamer to correct errors or typos
.TH USTREAMER-DUMP 1 "version 5.56" "January 2021"
.TH USTREAMER-DUMP 1 "version 6.5" "January 2021"
.SH NAME
ustreamer-dump \- Dump uStreamer's memory sink to file

View File

@@ -1,6 +1,6 @@
.\" Manpage for ustreamer.
.\" Open an issue or pull request to https://github.com/pikvm/ustreamer to correct errors or typos
.TH USTREAMER 1 "version 5.56" "November 2020"
.TH USTREAMER 1 "version 6.5" "November 2020"
.SH NAME
ustreamer \- stream MJPEG video from any V4L2 device to the network
@@ -17,7 +17,7 @@ Without arguments, \fBustreamer\fR will try to open \fB/dev/video0\fR with 640x4
Please note that since µStreamer v2\.0 cross\-domain requests were disabled by default for security reasons\. To enable the old behavior, use the option \fB\-\-allow\-origin=\e*\fR\.
For example, the recommended way of running µStreamer with Auvidea B101 on a Raspberry Pi is:
For example, the recommended way of running µStreamer with TC358743-based capture device on a Raspberry Pi is:
\fBustreamer \e\fR
.RS
@@ -27,7 +27,7 @@ For example, the recommended way of running µStreamer with Auvidea B101 on a Ra
.nf
\fB\-\-workers=3 \e\fR # Maximum workers for V4L2 encoder
.nf
\fB\-\-persistent \e\fR # Don\'t re\-initialize device on timeout (for example when HDMI cable was disconnected)
\fB\-\-persistent \e\fR # Suppress repetitive signal source errors (for example when HDMI cable was disconnected)
.nf
\fB\-\-dv\-timings \e\fR # Use DV\-timings
.nf
@@ -69,7 +69,7 @@ Desired FPS. Default: maximum possible.
Drop frames smaller then this limit. Useful if the device produces small\-sized garbage frames. Default: 128 bytes.
.TP
.BR \-n ", " \-\-persistent
Don't re\-initialize device on timeout. Default: disabled.
Suppress repetitive signal source errors. Default: disabled.
.TP
.BR \-t ", " \-\-dv\-timings
Enable DV-timings querying and events processing to automatic resolution change. Default: disabled.
@@ -106,7 +106,7 @@ It doesn't do anything. Still here for compatibility.
It doesn't do anything. Still here for compatibility.
.TP
.BR \-K\ \fIsec ", " \-\-last\-as\-blank\ \fIsec
Show the last frame received from the camera after it was disconnected, but no more than specified time (or endlessly if 0 is specified). If the device has not yet been online, display some error message. Note: currently this option has no effect on memory sinks. Default: disabled.
It doesn't do anything. Still here for compatibility.
.TP
.BR \-l ", " \-\-slowdown
Slowdown capturing to 1 FPS or less when no stream or sink clients are connected. Useful to reduce CPU consumption. Default: disabled.
@@ -212,25 +212,25 @@ Timeout for client connections. Default: 10.
.SS "JPEG sink options"
With shared memory sink you can write a stream to a file. See \fBustreamer-dump\fR(1) for more info.
.TP
.BR \-\-sink\ \fIname
Use the specified shared memory object to sink JPEG frames. Default: disabled.
.BR \-\-jpeg\-sink\ \fIname
Use the specified shared memory object to sink JPEG frames. The name should end with a suffix ".jpeg" or ":jpeg". Default: disabled.
.TP
.BR \-\-sink\-mode\ \fImode
.BR \-\-jpeg\-sink\-mode\ \fImode
Set JPEG sink permissions (like 777). Default: 660.
.TP
.BR \-\-sink\-rm
.BR \-\-jpeg\-sink\-rm
Remove shared memory on stop. Default: disabled.
.TP
.BR \-\-sink\-client\-ttl\ \fIsec
.BR \-\-jpeg\-sink\-client\-ttl\ \fIsec
Client TTL. Default: 10.
.TP
.BR \-\-sink\-timeout\ \fIsec
.BR \-\-jpeg\-sink\-timeout\ \fIsec
Timeout for lock. Default: 1.
.SS "H264 sink options"
.TP
.BR \-\-h264\-sink\ \fIname
Use the specified shared memory object to sink H264 frames. Default: disabled.
Use the specified shared memory object to sink H264 frames. The name should end with a suffix ".h264" or ":h264". Default: disabled.
.TP
.BR \-\-h264\-sink\-mode\ \fImode
Set H264 sink permissions (like 777). Default: 660.
@@ -253,6 +253,22 @@ Interval between keyframes. Default: 30.
.BR \-\-h264\-m2m\-device\ \fI/dev/path
Path to V4L2 mem-to-mem encoder device. Default: auto-select.
.SS "RAW sink options"
.TP
.BR \-\-raw\-sink\ \fIname
Use the specified shared memory object to sink RAW frames. The name should end with a suffix ".raw" or ":raw". Default: disabled.
.TP
.BR \-\-raw\-sink\-mode\ \fImode
Set RAW sink permissions (like 777). Default: 660.
.TP
.BR \-\-raw\-sink\-rm
Remove shared memory on stop. Default: disabled.
.TP
.BR \-\-raw\-sink\-client\-ttl\ \fIsec
Client TTL. Default: 10.
.TP
.BR \-\-raw\-sink\-timeout\ \fIsec
Timeout for lock. Default: 1.
.SS "Process options"
.TP

View File

@@ -3,7 +3,7 @@
pkgname=ustreamer
pkgver=5.56
pkgver=6.5
pkgrel=1
pkgdesc="Lightweight and fast MJPEG-HTTP streamer"
url="https://github.com/pikvm/ustreamer"

View File

@@ -6,7 +6,7 @@
include $(TOPDIR)/rules.mk
PKG_NAME:=ustreamer
PKG_VERSION:=5.56
PKG_VERSION:=6.5
PKG_RELEASE:=1
PKG_MAINTAINER:=Maxim Devaev <mdevaev@gmail.com>

View File

@@ -17,7 +17,7 @@ def _find_sources(suffix: str) -> list[str]:
if __name__ == "__main__":
setup(
name="ustreamer",
version="5.56",
version="6.5",
description="uStreamer tools",
author="Maxim Devaev",
author_email="mdevaev@gmail.com",

View File

@@ -0,0 +1 @@
../../../src/libs/memsinksh.c

View File

@@ -26,6 +26,7 @@ typedef struct {
double lock_timeout;
double wait_timeout;
double drop_same_frames;
uz data_size;
int fd;
us_memsink_shared_s *mem;
@@ -37,7 +38,10 @@ typedef struct {
static void _MemsinkObject_destroy_internals(_MemsinkObject *self) {
US_DELETE(self->mem, us_memsink_shared_unmap);
if (self->mem != NULL) {
us_memsink_shared_unmap(self->mem, self->data_size);
self->mem = NULL;
}
US_CLOSE_FD(self->fd);
US_DELETE(self->frame, us_frame_destroy);
}
@@ -64,13 +68,18 @@ static int _MemsinkObject_init(_MemsinkObject *self, PyObject *args, PyObject *k
SET_DOUBLE(drop_same_frames, >= 0);
# undef SET_DOUBLE
if ((self->data_size = us_memsink_calculate_size(self->obj)) == 0) {
PyErr_SetString(PyExc_ValueError, "Invalid memsink object suffix");
return -1;
}
self->frame = us_frame_init();
if ((self->fd = shm_open(self->obj, O_RDWR, 0)) == -1) {
PyErr_SetFromErrno(PyExc_OSError);
goto error;
}
if ((self->mem = us_memsink_shared_map(self->fd)) == NULL) {
if ((self->mem = us_memsink_shared_map(self->fd, self->data_size)) == NULL) {
PyErr_SetFromErrno(PyExc_OSError);
goto error;
}
@@ -99,67 +108,76 @@ static PyObject *_MemsinkObject_close(_MemsinkObject *self, PyObject *Py_UNUSED(
static PyObject *_MemsinkObject_enter(_MemsinkObject *self, PyObject *Py_UNUSED(ignored)) {
Py_INCREF(self);
return (PyObject *)self;
return (PyObject*)self;
}
static PyObject *_MemsinkObject_exit(_MemsinkObject *self, PyObject *Py_UNUSED(ignored)) {
return PyObject_CallMethod((PyObject *)self, "close", "");
return PyObject_CallMethod((PyObject*)self, "close", "");
}
static int _wait_frame(_MemsinkObject *self) {
const ldf deadline_ts = us_get_now_monotonic() + self->wait_timeout;
# define RETURN_OS_ERROR { \
Py_BLOCK_THREADS \
PyErr_SetFromErrno(PyExc_OSError); \
return -1; \
}
int locked = -1;
ldf now_ts;
do {
Py_BEGIN_ALLOW_THREADS
const int retval = us_flock_timedwait_monotonic(self->fd, self->lock_timeout);
locked = us_flock_timedwait_monotonic(self->fd, self->lock_timeout);
now_ts = us_get_now_monotonic();
if (retval < 0 && errno != EWOULDBLOCK) {
RETURN_OS_ERROR;
} else if (retval == 0) {
us_memsink_shared_s *mem = self->mem;
if (mem->magic == US_MEMSINK_MAGIC && mem->version == US_MEMSINK_VERSION && mem->id != self->frame_id) {
if (self->drop_same_frames > 0) {
if (
US_FRAME_COMPARE_GEOMETRY(self->mem, self->frame)
&& (self->frame_ts + self->drop_same_frames > now_ts)
&& !memcmp(self->frame->data, mem->data, mem->used)
) {
self->frame_id = mem->id;
goto drop;
}
}
Py_BLOCK_THREADS
return 0;
if (locked < 0) {
if (errno == EWOULDBLOCK) {
goto retry;
}
goto os_error;
}
if (flock(self->fd, LOCK_UN) < 0) {
RETURN_OS_ERROR;
us_memsink_shared_s *mem = self->mem;
if (mem->magic != US_MEMSINK_MAGIC || mem->version != US_MEMSINK_VERSION) {
goto retry;
}
// Let the sink know that the client is alive
mem->last_client_ts = now_ts;
if (mem->id == self->frame_id) {
goto retry;
}
if (self->drop_same_frames > 0) {
if (
US_FRAME_COMPARE_GEOMETRY(self->mem, self->frame)
&& (self->frame_ts + self->drop_same_frames > now_ts)
&& !memcmp(self->frame->data, us_memsink_get_data(mem), mem->used)
) {
self->frame_id = mem->id;
goto retry;
}
}
drop:
// New frame found
Py_BLOCK_THREADS
return 0;
os_error:
Py_BLOCK_THREADS
PyErr_SetFromErrno(PyExc_OSError);
return -1;
retry:
if (locked >= 0 && flock(self->fd, LOCK_UN) < 0) {
goto os_error;
}
if (usleep(1000) < 0) {
RETURN_OS_ERROR;
goto os_error;
}
Py_END_ALLOW_THREADS
if (PyErr_CheckSignals() < 0) {
return -1;
}
} while (now_ts < deadline_ts);
} while (now_ts < deadline_ts);
return -2;
# undef RETURN_OS_ERROR
}
static PyObject *_MemsinkObject_wait_frame(_MemsinkObject *self, PyObject *args, PyObject *kwargs) {
@@ -181,11 +199,10 @@ static PyObject *_MemsinkObject_wait_frame(_MemsinkObject *self, PyObject *args,
}
us_memsink_shared_s *mem = self->mem;
us_frame_set_data(self->frame, mem->data, mem->used);
us_frame_set_data(self->frame, us_memsink_get_data(mem), mem->used);
US_FRAME_COPY_META(self->mem, self->frame);
self->frame_id = mem->id;
self->frame_ts = us_get_now_monotonic();
mem->last_client_ts = self->frame_ts;
if (key_required) {
mem->key_requested = true;
}
@@ -222,7 +239,7 @@ static PyObject *_MemsinkObject_wait_frame(_MemsinkObject *self, PyObject *args,
SET_NUMBER(grab_ts, Double, Float);
SET_NUMBER(encode_begin_ts, Double, Float);
SET_NUMBER(encode_end_ts, Double, Float);
SET_VALUE("data", PyBytes_FromStringAndSize((const char *)self->frame->data, self->frame->used));
SET_VALUE("data", PyBytes_FromStringAndSize((const char*)self->frame->data, self->frame->used));
# undef SET_NUMBER
# undef SET_VALUE
@@ -297,7 +314,7 @@ PyMODINIT_FUNC PyInit_ustreamer(void) {
Py_INCREF(&_MemsinkType);
if (PyModule_AddObject(module, "Memsink", (PyObject *)&_MemsinkType) < 0) {
if (PyModule_AddObject(module, "Memsink", (PyObject*)&_MemsinkType) < 0) {
return NULL;
}

View File

@@ -14,7 +14,7 @@ _V4P = ustreamer-v4p.bin
_CFLAGS = -MD -c -std=c17 -Wall -Wextra -D_GNU_SOURCE $(CFLAGS)
_LDFLAGS = $(LDFLAGS)
_COMMON_LIBS = -lm -ljpeg -pthread -lrt
_COMMON_LIBS = -lm -ljpeg -pthread -lrt -latomic
_USTR_LIBS = $(_COMMON_LIBS) -levent -levent_pthreads
_USTR_SRCS = $(shell ls \

View File

@@ -47,7 +47,7 @@ us_output_file_s *us_output_file_init(const char *path, bool json) {
}
void us_output_file_write(void *v_output, const us_frame_s *frame) {
us_output_file_s *output = (us_output_file_s *)v_output;
us_output_file_s *output = v_output;
if (output->json) {
us_base64_encode(frame->data, frame->used, &output->base64_data, &output->base64_allocated);
fprintf(output->fp,
@@ -66,7 +66,7 @@ void us_output_file_write(void *v_output, const us_frame_s *frame) {
}
void us_output_file_destroy(void *v_output) {
us_output_file_s *output = (us_output_file_s *)v_output;
us_output_file_s *output = v_output;
US_DELETE(output->base64_data, free);
if (output->fp && output->fp != stdout) {
if (fclose(output->fp) < 0) {

View File

@@ -24,7 +24,6 @@
#include <stdlib.h>
#include <stdbool.h>
#include <unistd.h>
#include <signal.h>
#include <limits.h>
#include <float.h>
#include <getopt.h>
@@ -36,6 +35,7 @@
#include "../libs/logging.h"
#include "../libs/frame.h"
#include "../libs/memsink.h"
#include "../libs/signal.h"
#include "../libs/options.h"
#include "file.h"
@@ -95,7 +95,6 @@ typedef struct {
static void _signal_handler(int signum);
static void _install_signal_handlers(void);
static int _dump_sink(
const char *sink_name, unsigned sink_timeout,
@@ -183,14 +182,14 @@ int main(int argc, char *argv[]) {
_output_context_s ctx = {0};
if (output_path && output_path[0] != '\0') {
if ((ctx.v_output = (void *)us_output_file_init(output_path, output_json)) == NULL) {
if ((ctx.v_output = (void*)us_output_file_init(output_path, output_json)) == NULL) {
return 1;
}
ctx.write = us_output_file_write;
ctx.destroy = us_output_file_destroy;
}
_install_signal_handlers();
us_install_signals_handler(_signal_handler, false);
const int retval = abs(_dump_sink(sink_name, sink_timeout, count, interval, key_required, &ctx));
if (ctx.v_output && ctx.destroy) {
ctx.destroy(ctx.v_output);
@@ -206,25 +205,6 @@ static void _signal_handler(int signum) {
_g_stop = true;
}
static void _install_signal_handlers(void) {
struct sigaction sig_act = {0};
assert(!sigemptyset(&sig_act.sa_mask));
sig_act.sa_handler = _signal_handler;
assert(!sigaddset(&sig_act.sa_mask, SIGINT));
assert(!sigaddset(&sig_act.sa_mask, SIGTERM));
assert(!sigaddset(&sig_act.sa_mask, SIGPIPE));
US_LOG_DEBUG("Installing SIGINT handler ...");
assert(!sigaction(SIGINT, &sig_act, NULL));
US_LOG_DEBUG("Installing SIGTERM handler ...");
assert(!sigaction(SIGTERM, &sig_act, NULL));
US_LOG_DEBUG("Installing SIGTERM handler ...");
assert(!sigaction(SIGPIPE, &sig_act, NULL));
}
static int _dump_sink(
const char *sink_name, unsigned sink_timeout,
long long count, long double interval,

View File

@@ -25,8 +25,8 @@
#include "types.h"
#define US_VERSION_MAJOR 5
#define US_VERSION_MINOR 56
#define US_VERSION_MAJOR 6
#define US_VERSION_MINOR 5
#define US_MAKE_VERSION2(_major, _minor) #_major "." #_minor
#define US_MAKE_VERSION1(_major, _minor) US_MAKE_VERSION2(_major, _minor)

View File

@@ -23,6 +23,7 @@
#include "device.h"
#include <stdlib.h>
#include <stdatomic.h>
#include <stddef.h>
#include <string.h>
#include <strings.h>
@@ -80,12 +81,12 @@ static const struct {
{"USERPTR", V4L2_MEMORY_USERPTR},
};
static int _device_wait_buffer(us_device_s *dev);
static int _device_consume_event(us_device_s *dev);
static void _v4l2_buffer_copy(const struct v4l2_buffer *src, struct v4l2_buffer *dest);
static bool _device_is_buffer_valid(us_device_s *dev, const struct v4l2_buffer *buf, const u8 *data);
static int _device_open_check_cap(us_device_s *dev);
static int _device_open_dv_timings(us_device_s *dev);
static int _device_apply_dv_timings(us_device_s *dev);
static int _device_open_dv_timings(us_device_s *dev, bool apply);
static int _device_open_format(us_device_s *dev, bool first);
static void _device_open_hw_fps(us_device_s *dev);
static void _device_open_jpeg_quality(us_device_s *dev);
@@ -149,16 +150,16 @@ int us_device_parse_format(const char *str) {
return item->format;
}
});
return US_FORMAT_UNKNOWN;
return -1;
}
v4l2_std_id us_device_parse_standard(const char *str) {
US_ARRAY_ITERATE(_STANDARDS, 1, item, {
int us_device_parse_standard(const char *str) {
US_ARRAY_ITERATE(_STANDARDS, 0, item, {
if (!strcasecmp(item->name, str)) {
return item->standard;
}
});
return US_STANDARD_UNKNOWN;
return -1;
}
int us_device_parse_io_method(const char *str) {
@@ -167,20 +168,46 @@ int us_device_parse_io_method(const char *str) {
return item->io_method;
}
});
return US_IO_METHOD_UNKNOWN;
return -1;
}
int us_device_open(us_device_s *dev) {
us_device_runtime_s *const run = dev->run;
if ((run->fd = open(dev->path, O_RDWR|O_NONBLOCK)) < 0) {
_D_LOG_PERROR("Can't open device");
if (access(dev->path, R_OK | W_OK) < 0) {
if (run->open_error_reported != -errno) {
run->open_error_reported = -errno; // Don't confuse it with __LINE__
US_LOG_PERROR("No access to capture device");
}
goto tmp_error;
}
_D_LOG_DEBUG("Opening capture device ...");
if ((run->fd = open(dev->path, O_RDWR | O_NONBLOCK)) < 0) {
_D_LOG_PERROR("Can't capture open device");
goto error;
}
_D_LOG_DEBUG("Capture device fd=%d opened", run->fd);
if (dev->dv_timings && dev->persistent) {
_D_LOG_DEBUG("Probing DV-timings or QuerySTD ...");
if (_device_open_dv_timings(dev, false) < 0) {
const int line = __LINE__;
if (run->open_error_reported != line) {
run->open_error_reported = line;
_D_LOG_ERROR("No signal from source");
}
goto tmp_error;
}
}
if (_device_open_check_cap(dev) < 0) {
goto error;
}
if (_device_open_dv_timings(dev) < 0) {
if (_device_apply_resolution(dev, dev->width, dev->height, dev->run->hz)) {
goto error;
}
if (dev->dv_timings && _device_open_dv_timings(dev, true) < 0) {
goto error;
}
if (_device_open_format(dev, true) < 0) {
@@ -209,10 +236,17 @@ int us_device_open(us_device_s *dev) {
goto error;
}
run->streamon = true;
run->open_error_reported = 0;
_D_LOG_INFO("Capturing started");
return 0;
tmp_error:
us_device_close(dev);
return -2;
error:
run->open_error_reported = 0;
us_device_close(dev);
return -1;
}
@@ -220,17 +254,21 @@ error:
void us_device_close(us_device_s *dev) {
us_device_runtime_s *const run = dev->run;
bool say = false;
if (run->streamon) {
say = true;
_D_LOG_DEBUG("Calling VIDIOC_STREAMOFF ...");
enum v4l2_buf_type type = run->capture_type;
if (us_xioctl(run->fd, VIDIOC_STREAMOFF, &type) < 0) {
_D_LOG_PERROR("Can't stop capturing");
}
run->streamon = false;
_D_LOG_INFO("Capturing stopped");
}
if (run->hw_bufs != NULL) {
_D_LOG_DEBUG("Releasing device buffers ...");
say = true;
_D_LOG_DEBUG("Releasing HW buffers ...");
for (uint index = 0; index < run->n_bufs; ++index) {
us_hw_buffer_s *hw = &run->hw_bufs[index];
@@ -239,7 +277,7 @@ void us_device_close(us_device_s *dev) {
if (dev->io_method == V4L2_MEMORY_MMAP) {
if (hw->raw.allocated > 0 && hw->raw.data != NULL) {
if (munmap(hw->raw.data, hw->raw.allocated) < 0) {
_D_LOG_PERROR("Can't unmap device buffer=%u", index);
_D_LOG_PERROR("Can't unmap HW buffer=%u", index);
}
}
} else { // V4L2_MEMORY_USERPTR
@@ -255,55 +293,25 @@ void us_device_close(us_device_s *dev) {
}
US_CLOSE_FD(run->fd);
run->persistent_timeout_reported = false;
}
int us_device_select(us_device_s *dev, bool *has_read, bool *has_error) {
us_device_runtime_s *const run = dev->run;
# define INIT_FD_SET(x_set) \
fd_set x_set; FD_ZERO(&x_set); FD_SET(run->fd, &x_set);
INIT_FD_SET(read_fds);
INIT_FD_SET(error_fds);
# undef INIT_FD_SET
// Раньше мы проверяли и has_write, но потом выяснилось, что libcamerify зачем-то
// генерирует эвенты на запись, вероятно ошибочно. Судя по всему, игнорирование
// has_write не делает никому плохо.
struct timeval timeout;
timeout.tv_sec = dev->timeout;
timeout.tv_usec = 0;
_D_LOG_DEBUG("Calling select() on video device ...");
int retval = select(run->fd + 1, &read_fds, NULL, &error_fds, &timeout);
if (retval > 0) {
*has_read = FD_ISSET(run->fd, &read_fds);
*has_error = FD_ISSET(run->fd, &error_fds);
} else {
*has_read = false;
*has_error = false;
if (say) {
_D_LOG_INFO("Capturing stopped");
}
_D_LOG_DEBUG("Device select() --> %d; has_read=%d, has_error=%d", retval, *has_read, *has_error);
if (retval > 0) {
run->persistent_timeout_reported = false;
} else if (retval == 0) {
if (dev->persistent) {
if (!run->persistent_timeout_reported) {
_D_LOG_ERROR("Persistent device timeout (unplugged)");
run->persistent_timeout_reported = true;
}
} else {
// Если устройство не персистентное, то таймаут является ошибкой
retval = -1;
}
}
return retval;
}
int us_device_grab_buffer(us_device_s *dev, us_hw_buffer_s **hw) {
// Это сложная функция, которая делает сразу много всего, чтобы получить новый фрейм.
// - Вызывается _device_wait_buffer() с select() внутри, чтобы подождать новый фрейм
// или эвент V4L2. Обработка эвентов более приоритетна, чем кадров.
// - Если есть новые фреймы, то пропустить их все, пока не закончатся и вернуть
// самый-самый свежий, содержащий при этом валидные данные.
// - Если таковых не нашлось, вернуть -2.
// - Ошибка -1 возвращается при любых сбоях.
if (_device_wait_buffer(dev) < 0) {
return -1;
}
us_device_runtime_s *const run = dev->run;
*hw = NULL;
@@ -319,7 +327,7 @@ int us_device_grab_buffer(us_device_s *dev, us_hw_buffer_s **hw) {
uint skipped = 0;
bool broken = false;
_D_LOG_DEBUG("Grabbing device buffer ...");
_D_LOG_DEBUG("Grabbing hw buffer ...");
do {
struct v4l2_buffer new = {0};
@@ -335,7 +343,7 @@ int us_device_grab_buffer(us_device_s *dev, us_hw_buffer_s **hw) {
if (new_got) {
if (new.index >= run->n_bufs) {
_D_LOG_ERROR("V4L2 error: grabbed invalid device buffer=%u, n_bufs=%u", new.index, run->n_bufs);
_D_LOG_ERROR("V4L2 error: grabbed invalid HW buffer=%u, n_bufs=%u", new.index, run->n_bufs);
return -1;
}
@@ -343,7 +351,7 @@ int us_device_grab_buffer(us_device_s *dev, us_hw_buffer_s **hw) {
# define FRAME_DATA(x_buf) run->hw_bufs[x_buf.index].raw.data
if (GRABBED(new)) {
_D_LOG_ERROR("V4L2 error: grabbed device buffer=%u is already used", new.index);
_D_LOG_ERROR("V4L2 error: grabbed HW buffer=%u is already used", new.index);
return -1;
}
GRABBED(new) = true;
@@ -354,9 +362,9 @@ int us_device_grab_buffer(us_device_s *dev, us_hw_buffer_s **hw) {
broken = !_device_is_buffer_valid(dev, &new, FRAME_DATA(new));
if (broken) {
_D_LOG_DEBUG("Releasing device buffer=%u (broken frame) ...", new.index);
_D_LOG_DEBUG("Releasing HW buffer=%u (broken frame) ...", new.index);
if (us_xioctl(run->fd, VIDIOC_QBUF, &new) < 0) {
_D_LOG_PERROR("Can't release device buffer=%u (broken frame)", new.index);
_D_LOG_PERROR("Can't release HW buffer=%u (broken frame)", new.index);
return -1;
}
GRABBED(new) = false;
@@ -365,7 +373,7 @@ int us_device_grab_buffer(us_device_s *dev, us_hw_buffer_s **hw) {
if (buf_got) {
if (us_xioctl(run->fd, VIDIOC_QBUF, &buf) < 0) {
_D_LOG_PERROR("Can't release device buffer=%u (skipped frame)", buf.index);
_D_LOG_PERROR("Can't release HW buffer=%u (skipped frame)", buf.index);
return -1;
}
GRABBED(buf) = false;
@@ -387,12 +395,13 @@ int us_device_grab_buffer(us_device_s *dev, us_hw_buffer_s **hw) {
return -2; // If we have only broken frames on this capture session
}
}
_D_LOG_PERROR("Can't grab device buffer");
_D_LOG_PERROR("Can't grab HW buffer");
return -1;
}
} while (true);
*hw = &run->hw_bufs[buf.index];
atomic_store(&(*hw)->refs, 0);
(*hw)->raw.dma_fd = (*hw)->dma_fd;
(*hw)->raw.used = buf.bytesused;
(*hw)->raw.width = run->width;
@@ -403,36 +412,89 @@ int us_device_grab_buffer(us_device_s *dev, us_hw_buffer_s **hw) {
_v4l2_buffer_copy(&buf, &(*hw)->buf);
(*hw)->raw.grab_ts = (ldf)((buf.timestamp.tv_sec * (u64)1000) + (buf.timestamp.tv_usec / 1000)) / 1000;
_D_LOG_DEBUG("Grabbed new frame: buffer=%u, bytesused=%u, grab_ts=%.3Lf, latency=%.3Lf, skipped=%u",
_D_LOG_DEBUG("Grabbed HW buffer=%u: bytesused=%u, grab_ts=%.3Lf, latency=%.3Lf, skipped=%u",
buf.index, buf.bytesused, (*hw)->raw.grab_ts, us_get_now_monotonic() - (*hw)->raw.grab_ts, skipped);
return buf.index;
}
int us_device_release_buffer(us_device_s *dev, us_hw_buffer_s *hw) {
assert(atomic_load(&hw->refs) == 0);
const uint index = hw->buf.index;
_D_LOG_DEBUG("Releasing device buffer=%u ...", index);
_D_LOG_DEBUG("Releasing HW buffer=%u ...", index);
if (us_xioctl(dev->run->fd, VIDIOC_QBUF, &hw->buf) < 0) {
_D_LOG_PERROR("Can't release device buffer=%u", index);
_D_LOG_PERROR("Can't release HW buffer=%u", index);
return -1;
}
hw->grabbed = false;
_D_LOG_DEBUG("HW buffer=%u released", index);
return 0;
}
int us_device_consume_event(us_device_s *dev) {
struct v4l2_event event;
_D_LOG_INFO("Consuming V4L2 event ...");
if (us_xioctl(dev->run->fd, VIDIOC_DQEVENT, &event) == 0) {
switch (event.type) {
case V4L2_EVENT_SOURCE_CHANGE:
_D_LOG_INFO("Got V4L2_EVENT_SOURCE_CHANGE: source changed");
return -1;
case V4L2_EVENT_EOS:
_D_LOG_INFO("Got V4L2_EVENT_EOS: end of stream (ignored)");
return 0;
// Atomically takes one more reference on a HW buffer.
void us_device_buffer_incref(us_hw_buffer_s *hw) {
	atomic_fetch_add_explicit(&hw->refs, 1, memory_order_seq_cst);
}
// Atomically drops one reference from a HW buffer.
// NOTE(review): no underflow check here — callers are presumably balanced
// with us_device_buffer_incref(); verify against release logic.
void us_device_buffer_decref(us_hw_buffer_s *hw) {
	atomic_fetch_sub_explicit(&hw->refs, 1, memory_order_seq_cst);
}
int _device_wait_buffer(us_device_s *dev) {
us_device_runtime_s *const run = dev->run;
# define INIT_FD_SET(x_set) \
fd_set x_set; FD_ZERO(&x_set); FD_SET(run->fd, &x_set);
INIT_FD_SET(read_fds);
INIT_FD_SET(error_fds);
# undef INIT_FD_SET
// Раньше мы проверяли и has_write, но потом выяснилось, что libcamerify зачем-то
// генерирует эвенты на запись, вероятно ошибочно. Судя по всему, игнорирование
// has_write не делает никому плохо.
struct timeval timeout;
timeout.tv_sec = dev->timeout;
timeout.tv_usec = 0;
_D_LOG_DEBUG("Calling select() on video device ...");
bool has_read = false;
bool has_error = false;
const int selected = select(run->fd + 1, &read_fds, NULL, &error_fds, &timeout);
if (selected > 0) {
has_read = FD_ISSET(run->fd, &read_fds);
has_error = FD_ISSET(run->fd, &error_fds);
}
_D_LOG_DEBUG("Device select() --> %d; has_read=%d, has_error=%d", selected, has_read, has_error);
if (selected < 0) {
if (errno != EINTR) {
_D_LOG_PERROR("Device select() error");
}
return -1;
} else if (selected == 0) {
_D_LOG_ERROR("Device select() timeout");
return -1;
} else {
_D_LOG_PERROR("Got some V4L2 device event, but where is it? ");
if (has_error && _device_consume_event(dev) < 0) {
return -1; // Restart required
}
}
return 0;
}
// Dequeues one pending V4L2 event from the capture device.
// Returns -1 when the dequeue fails or the event signals that the capture
// session must be restarted (source change or end of stream); 0 otherwise.
static int _device_consume_event(us_device_s *dev) {
	struct v4l2_event ev;
	if (us_xioctl(dev->run->fd, VIDIOC_DQEVENT, &ev) < 0) {
		_D_LOG_PERROR("Can't consume V4L2 event");
		return -1;
	}
	if (ev.type == V4L2_EVENT_SOURCE_CHANGE) {
		_D_LOG_INFO("Got V4L2_EVENT_SOURCE_CHANGE: Source changed");
		return -1;
	}
	if (ev.type == V4L2_EVENT_EOS) {
		_D_LOG_INFO("Got V4L2_EVENT_EOS: End of stream");
		return -1;
	}
	return 0;
}
@@ -535,68 +597,81 @@ static int _device_open_check_cap(us_device_s *dev) {
return 0;
}
static int _device_open_dv_timings(us_device_s *dev) {
_device_apply_resolution(dev, dev->width, dev->height, dev->run->hz);
if (dev->dv_timings) {
_D_LOG_DEBUG("Using DV-timings");
static int _device_open_dv_timings(us_device_s *dev, bool apply) {
// Just probe only if @apply is false
if (_device_apply_dv_timings(dev) < 0) {
return -1;
}
const us_device_runtime_s *const run = dev->run;
struct v4l2_event_subscription sub = {.type = V4L2_EVENT_SOURCE_CHANGE};
_D_LOG_DEBUG("Subscribing to DV-timings events ...")
if (us_xioctl(dev->run->fd, VIDIOC_SUBSCRIBE_EVENT, &sub) < 0) {
_D_LOG_PERROR("Can't subscribe to DV-timings events");
return -1;
}
}
return 0;
}
static int _device_apply_dv_timings(us_device_s *dev) {
us_device_runtime_s *const run = dev->run; // cppcheck-suppress constVariablePointer
int dv_errno = 0;
struct v4l2_dv_timings dv = {0};
_D_LOG_DEBUG("Calling us_xioctl(VIDIOC_QUERY_DV_TIMINGS) ...");
if (us_xioctl(run->fd, VIDIOC_QUERY_DV_TIMINGS, &dv) == 0) {
float hz = 0;
if (dv.type == V4L2_DV_BT_656_1120) {
// See v4l2_print_dv_timings() in the kernel
const uint htot = V4L2_DV_BT_FRAME_WIDTH(&dv.bt);
const uint vtot = V4L2_DV_BT_FRAME_HEIGHT(&dv.bt) / (dv.bt.interlaced ? 2 : 1);
const uint fps = ((htot * vtot) > 0 ? ((100 * (u64)dv.bt.pixelclock)) / (htot * vtot) : 0);
hz = (fps / 100) + (fps % 100) / 100.0;
_D_LOG_INFO("Got new DV-timings: %ux%u%s%.02f, pixclk=%llu, vsync=%u, hsync=%u",
dv.bt.width, dv.bt.height, (dv.bt.interlaced ? "i" : "p"), hz,
(ull)dv.bt.pixelclock, dv.bt.vsync, dv.bt.hsync); // See #11 about %llu
} else {
_D_LOG_INFO("Got new DV-timings: %ux%u, pixclk=%llu, vsync=%u, hsync=%u",
dv.bt.width, dv.bt.height,
(ull)dv.bt.pixelclock, dv.bt.vsync, dv.bt.hsync);
}
_D_LOG_DEBUG("Calling us_xioctl(VIDIOC_S_DV_TIMINGS) ...");
if (us_xioctl(run->fd, VIDIOC_S_DV_TIMINGS, &dv) < 0) {
_D_LOG_PERROR("Failed to set DV-timings");
return -1;
}
if (_device_apply_resolution(dev, dv.bt.width, dv.bt.height, hz) < 0) {
return -1;
}
} else {
_D_LOG_DEBUG("Calling us_xioctl(VIDIOC_QUERYSTD) ...");
if (us_xioctl(run->fd, VIDIOC_QUERYSTD, &dev->standard) == 0) {
_D_LOG_INFO("Applying the new VIDIOC_S_STD: %s ...", _standard_to_string(dev->standard));
if (us_xioctl(run->fd, VIDIOC_S_STD, &dev->standard) < 0) {
_D_LOG_PERROR("Can't set video standard");
return -1;
}
}
_D_LOG_DEBUG("Querying DV-timings (apply=%u) ...", apply);
if (us_xioctl(run->fd, VIDIOC_QUERY_DV_TIMINGS, &dv) < 0) {
// TC358743 errors here (see in the kernel: drivers/media/i2c/tc358743.c):
// - ENOLINK: No valid signal (SYS_STATUS & MASK_S_TMDS)
// - ENOLCK: No sync on signal (SYS_STATUS & MASK_S_SYNC)
dv_errno = errno;
goto querystd;
} else if (!apply) {
goto probe_only;
}
float hz = 0;
if (dv.type == V4L2_DV_BT_656_1120) {
// See v4l2_print_dv_timings() in the kernel
const uint htot = V4L2_DV_BT_FRAME_WIDTH(&dv.bt);
const uint vtot = V4L2_DV_BT_FRAME_HEIGHT(&dv.bt) / (dv.bt.interlaced ? 2 : 1);
const uint fps = ((htot * vtot) > 0 ? ((100 * (u64)dv.bt.pixelclock)) / (htot * vtot) : 0);
hz = (fps / 100) + (fps % 100) / 100.0;
_D_LOG_INFO("Detected DV-timings: %ux%u%s%.02f, pixclk=%llu, vsync=%u, hsync=%u",
dv.bt.width, dv.bt.height, (dv.bt.interlaced ? "i" : "p"), hz,
(ull)dv.bt.pixelclock, dv.bt.vsync, dv.bt.hsync); // See #11 about %llu
} else {
_D_LOG_INFO("Detected DV-timings: %ux%u, pixclk=%llu, vsync=%u, hsync=%u",
dv.bt.width, dv.bt.height,
(ull)dv.bt.pixelclock, dv.bt.vsync, dv.bt.hsync);
}
_D_LOG_DEBUG("Applying DV-timings ...");
if (us_xioctl(run->fd, VIDIOC_S_DV_TIMINGS, &dv) < 0) {
_D_LOG_PERROR("Failed to apply DV-timings");
return -1;
}
if (_device_apply_resolution(dev, dv.bt.width, dv.bt.height, hz) < 0) {
return -1;
}
goto subscribe;
querystd:
_D_LOG_DEBUG("Failed to query DV-timings, trying QuerySTD ...");
if (us_xioctl(run->fd, VIDIOC_QUERYSTD, &dev->standard) < 0) {
if (apply) {
char *std_error = us_errno_to_string(errno); // Read the errno first
char *dv_error = us_errno_to_string(dv_errno);
_D_LOG_ERROR("Failed to query DV-timings (%s) and QuerySTD (%s)", dv_error, std_error);
free(dv_error);
free(std_error);
}
return -1;
} else if (!apply) {
goto probe_only;
}
if (us_xioctl(run->fd, VIDIOC_S_STD, &dev->standard) < 0) {
_D_LOG_PERROR("Can't set apply standard: %s", _standard_to_string(dev->standard));
return -1;
}
_D_LOG_DEBUG("Applied new video standard: %s", _standard_to_string(dev->standard));
subscribe:
; // Empty statement for the goto label above
struct v4l2_event_subscription sub = {.type = V4L2_EVENT_SOURCE_CHANGE};
_D_LOG_DEBUG("Subscribing to V4L2_EVENT_SOURCE_CHANGE ...")
if (us_xioctl(dev->run->fd, VIDIOC_SUBSCRIBE_EVENT, &sub) < 0) {
_D_LOG_PERROR("Can't subscribe to V4L2_EVENT_SOURCE_CHANGE");
return -1;
}
probe_only:
return 0;
}
@@ -659,7 +734,7 @@ static int _device_open_format(us_device_s *dev, bool first) {
_format_to_string_supported(FMT(pixelformat)));
char *format_str;
if ((format_str = (char *)_format_to_string_nullable(FMT(pixelformat))) != NULL) {
if ((format_str = (char*)_format_to_string_nullable(FMT(pixelformat))) != NULL) {
_D_LOG_INFO("Falling back to format=%s", format_str);
} else {
char fourcc_str[8];
@@ -807,6 +882,7 @@ static int _device_open_io_method_mmap(us_device_s *dev) {
}
us_hw_buffer_s *hw = &run->hw_bufs[run->n_bufs];
atomic_init(&hw->refs, 0);
const uz buf_size = (run->capture_mplane ? buf.m.planes[0].length : buf.length);
const off_t buf_offset = (run->capture_mplane ? buf.m.planes[0].m.mem_offset : buf.m.offset);
@@ -1060,7 +1136,7 @@ static const char *_standard_to_string(v4l2_std_id standard) {
return item->name;
}
});
return _STANDARDS[0].name;
return "???";
}
static const char *_io_method_to_string_supported(enum v4l2_memory io_method) {

View File

@@ -22,6 +22,8 @@
#pragma once
#include <stdatomic.h>
#include <linux/videodev2.h>
#include "types.h"
@@ -29,20 +31,15 @@
#define US_VIDEO_MIN_WIDTH ((uint)160)
#define US_VIDEO_MAX_WIDTH ((uint)15360)
#define US_VIDEO_MAX_WIDTH ((uint)15360) // Remember about stream->run->http_capture_state;
#define US_VIDEO_MIN_HEIGHT ((uint)120)
#define US_VIDEO_MAX_HEIGHT ((uint)8640)
#define US_VIDEO_MAX_FPS ((uint)120)
#define US_STANDARD_UNKNOWN V4L2_STD_UNKNOWN
#define US_STANDARDS_STR "PAL, NTSC, SECAM"
#define US_FORMAT_UNKNOWN -1
#define US_FORMATS_STR "YUYV, YVYU, UYVY, RGB565, RGB24, BGR24, MJPEG, JPEG"
#define US_IO_METHOD_UNKNOWN -1
#define US_IO_METHODS_STR "MMAP, USERPTR"
@@ -51,6 +48,7 @@ typedef struct {
struct v4l2_buffer buf;
int dma_fd;
bool grabbed;
atomic_int refs;
} us_hw_buffer_s;
typedef struct {
@@ -69,7 +67,7 @@ typedef struct {
enum v4l2_buf_type capture_type;
bool capture_mplane;
bool streamon;
bool persistent_timeout_reported;
int open_error_reported;
} us_device_runtime_s;
typedef enum {
@@ -126,13 +124,14 @@ us_device_s *us_device_init(void);
void us_device_destroy(us_device_s *dev);
int us_device_parse_format(const char *str);
v4l2_std_id us_device_parse_standard(const char *str);
int us_device_parse_standard(const char *str);
int us_device_parse_io_method(const char *str);
int us_device_open(us_device_s *dev);
void us_device_close(us_device_s *dev);
int us_device_select(us_device_s *dev, bool *has_read, bool *has_error);
int us_device_grab_buffer(us_device_s *dev, us_hw_buffer_s **hw);
int us_device_release_buffer(us_device_s *dev, us_hw_buffer_s *hw);
int us_device_consume_event(us_device_s *dev);
void us_device_buffer_incref(us_hw_buffer_s *hw);
void us_device_buffer_decref(us_hw_buffer_s *hw);

View File

@@ -52,6 +52,34 @@ void us_frametext_destroy(us_frametext_s *ft) {
free(ft);
}
/*
Every character in the font is encoded row-wise in 8 bytes.
The least significant bit of each byte corresponds to the first pixel in a row.
The character 'A' (0x41 / 65) is encoded as { 0x0C, 0x1E, 0x33, 0x33, 0x3F, 0x33, 0x33, 0x00}
0x0C => 0000 1100 => ..XX....
0x1E => 0001 1110 => .XXXX...
0x33 => 0011 0011 => XX..XX..
0x33 => 0011 0011 => XX..XX..
0x3F => 0011 1111 => XXXXXX..
0x33 => 0011 0011 => XX..XX..
0x33 => 0011 0011 => XX..XX..
0x00 => 0000 0000 => ........
To access the nth pixel in a row, right-shift by n.
. . X X . . . .
| | | | | | | |
(0x0C >> 0) & 1 == 0-+ | | | | | | |
(0x0C >> 1) & 1 == 0---+ | | | | | |
(0x0C >> 2) & 1 == 1-----+ | | | | |
(0x0C >> 3) & 1 == 1-------+ | | | |
(0x0C >> 4) & 1 == 0---------+ | | |
(0x0C >> 5) & 1 == 0-----------+ | |
(0x0C >> 6) & 1 == 0-------------+ |
(0x0C >> 7) & 1 == 0---------------+
*/
void us_frametext_draw(us_frametext_s *ft, const char *text, uint width, uint height) {
assert(width > 0);
assert(height > 0);

View File

@@ -27,6 +27,8 @@
const u8 US_FRAMETEXT_FONT[128][8] = {
// https://github.com/dhepper/font8x8/blob/master/font8x8_basic.h
// Author: Daniel Hepper <daniel@hepper.net>
// License: Public Domain
{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // U+0000 (nul)
{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // U+0001
{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // U+0002

View File

@@ -56,6 +56,11 @@ us_memsink_s *us_memsink_init(
US_LOG_INFO("Using %s-sink: %s", name, obj);
if ((sink->data_size = us_memsink_calculate_size(obj)) == 0) {
US_LOG_ERROR("%s-sink: Invalid object suffix", name);
goto error;
}
const mode_t mask = umask(0);
sink->fd = shm_open(sink->obj, (server ? O_RDWR | O_CREAT : O_RDWR), mode);
umask(mask);
@@ -65,12 +70,12 @@ us_memsink_s *us_memsink_init(
goto error;
}
if (sink->server && ftruncate(sink->fd, sizeof(us_memsink_shared_s)) < 0) {
if (sink->server && ftruncate(sink->fd, sizeof(us_memsink_shared_s) + sink->data_size) < 0) {
US_LOG_PERROR("%s-sink: Can't truncate shared memory", name);
goto error;
}
if ((sink->mem = us_memsink_shared_map(sink->fd)) == NULL) {
if ((sink->mem = us_memsink_shared_map(sink->fd, sink->data_size)) == NULL) {
US_LOG_PERROR("%s-sink: Can't mmap shared memory", name);
goto error;
}
@@ -83,7 +88,7 @@ error:
void us_memsink_destroy(us_memsink_s *sink) {
if (sink->mem != NULL) {
if (us_memsink_shared_unmap(sink->mem) < 0) {
if (us_memsink_shared_unmap(sink->mem, sink->data_size) < 0) {
US_LOG_PERROR("%s-sink: Can't unmap shared memory", sink->name);
}
}
@@ -101,16 +106,35 @@ void us_memsink_destroy(us_memsink_s *sink) {
}
bool us_memsink_server_check(us_memsink_s *sink, const us_frame_s *frame) {
// Return true (the need to write to memsink) on any of these conditions:
// - EWOULDBLOCK - we have an active client;
// - Incorrect magic or version - need to first write;
// - We have some active clients by last_client_ts;
// - Frame meta differs (like size, format, but not timestamp).
// Если frame == NULL, то только проверяем наличие клиентов
// или необходимость инициализировать память.
assert(sink->server);
if (sink->mem->magic != US_MEMSINK_MAGIC || sink->mem->version != US_MEMSINK_VERSION) {
// Если регион памяти не был инициализирован, то нужно что-то туда положить.
// Блокировка не нужна, потому что только сервер пишет в эти переменные.
return true;
}
const ldf unsafe_ts = sink->mem->last_client_ts;
if (unsafe_ts != sink->unsafe_last_client_ts) {
// Клиент пишет в синке свою отметку last_client_ts при любом действии.
// Мы не берем блокировку здесь, а просто проверяем, является ли это число тем же самым,
// что было прочитано нами в предыдущих итерациях. Значению не нужно быть консистентным,
// и даже если мы прочитали мусор из-за гонки в памяти между чтением здеси и записью
// из клиента, мы все равно можем сделать вывод, есть ли у нас клиенты вообще.
// Если число число поменялось то у нас точно есть клиенты и дальнейшие проверки
// проводить не требуется. Если же число неизменно, то стоит поставить блокировку
// и проверить, нужно ли записать что-нибудь в память для инициализации фрейма.
sink->unsafe_last_client_ts = unsafe_ts;
atomic_store(&sink->has_clients, true);
return true;
}
if (flock(sink->fd, LOCK_EX | LOCK_NB) < 0) {
if (errno == EWOULDBLOCK) {
// Есть живой клиент, который прямо сейчас взял блокировку и читает фрейм из синка
atomic_store(&sink->has_clients, true);
return true;
}
@@ -118,10 +142,7 @@ bool us_memsink_server_check(us_memsink_s *sink, const us_frame_s *frame) {
return false;
}
if (sink->mem->magic != US_MEMSINK_MAGIC || sink->mem->version != US_MEMSINK_VERSION) {
return true;
}
// Проверяем, есть ли у нас живой клиент по таймауту
const bool has_clients = (sink->mem->last_client_ts + sink->client_ttl > us_get_now_monotonic());
atomic_store(&sink->has_clients, has_clients);
@@ -129,31 +150,39 @@ bool us_memsink_server_check(us_memsink_s *sink, const us_frame_s *frame) {
US_LOG_PERROR("%s-sink: Can't unlock memory", sink->name);
return false;
}
return (has_clients || !US_FRAME_COMPARE_GEOMETRY(sink->mem, frame));;
if (has_clients) {
return true;
}
if (frame != NULL && !US_FRAME_COMPARE_GEOMETRY(sink->mem, frame)) {
// Если есть изменения в геометрии/формате фрейма, то их тоже нобходимо сразу записать в синк
return true;
}
return false;
}
int us_memsink_server_put(us_memsink_s *sink, const us_frame_s *frame, bool *key_requested) {
assert(sink->server);
const long double now = us_get_now_monotonic();
const ldf now = us_get_now_monotonic();
if (frame->used > US_MEMSINK_MAX_DATA) {
if (frame->used > sink->data_size) {
US_LOG_ERROR("%s-sink: Can't put frame: is too big (%zu > %zu)",
sink->name, frame->used, US_MEMSINK_MAX_DATA);
sink->name, frame->used, sink->data_size);
return 0; // -2
}
if (us_flock_timedwait_monotonic(sink->fd, 1) == 0) {
US_LOG_VERBOSE("%s-sink: >>>>> Exposing new frame ...", sink->name);
sink->last_id = us_get_now_id();
sink->mem->id = sink->last_id;
sink->mem->id = us_get_now_id();
if (sink->mem->key_requested && frame->key) {
sink->mem->key_requested = false;
}
*key_requested = sink->mem->key_requested;
if (key_requested != NULL) { // We don't need it for non-H264 sinks
*key_requested = sink->mem->key_requested;
}
memcpy(sink->mem->data, frame->data, frame->used);
memcpy(us_memsink_get_data(sink->mem), frame->data, frame->used);
sink->mem->used = frame->used;
US_FRAME_COPY_META(frame, sink->mem);
@@ -190,26 +219,35 @@ int us_memsink_client_get(us_memsink_s *sink, us_frame_s *frame, bool *key_reque
return -1;
}
int retval = -2; // Not updated
int retval = 0;
if (sink->mem->magic == US_MEMSINK_MAGIC) {
if (sink->mem->version != US_MEMSINK_VERSION) {
US_LOG_ERROR("%s-sink: Protocol version mismatch: sink=%u, required=%u",
sink->name, sink->mem->version, US_MEMSINK_VERSION);
retval = -1;
goto done;
}
if (sink->mem->id != sink->last_id) { // When updated
sink->last_id = sink->mem->id;
us_frame_set_data(frame, sink->mem->data, sink->mem->used);
US_FRAME_COPY_META(sink->mem, frame);
*key_requested = sink->mem->key_requested;
retval = 0;
}
sink->mem->last_client_ts = us_get_now_monotonic();
if (key_required) {
sink->mem->key_requested = true;
}
if (sink->mem->magic != US_MEMSINK_MAGIC) {
retval = -2; // Not updated
goto done;
}
if (sink->mem->version != US_MEMSINK_VERSION) {
US_LOG_ERROR("%s-sink: Protocol version mismatch: sink=%u, required=%u",
sink->name, sink->mem->version, US_MEMSINK_VERSION);
retval = -1;
goto done;
}
// Let the sink know that the client is alive
sink->mem->last_client_ts = us_get_now_monotonic();
if (sink->mem->id == sink->last_readed_id) {
retval = -2; // Not updated
goto done;
}
sink->last_readed_id = sink->mem->id;
us_frame_set_data(frame, us_memsink_get_data(sink->mem), sink->mem->used);
US_FRAME_COPY_META(sink->mem, frame);
if (key_requested != NULL) { // We don't need it for non-H264 sinks
*key_requested = sink->mem->key_requested;
}
if (key_required) {
sink->mem->key_requested = true;
}
done:

View File

@@ -34,6 +34,7 @@
typedef struct {
const char *name;
const char *obj;
uz data_size;
bool server;
bool rm;
uint client_ttl; // Only for server
@@ -41,8 +42,11 @@ typedef struct {
int fd;
us_memsink_shared_s *mem;
u64 last_id;
atomic_bool has_clients; // Only for server
u64 last_readed_id; // Only for client
atomic_bool has_clients; // Only for server results
ldf unsafe_last_client_ts; // Only for server
} us_memsink_s;

72
src/libs/memsinksh.c Normal file
View File

@@ -0,0 +1,72 @@
/*****************************************************************************
# #
# uStreamer - Lightweight and fast MJPEG-HTTP streamer. #
# #
# Copyright (C) 2018-2023 Maxim Devaev <mdevaev@gmail.com> #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <https://www.gnu.org/licenses/>. #
# #
*****************************************************************************/
#include "memsinksh.h"
#include <string.h>
#include <strings.h>
#include <assert.h>
#include <sys/mman.h>
#include "types.h"
// Maps the shared memsink region (fixed header followed by a data area of
// @data_size bytes) into this process. Returns NULL if mmap() fails.
us_memsink_shared_s *us_memsink_shared_map(int fd, uz data_size) {
	const uz total_size = sizeof(us_memsink_shared_s) + data_size;
	void *mapped = mmap(NULL, total_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (mapped == MAP_FAILED) {
		return NULL;
	}
	assert(mapped != NULL);
	return (us_memsink_shared_s *)mapped;
}
// Unmaps a region previously mapped with us_memsink_shared_map();
// @data_size must match the value used at mapping time.
int us_memsink_shared_unmap(us_memsink_shared_s *mem, uz data_size) {
	assert(mem != NULL);
	const uz total_size = sizeof(us_memsink_shared_s) + data_size;
	return munmap(mem, total_size);
}
// Picks the shared data area size from the sink object name's suffix:
// whatever follows the last ':' in the name, or — when there is no colon —
// the last '.'. Returns 0 for a missing or unknown suffix (caller treats
// that as an error).
uz us_memsink_calculate_size(const char *obj) {
	const char *suffix = strrchr(obj, ':');
	if (suffix == NULL) {
		suffix = strrchr(obj, '.');
	}
	if (suffix == NULL) {
		return 0;
	}
	++suffix; // Step past the separator itself
	if (strcasecmp(suffix, "jpeg") == 0) {
		return 4 * 1024 * 1024;
	}
	if (strcasecmp(suffix, "h264") == 0) {
		return 2 * 1024 * 1024;
	}
	if (strcasecmp(suffix, "raw") == 0) {
		return 1920 * 1200 * 3; // RGB
	}
	return 0;
}
u8 *us_memsink_get_data(us_memsink_shared_s *mem) {
	// Returns a pointer to the frame data area, which lives immediately
	// after the shared header in the mapping made by us_memsink_shared_map().
	// The cast to a byte pointer must happen BEFORE the addition:
	// (mem + sizeof(us_memsink_shared_s)) would scale the offset by the
	// struct size a second time (pointer arithmetic on us_memsink_shared_s*),
	// pointing far past the mapped region.
	return (u8*)mem + sizeof(us_memsink_shared_s);
}

View File

@@ -22,18 +22,11 @@
#pragma once
#include <sys/mman.h>
#include "types.h"
#define US_MEMSINK_MAGIC ((u64)0xCAFEBABECAFEBABE)
#define US_MEMSINK_VERSION ((u32)4)
#ifndef US_CFG_MEMSINK_MAX_DATA
# define US_CFG_MEMSINK_MAX_DATA 33554432
#endif
#define US_MEMSINK_MAX_DATA ((uz)(US_CFG_MEMSINK_MAX_DATA))
#define US_MEMSINK_VERSION ((u32)5)
typedef struct {
@@ -57,28 +50,11 @@ typedef struct {
ldf last_client_ts;
bool key_requested;
u8 data[US_MEMSINK_MAX_DATA];
} us_memsink_shared_s;
INLINE us_memsink_shared_s *us_memsink_shared_map(int fd) {
us_memsink_shared_s *mem = mmap(
NULL,
sizeof(us_memsink_shared_s),
PROT_READ | PROT_WRITE,
MAP_SHARED,
fd,
0
);
if (mem == MAP_FAILED) {
return NULL;
}
assert(mem != NULL);
return mem;
}
us_memsink_shared_s *us_memsink_shared_map(int fd, uz data_size);
int us_memsink_shared_unmap(us_memsink_shared_s *mem, uz data_size);
INLINE int us_memsink_shared_unmap(us_memsink_shared_s *mem) {
assert(mem != NULL);
return munmap(mem, sizeof(us_memsink_shared_s));
}
uz us_memsink_calculate_size(const char *obj);
u8 *us_memsink_get_data(us_memsink_shared_s *mem);

View File

@@ -104,9 +104,9 @@ int us_queue_get(us_queue_s *queue, void **item, ldf timeout) {
#undef _WAIT_OR_UNLOCK
/*int us_queue_get_free(us_queue_s *queue) {
bool us_queue_is_empty(us_queue_s *queue) {
US_MUTEX_LOCK(queue->mutex);
const uint size = queue->size;
US_MUTEX_UNLOCK(queue->mutex);
return queue->capacity - size;
}*/
return (bool)(queue->capacity - size);
}

View File

@@ -45,7 +45,7 @@ typedef struct {
#define US_QUEUE_DELETE_WITH_ITEMS(x_queue, x_free_item) { \
if (x_queue) { \
while (!us_queue_get_free(x_queue)) { \
while (!us_queue_is_empty(x_queue)) { \
void *m_ptr; \
if (!us_queue_get(x_queue, &m_ptr, 0)) { \
US_DELETE(m_ptr, x_free_item); \
@@ -61,4 +61,4 @@ void us_queue_destroy(us_queue_s *queue);
int us_queue_put(us_queue_s *queue, void *item, ldf timeout);
int us_queue_get(us_queue_s *queue, void **item, ldf timeout);
// int us_queue_get_free(us_queue_s *queue);
bool us_queue_is_empty(us_queue_s *queue);

82
src/libs/signal.c Normal file
View File

@@ -0,0 +1,82 @@
/*****************************************************************************
# #
# uStreamer - Lightweight and fast MJPEG-HTTP streamer. #
# #
# Copyright (C) 2018-2023 Maxim Devaev <mdevaev@gmail.com> #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <https://www.gnu.org/licenses/>. #
# #
*****************************************************************************/
#include "signal.h"
#include <string.h>
#include <signal.h>
#include <assert.h>
#if defined(__GLIBC__) && __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 32
# define HAS_SIGABBREV_NP
#endif
#include "types.h"
#include "tools.h"
#include "logging.h"
char *us_signum_to_string(int signum) {
	// Returns a heap-allocated human-readable name for a signal number,
	// e.g. "SIGTERM", or "SIG[<n>]" when the abbreviation is unknown.
	// The caller owns the returned string.
#	ifdef HAS_SIGABBREV_NP
	const char *const abbrev = sigabbrev_np(signum);
#	else
	const char *abbrev;
	switch (signum) {
		case SIGTERM: abbrev = "TERM"; break;
		case SIGINT: abbrev = "INT"; break;
		case SIGPIPE: abbrev = "PIPE"; break;
		default: abbrev = NULL; break;
	}
#	endif
	char *result;
	if (abbrev == NULL) {
		US_ASPRINTF(result, "SIG[%d]", signum);
	} else {
		US_ASPRINTF(result, "SIG%s", abbrev);
	}
	return result;
}
void us_install_signals_handler(us_signal_handler_f handler, bool ignore_sigpipe) {
	// Install one common handler for SIGINT and SIGTERM (and SIGPIPE,
	// unless ignore_sigpipe is set, in which case SIGPIPE is ignored).
	// The listed signals are also blocked inside the handler via sa_mask.
	struct sigaction act = {0};
	act.sa_handler = handler;
	assert(!sigemptyset(&act.sa_mask));
	assert(!sigaddset(&act.sa_mask, SIGINT));
	assert(!sigaddset(&act.sa_mask, SIGTERM));
	if (!ignore_sigpipe) {
		assert(!sigaddset(&act.sa_mask, SIGPIPE));
	}
	US_LOG_DEBUG("Installing SIGINT handler ...");
	assert(!sigaction(SIGINT, &act, NULL));
	US_LOG_DEBUG("Installing SIGTERM handler ...");
	assert(!sigaction(SIGTERM, &act, NULL));
	if (ignore_sigpipe) {
		US_LOG_DEBUG("Ignoring SIGPIPE ...");
		assert(signal(SIGPIPE, SIG_IGN) != SIG_ERR);
	} else {
		US_LOG_DEBUG("Installing SIGPIPE handler ...");
		assert(!sigaction(SIGPIPE, &act, NULL));
	}
}

32
src/libs/signal.h Normal file
View File

@@ -0,0 +1,32 @@
/*****************************************************************************
# #
# uStreamer - Lightweight and fast MJPEG-HTTP streamer. #
# #
# Copyright (C) 2018-2023 Maxim Devaev <mdevaev@gmail.com> #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <https://www.gnu.org/licenses/>. #
# #
*****************************************************************************/
#pragma once
#include "types.h"
typedef void (*us_signal_handler_f)(int);
char *us_signum_to_string(int signum);
void us_install_signals_handler(us_signal_handler_f handler, bool ignore_sigpipe);

View File

@@ -28,11 +28,9 @@
#include <linux/videodev2.h>
#include <linux/v4l2-controls.h>
#include "uslibs/types.h"
#include "uslibs/tools.h"
#include "uslibs/xioctl.h"
#include "logging.h"
#include "types.h"
#include "tools.h"
#include "xioctl.h"
#ifndef V4L2_CID_USER_TC358743_BASE
@@ -46,28 +44,22 @@
#endif
int us_tc358743_read_info(const char *path, us_tc358743_info_s *info) {
US_MEMSET_ZERO(*info);
int us_tc358743_xioctl_get_audio_hz(int fd, uint *audio_hz) {
*audio_hz = 0;
int fd = -1;
if ((fd = open(path, O_RDWR)) < 0) {
US_JLOG_PERROR("audio", "Can't open TC358743 V4L2 device");
struct v4l2_control ctl = {.id = TC358743_CID_AUDIO_PRESENT};
if (us_xioctl(fd, VIDIOC_G_CTRL, &ctl) < 0) {
return -1;
}
if (!ctl.value) {
return 0; // No audio
}
# define READ_CID(x_cid, x_field) { \
struct v4l2_control m_ctl = {.id = x_cid}; \
if (us_xioctl(fd, VIDIOC_G_CTRL, &m_ctl) < 0) { \
US_JLOG_PERROR("audio", "Can't get value of " #x_cid); \
close(fd); \
return -1; \
} \
info->x_field = m_ctl.value; \
}
READ_CID(TC358743_CID_AUDIO_PRESENT, has_audio);
READ_CID(TC358743_CID_AUDIO_SAMPLING_RATE, audio_hz);
# undef READ_CID
close(fd);
US_MEMSET_ZERO(ctl);
ctl.id = TC358743_CID_AUDIO_SAMPLING_RATE;
if (us_xioctl(fd, VIDIOC_G_CTRL, &ctl) < 0) {
return -2;
}
*audio_hz = ctl.value;
return 0;
}

View File

@@ -22,13 +22,7 @@
#pragma once
#include "uslibs/types.h"
#include "types.h"
typedef struct {
bool has_audio;
uint audio_hz;
} us_tc358743_info_s;
int us_tc358743_read_info(const char *path, us_tc358743_info_s *info);
int us_tc358743_xioctl_get_audio_hz(int fd, uint *audio_hz);

View File

@@ -24,6 +24,7 @@
#include <stdio.h>
#include <unistd.h>
#include <signal.h>
#include <assert.h>
#include <sys/syscall.h>
@@ -56,9 +57,14 @@
us_thread_set_name(m_new_tname_buf); \
}
#else
# define US_THREAD_RENAME(_fmt, ...)
# define US_THREAD_RENAME(x_fmt, ...)
#endif
#define US_THREAD_SETTLE(x_fmt, ...) { \
US_THREAD_RENAME((x_fmt), ##__VA_ARGS__); \
us_thread_block_signals(); \
}
#define US_MUTEX_INIT(x_mutex) assert(!pthread_mutex_init(&(x_mutex), NULL))
#define US_MUTEX_DESTROY(x_mutex) assert(!pthread_mutex_destroy(&(x_mutex)))
#define US_MUTEX_LOCK(x_mutex) assert(!pthread_mutex_lock(&(x_mutex)))
@@ -78,7 +84,7 @@ INLINE void us_thread_set_name(const char *name) {
# elif defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__DragonFly__)
pthread_set_name_np(pthread_self(), name);
# elif defined(__NetBSD__)
pthread_setname_np(pthread_self(), "%s", (void *)name);
pthread_setname_np(pthread_self(), "%s", (void*)name);
# else
# error us_thread_set_name() not implemented, you can disable it using WITH_PTHREAD_NP=0
# endif
@@ -124,3 +130,11 @@ INLINE void us_thread_get_name(char *name) { // Always required for logging
}
#endif
}
INLINE void us_thread_block_signals(void) {
	// Block SIGINT and SIGTERM in the calling thread so that only the
	// main thread receives them (worker threads call this on startup).
	sigset_t blocked;
	assert(!sigemptyset(&blocked));
	assert(!sigaddset(&blocked, SIGINT));
	assert(!sigaddset(&blocked, SIGTERM));
	assert(!pthread_sigmask(SIG_BLOCK, &blocked, NULL));
}

View File

@@ -27,6 +27,7 @@
#include <string.h>
#include <unistd.h>
#include <limits.h>
#include <locale.h> // Make C locale for strerror_l()
#include <errno.h>
#include <math.h>
#include <time.h>
@@ -34,12 +35,6 @@
#include <sys/file.h>
#if defined(__GLIBC__) && __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 32
# define HAS_SIGABBREV_NP
#else
# include <signal.h>
#endif
#include "types.h"
@@ -77,6 +72,14 @@
(m_a > m_b ? m_a : m_b); \
})
#define US_ONCE(...) { \
const int m_reported = __LINE__; \
if (m_reported != once) { \
__VA_ARGS__; \
once = m_reported; \
} \
}
INLINE char *us_strdup(const char *str) {
char *const new = strdup(str);
@@ -181,34 +184,26 @@ INLINE int us_flock_timedwait_monotonic(int fd, ldf timeout) {
}
INLINE char *us_errno_to_string(int error) {
# if (_POSIX_C_SOURCE >= 200112L) && !defined(_GNU_SOURCE) // XSI
char buf[2048];
const uz max_len = sizeof(buf) - 1;
# if (_POSIX_C_SOURCE >= 200112L) && ! _GNU_SOURCE
if (strerror_r(error, buf, max_len) != 0) {
US_SNPRINTF(buf, max_len, "Errno = %d", error);
}
return us_strdup(buf);
# else
return us_strdup(strerror_r(error, buf, max_len));
# endif
}
INLINE char *us_signum_to_string(int signum) {
# ifdef HAS_SIGABBREV_NP
const char *const name = sigabbrev_np(signum);
# else
const char *const name = (
signum == SIGTERM ? "TERM" :
signum == SIGINT ? "INT" :
signum == SIGPIPE ? "PIPE" :
NULL
);
# endif
char *buf;
if (name != NULL) {
US_ASPRINTF(buf, "SIG%s", name);
} else {
US_ASPRINTF(buf, "SIG[%d]", signum);
# elif defined(__GLIBC__) && defined(_GNU_SOURCE) // GNU
char buf[2048];
const uz max_len = sizeof(buf) - 1;
return us_strdup(strerror_r(error, buf, max_len));
# else // BSD
locale_t locale = newlocale(LC_MESSAGES_MASK, "C", NULL);
if (locale) {
char *ptr = us_strdup(strerror_l(error, locale));
freelocale(locale);
return ptr;
}
return buf;
return us_strdup("!!! newlocale() error !!!");
# endif
}

View File

@@ -54,7 +54,7 @@ int us_unjpeg(const us_frame_s *src, us_frame_s *dest, bool decode) {
// https://stackoverflow.com/questions/19857766/error-handling-in-libjpeg
_jpeg_error_manager_s jpeg_error;
jpeg.err = jpeg_std_error((struct jpeg_error_mgr *)&jpeg_error);
jpeg.err = jpeg_std_error((struct jpeg_error_mgr*)&jpeg_error);
jpeg_error.mgr.error_exit = _jpeg_error_handler;
jpeg_error.frame = src;
if (setjmp(jpeg_error.jmp) < 0) {
@@ -94,7 +94,7 @@ done:
}
static void _jpeg_error_handler(j_common_ptr jpeg) {
_jpeg_error_manager_s *jpeg_error = (_jpeg_error_manager_s *)jpeg->err;
_jpeg_error_manager_s *jpeg_error = (_jpeg_error_manager_s*)jpeg->err;
char msg[JMSG_LENGTH_MAX];
(*jpeg_error->mgr.format_message)(jpeg, msg);

View File

@@ -73,13 +73,13 @@ void us_encoder_destroy(us_encoder_s *enc) {
free(enc);
}
us_encoder_type_e us_encoder_parse_type(const char *str) {
int us_encoder_parse_type(const char *str) {
US_ARRAY_ITERATE(_ENCODER_TYPES, 0, item, {
if (!strcasecmp(item->name, str)) {
return item->type;
}
});
return US_ENCODER_TYPE_UNKNOWN;
return -1;
}
const char *us_encoder_type_to_string(us_encoder_type_e type) {
@@ -91,7 +91,9 @@ const char *us_encoder_type_to_string(us_encoder_type_e type) {
return _ENCODER_TYPES[0].name;
}
us_workers_pool_s *us_encoder_workers_pool_init(us_encoder_s *enc, us_device_s *dev) {
void us_encoder_open(us_encoder_s *enc, us_device_s *dev) {
assert(enc->run->pool == NULL);
# define DR(x_next) dev->run->x_next
us_encoder_type_e type = (_ER(cpu_forced) ? US_ENCODER_TYPE_CPU : enc->type);
@@ -162,15 +164,20 @@ us_workers_pool_s *us_encoder_workers_pool_init(us_encoder_s *enc, us_device_s *
: 0
);
return us_workers_pool_init(
enc->run->pool = us_workers_pool_init(
"JPEG", "jw", n_workers, desired_interval,
_worker_job_init, (void *)enc,
_worker_job_init, (void*)enc,
_worker_job_destroy,
_worker_run_job);
# undef DR
}
void us_encoder_close(us_encoder_s *enc) {
assert(enc->run->pool != NULL);
US_DELETE(enc->run->pool, us_workers_pool_destroy);
}
void us_encoder_get_runtime_params(us_encoder_s *enc, us_encoder_type_e *type, unsigned *quality) {
US_MUTEX_LOCK(_ER(mutex));
*type = _ER(type);
@@ -181,25 +188,23 @@ void us_encoder_get_runtime_params(us_encoder_s *enc, us_encoder_type_e *type, u
static void *_worker_job_init(void *v_enc) {
us_encoder_job_s *job;
US_CALLOC(job, 1);
job->enc = (us_encoder_s *)v_enc;
job->enc = (us_encoder_s*)v_enc;
job->dest = us_frame_init();
return (void *)job;
return (void*)job;
}
static void _worker_job_destroy(void *v_job) {
us_encoder_job_s *job = (us_encoder_job_s *)v_job;
us_encoder_job_s *job = v_job;
us_frame_destroy(job->dest);
free(job);
}
static bool _worker_run_job(us_worker_s *wr) {
us_encoder_job_s *job = (us_encoder_job_s *)wr->job;
us_encoder_job_s *job = wr->job;
us_encoder_s *enc = job->enc; // Just for _ER()
const us_frame_s *src = &job->hw->raw;
us_frame_s *dest = job->dest;
assert(_ER(type) != US_ENCODER_TYPE_UNKNOWN);
if (_ER(type) == US_ENCODER_TYPE_CPU) {
US_LOG_VERBOSE("Compressing JPEG using CPU: worker=%s, buffer=%u",
wr->name, job->hw->buf.index);
@@ -223,6 +228,9 @@ static bool _worker_run_job(us_worker_s *wr) {
us_frame_encoding_begin(src, dest, V4L2_PIX_FMT_JPEG);
usleep(5000); // Просто чтобы работала логика desired_fps
dest->encode_end_ts = us_get_now_monotonic(); // us_frame_encoding_end()
} else {
assert(0 && "Unknown encoder type");
}
US_LOG_VERBOSE("Compressed new JPEG: size=%zu, time=%0.3Lf, worker=%s, buffer=%u",

View File

@@ -47,7 +47,6 @@
#define ENCODER_TYPES_STR "CPU, HW, M2M-VIDEO, M2M-IMAGE, NOOP"
typedef enum {
US_ENCODER_TYPE_UNKNOWN, // Only for us_encoder_parse_type() and main()
US_ENCODER_TYPE_CPU,
US_ENCODER_TYPE_HW,
US_ENCODER_TYPE_M2M_VIDEO,
@@ -63,6 +62,8 @@ typedef struct {
unsigned n_m2ms;
us_m2m_encoder_s **m2ms;
us_workers_pool_s *pool;
} us_encoder_runtime_s;
typedef struct {
@@ -83,10 +84,10 @@ typedef struct {
us_encoder_s *us_encoder_init(void);
void us_encoder_destroy(us_encoder_s *enc);
us_encoder_type_e us_encoder_parse_type(const char *str);
int us_encoder_parse_type(const char *str);
const char *us_encoder_type_to_string(us_encoder_type_e type);
us_workers_pool_s *us_encoder_workers_pool_init(us_encoder_s *enc, us_device_s *dev);
void us_encoder_get_runtime_params(us_encoder_s *enc, us_encoder_type_e *type, unsigned *quality);
void us_encoder_open(us_encoder_s *enc, us_device_s *dev);
void us_encoder_close(us_encoder_s *enc);
int us_encoder_compress(us_encoder_s *enc, unsigned worker_number, us_frame_s *src, us_frame_s *dest);
void us_encoder_get_runtime_params(us_encoder_s *enc, us_encoder_type_e *type, unsigned *quality);

View File

@@ -29,8 +29,8 @@
typedef struct {
struct jpeg_destination_mgr mgr; // Default manager
JOCTET *buf; // Start of buffer
struct jpeg_destination_mgr mgr; // Default manager
JOCTET *buf; // Start of buffer
us_frame_s *frame;
} _jpeg_dest_manager_s;
@@ -63,7 +63,12 @@ void us_cpu_encoder_compress(const us_frame_s *src, us_frame_s *dest, unsigned q
jpeg.image_width = src->width;
jpeg.image_height = src->height;
jpeg.input_components = 3;
jpeg.in_color_space = ((src->format == V4L2_PIX_FMT_YUYV || src->format == V4L2_PIX_FMT_UYVY) ? JCS_YCbCr : JCS_RGB);
switch (src->format) {
case V4L2_PIX_FMT_YUYV:
case V4L2_PIX_FMT_YVYU:
case V4L2_PIX_FMT_UYVY: jpeg.in_color_space = JCS_YCbCr; break;
default: jpeg.in_color_space = JCS_RGB; break;
}
jpeg_set_defaults(&jpeg);
jpeg_set_quality(&jpeg, quality, TRUE);
@@ -89,12 +94,12 @@ void us_cpu_encoder_compress(const us_frame_s *src, us_frame_s *dest, unsigned q
static void _jpeg_set_dest_frame(j_compress_ptr jpeg, us_frame_s *frame) {
if (jpeg->dest == NULL) {
assert((jpeg->dest = (struct jpeg_destination_mgr *)(*jpeg->mem->alloc_small)(
assert((jpeg->dest = (struct jpeg_destination_mgr*)(*jpeg->mem->alloc_small)(
(j_common_ptr) jpeg, JPOOL_PERMANENT, sizeof(_jpeg_dest_manager_s)
)) != NULL);
}
_jpeg_dest_manager_s *const dest = (_jpeg_dest_manager_s *)jpeg->dest;
_jpeg_dest_manager_s *const dest = (_jpeg_dest_manager_s*)jpeg->dest;
dest->mgr.init_destination = _jpeg_init_destination;
dest->mgr.empty_output_buffer = _jpeg_empty_output_buffer;
dest->mgr.term_destination = _jpeg_term_destination;
@@ -221,10 +226,10 @@ static void _jpeg_write_scanlines_bgr24(struct jpeg_compress_struct *jpeg, const
#define JPEG_OUTPUT_BUFFER_SIZE ((size_t)4096)
static void _jpeg_init_destination(j_compress_ptr jpeg) {
_jpeg_dest_manager_s *const dest = (_jpeg_dest_manager_s *)jpeg->dest;
_jpeg_dest_manager_s *const dest = (_jpeg_dest_manager_s*)jpeg->dest;
// Allocate the output buffer - it will be released when done with image
assert((dest->buf = (JOCTET *)(*jpeg->mem->alloc_small)(
assert((dest->buf = (JOCTET*)(*jpeg->mem->alloc_small)(
(j_common_ptr) jpeg, JPOOL_IMAGE, JPEG_OUTPUT_BUFFER_SIZE * sizeof(JOCTET)
)) != NULL);
@@ -235,7 +240,7 @@ static void _jpeg_init_destination(j_compress_ptr jpeg) {
static boolean _jpeg_empty_output_buffer(j_compress_ptr jpeg) {
// Called whenever local jpeg buffer fills up
_jpeg_dest_manager_s *const dest = (_jpeg_dest_manager_s *)jpeg->dest;
_jpeg_dest_manager_s *const dest = (_jpeg_dest_manager_s*)jpeg->dest;
us_frame_append_data(dest->frame, dest->buf, JPEG_OUTPUT_BUFFER_SIZE);
@@ -249,7 +254,7 @@ static void _jpeg_term_destination(j_compress_ptr jpeg) {
// Called by jpeg_finish_compress after all data has been written.
// Usually needs to flush buffer.
_jpeg_dest_manager_s *const dest = (_jpeg_dest_manager_s *)jpeg->dest;
_jpeg_dest_manager_s *const dest = (_jpeg_dest_manager_s*)jpeg->dest;
const size_t final = JPEG_OUTPUT_BUFFER_SIZE - dest->mgr.free_in_buffer;
// Write any data remaining in the buffer.

View File

@@ -22,8 +22,19 @@
#include "h264.h"
#include <stdatomic.h>
us_h264_stream_s *us_h264_stream_init(us_memsink_s *sink, const char *path, unsigned bitrate, unsigned gop) {
#include "../libs/types.h"
#include "../libs/tools.h"
#include "../libs/logging.h"
#include "../libs/frame.h"
#include "../libs/memsink.h"
#include "../libs/unjpeg.h"
#include "m2m.h"
us_h264_stream_s *us_h264_stream_init(us_memsink_s *sink, const char *path, uint bitrate, uint gop) {
us_h264_stream_s *h264;
US_CALLOC(h264, 1);
h264->sink = sink;
@@ -42,18 +53,15 @@ void us_h264_stream_destroy(us_h264_stream_s *h264) {
}
void us_h264_stream_process(us_h264_stream_s *h264, const us_frame_s *frame, bool force_key) {
if (!us_memsink_server_check(h264->sink, frame)) {
return;
}
if (us_is_jpeg(frame->format)) {
const long double now = us_get_now_monotonic();
const ldf now_ts = us_get_now_monotonic();
US_LOG_DEBUG("H264: Input frame is JPEG; decoding ...");
if (us_unjpeg(frame, h264->tmp_src, true) < 0) {
atomic_store(&h264->online, false);
return;
}
frame = h264->tmp_src;
US_LOG_VERBOSE("H264: JPEG decoded; time=%.3Lf", us_get_now_monotonic() - now);
US_LOG_VERBOSE("H264: JPEG decoded; time=%.3Lf", us_get_now_monotonic() - now_ts);
}
if (h264->key_requested) {

View File

@@ -22,15 +22,12 @@
#pragma once
#include <stdbool.h>
#include <stdatomic.h>
#include <assert.h>
#include "../libs/tools.h"
#include "../libs/logging.h"
#include "../libs/types.h"
#include "../libs/frame.h"
#include "../libs/memsink.h"
#include "../libs/unjpeg.h"
#include "m2m.h"
@@ -44,6 +41,6 @@ typedef struct {
} us_h264_stream_s;
us_h264_stream_s *us_h264_stream_init(us_memsink_s *sink, const char *path, unsigned bitrate, unsigned gop);
us_h264_stream_s *us_h264_stream_init(us_memsink_s *sink, const char *path, uint bitrate, uint gop);
void us_h264_stream_destroy(us_h264_stream_s *h264);
void us_h264_stream_process(us_h264_stream_s *h264, const us_frame_s *frame, bool force_key);

View File

@@ -22,6 +22,14 @@
#include "bev.h"
#include <string.h>
#include <errno.h>
#include <event2/util.h>
#include <event2/bufferevent.h>
#include "../../libs/tools.h"
char *us_bufferevent_format_reason(short what) {
char *reason;
@@ -34,6 +42,7 @@ char *us_bufferevent_format_reason(short what) {
strncat(reason, perror_str, 1023);
free(perror_str);
strcat(reason, " (");
# define FILL_REASON(x_bev, x_name) { \
if (what & x_bev) { \
if (first) { \
@@ -44,7 +53,6 @@ char *us_bufferevent_format_reason(short what) {
strcat(reason, x_name); \
} \
}
FILL_REASON(BEV_EVENT_READING, "reading");
FILL_REASON(BEV_EVENT_WRITING, "writing");
FILL_REASON(BEV_EVENT_ERROR, "error");

View File

@@ -22,14 +22,5 @@
#pragma once
#include <string.h>
#include <errno.h>
#include <event2/util.h>
#include <event2/bufferevent.h>
#include "../../libs/tools.h"
#include "../../libs/logging.h"
char *us_bufferevent_format_reason(short what);

View File

@@ -22,6 +22,13 @@
#include "mime.h"
#include <string.h>
#include <event2/util.h>
#include "../../libs/tools.h"
#include "../../libs/array.h"
static const struct {
const char *ext; // cppcheck-suppress unusedStructMember

View File

@@ -22,12 +22,5 @@
#pragma once
#include <string.h>
#include <event2/util.h>
#include "../../libs/tools.h"
#include "../../libs/array.h"
const char *us_guess_mime_type(const char *str);

View File

@@ -22,6 +22,14 @@
#include "path.h"
#ifdef TEST_HTTP_PATH
# include <stdio.h>
# include <stdlib.h>
#endif
#include <string.h>
#include "../../libs/tools.h"
char *us_simplify_request_path(const char *str) {
// Based on Lighttpd sources:

View File

@@ -22,13 +22,5 @@
#pragma once
#ifdef TEST_HTTP_PATH
# include <stdio.h>
# include <stdlib.h>
#endif
#include <string.h>
#include "../../libs/tools.h"
char *us_simplify_request_path(const char *str);

View File

@@ -22,6 +22,61 @@
#include "server.h"
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stdbool.h>
#include <stdatomic.h>
#include <string.h>
#include <inttypes.h>
#include <unistd.h>
#include <fcntl.h>
#include <assert.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <netinet/tcp.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <event2/util.h>
#include <event2/event.h>
#include <event2/thread.h>
#include <event2/http.h>
#include <event2/buffer.h>
#include <event2/bufferevent.h>
#include <event2/keyvalq_struct.h>
#ifndef EVTHREAD_USE_PTHREADS_IMPLEMENTED
# error Required libevent-pthreads support
#endif
#include "../../libs/types.h"
#include "../../libs/tools.h"
#include "../../libs/threading.h"
#include "../../libs/logging.h"
#include "../../libs/process.h"
#include "../../libs/frame.h"
#include "../../libs/base64.h"
#include "../../libs/list.h"
#include "../data/index_html.h"
#include "../data/favicon_ico.h"
#include "../encoder.h"
#include "../stream.h"
#ifdef WITH_GPIO
# include "../gpio/gpio.h"
#endif
#include "bev.h"
#include "unix.h"
#include "uri.h"
#include "mime.h"
#include "static.h"
#ifdef WITH_SYSTEMD
# include "systemd/systemd.h"
#endif
static int _http_preprocess_request(struct evhttp_request *request, us_server_s *server);
@@ -37,9 +92,9 @@ static void _http_callback_stream(struct evhttp_request *request, void *v_server
static void _http_callback_stream_write(struct bufferevent *buf_event, void *v_ctx);
static void _http_callback_stream_error(struct bufferevent *buf_event, short what, void *v_ctx);
static void _http_request_watcher(int fd, short event, void *v_server);
static void _http_refresher(int fd, short event, void *v_server);
static void _http_queue_send_stream(us_server_s *server, bool stream_updated, bool frame_updated);
static void _http_send_stream(us_server_s *server, bool stream_updated, bool frame_updated);
static void _http_send_snapshot(us_server_s *server);
static bool _expose_frame(us_server_s *server, const us_frame_s *frame);
@@ -47,10 +102,19 @@ static const char *_http_get_header(struct evhttp_request *request, const char *
static char *_http_get_client_hostport(struct evhttp_request *request);
#define _S_LOG_ERROR(x_msg, ...) US_LOG_ERROR("HTTP: " x_msg, ##__VA_ARGS__)
#define _S_LOG_PERROR(x_msg, ...) US_LOG_PERROR("HTTP: " x_msg, ##__VA_ARGS__)
#define _S_LOG_INFO(x_msg, ...) US_LOG_INFO("HTTP: " x_msg, ##__VA_ARGS__)
#define _S_LOG_VERBOSE(x_msg, ...) US_LOG_VERBOSE("HTTP: " x_msg, ##__VA_ARGS__)
#define _S_LOG_DEBUG(x_msg, ...) US_LOG_DEBUG("HTTP: " x_msg, ##__VA_ARGS__)
#define _A_EVBUFFER_NEW(x_buf) assert((x_buf = evbuffer_new()) != NULL)
#define _A_EVBUFFER_ADD(x_buf, x_data, x_size) assert(!evbuffer_add(x_buf, x_data, x_size))
#define _A_EVBUFFER_ADD_PRINTF(x_buf, x_fmt, ...) assert(evbuffer_add_printf(x_buf, x_fmt, ##__VA_ARGS__) >= 0)
#define _A_ADD_HEADER(x_request, x_key, x_value) \
assert(!evhttp_add_header(evhttp_request_get_output_headers(x_request), x_key, x_value))
us_server_s *us_server_init(us_stream_s *stream) {
us_server_exposed_s *exposed;
@@ -91,11 +155,6 @@ void us_server_destroy(us_server_s *server) {
event_free(run->refresher);
}
if (run->request_watcher != NULL) {
event_del(run->request_watcher);
event_free(run->request_watcher);
}
evhttp_free(run->http);
US_CLOSE_FD(run->ext_fd);
event_base_free(run->base);
@@ -104,6 +163,10 @@ void us_server_destroy(us_server_s *server) {
libevent_global_shutdown();
# endif
US_LIST_ITERATE(run->snapshot_clients, client, { // cppcheck-suppress constStatement
free(client);
});
US_LIST_ITERATE(run->stream_clients, client, { // cppcheck-suppress constStatement
free(client->key);
free(client->hostport);
@@ -125,29 +188,21 @@ int us_server_listen(us_server_s *server) {
{
if (server->static_path[0] != '\0') {
US_LOG_INFO("Enabling HTTP file server: %s", server->static_path);
evhttp_set_gencb(run->http, _http_callback_static, (void *)server);
_S_LOG_INFO("Enabling the file server: %s", server->static_path);
evhttp_set_gencb(run->http, _http_callback_static, (void*)server);
} else {
assert(!evhttp_set_cb(run->http, "/", _http_callback_root, (void *)server));
assert(!evhttp_set_cb(run->http, "/favicon.ico", _http_callback_favicon, (void *)server));
assert(!evhttp_set_cb(run->http, "/", _http_callback_root, (void*)server));
assert(!evhttp_set_cb(run->http, "/favicon.ico", _http_callback_favicon, (void*)server));
}
assert(!evhttp_set_cb(run->http, "/state", _http_callback_state, (void *)server));
assert(!evhttp_set_cb(run->http, "/snapshot", _http_callback_snapshot, (void *)server));
assert(!evhttp_set_cb(run->http, "/stream", _http_callback_stream, (void *)server));
assert(!evhttp_set_cb(run->http, "/state", _http_callback_state, (void*)server));
assert(!evhttp_set_cb(run->http, "/snapshot", _http_callback_snapshot, (void*)server));
assert(!evhttp_set_cb(run->http, "/stream", _http_callback_stream, (void*)server));
}
us_frame_copy(stream->run->blank->jpeg, ex->frame);
ex->notify_last_width = ex->frame->width;
ex->notify_last_height = ex->frame->height;
if (server->exit_on_no_clients > 0) {
run->last_request_ts = us_get_now_monotonic();
struct timeval interval = {0};
interval.tv_usec = 100000;
assert((run->request_watcher = event_new(run->base, -1, EV_PERSIST, _http_request_watcher, server)) != NULL);
assert(!event_add(run->request_watcher, &interval));
}
{
struct timeval interval = {0};
if (stream->dev->desired_fps > 0) {
@@ -166,17 +221,17 @@ int us_server_listen(us_server_s *server) {
char *raw_token;
US_ASPRINTF(raw_token, "%s:%s", server->user, server->passwd);
us_base64_encode((uint8_t *)raw_token, strlen(raw_token), &encoded_token, NULL);
us_base64_encode((u8*)raw_token, strlen(raw_token), &encoded_token, NULL);
free(raw_token);
US_ASPRINTF(run->auth_token, "Basic %s", encoded_token);
free(encoded_token);
US_LOG_INFO("Using HTTP basic auth");
_S_LOG_INFO("Using HTTP basic auth");
}
if (server->unix_path[0] != '\0') {
US_LOG_DEBUG("Binding HTTP to UNIX socket '%s' ...", server->unix_path);
_S_LOG_DEBUG("Binding server to UNIX socket '%s' ...", server->unix_path);
if ((run->ext_fd = us_evhttp_bind_unix(
run->http,
server->unix_path,
@@ -185,57 +240,55 @@ int us_server_listen(us_server_s *server) {
) {
return -1;
}
US_LOG_INFO("Listening HTTP on UNIX socket '%s'", server->unix_path);
_S_LOG_INFO("Listening HTTP on UNIX socket '%s'", server->unix_path);
# ifdef WITH_SYSTEMD
} else if (server->systemd) {
US_LOG_DEBUG("Binding HTTP to systemd socket ...");
_S_LOG_DEBUG("Binding HTTP to systemd socket ...");
if ((run->ext_fd = us_evhttp_bind_systemd(run->http)) < 0) {
return -1;
}
US_LOG_INFO("Listening systemd socket ...");
_S_LOG_INFO("Listening systemd socket ...");
# endif
} else {
US_LOG_DEBUG("Binding HTTP to [%s]:%u ...", server->host, server->port);
_S_LOG_DEBUG("Binding HTTP to [%s]:%u ...", server->host, server->port);
if (evhttp_bind_socket(run->http, server->host, server->port) < 0) {
US_LOG_PERROR("Can't bind HTTP on [%s]:%u", server->host, server->port)
_S_LOG_PERROR("Can't bind HTTP on [%s]:%u", server->host, server->port)
return -1;
}
US_LOG_INFO("Listening HTTP on [%s]:%u", server->host, server->port);
_S_LOG_INFO("Listening HTTP on [%s]:%u", server->host, server->port);
}
return 0;
}
void us_server_loop(us_server_s *server) {
US_LOG_INFO("Starting HTTP eventloop ...");
_S_LOG_INFO("Starting eventloop ...");
event_base_dispatch(server->run->base);
US_LOG_INFO("HTTP eventloop stopped");
_S_LOG_INFO("Eventloop stopped");
}
void us_server_loop_break(us_server_s *server) {
event_base_loopbreak(server->run->base);
}
#define ADD_HEADER(x_key, x_value) assert(!evhttp_add_header(evhttp_request_get_output_headers(request), x_key, x_value))
static int _http_preprocess_request(struct evhttp_request *request, us_server_s *server) {
us_server_runtime_s *const run = server->run;
const us_server_runtime_s *const run = server->run;
run->last_request_ts = us_get_now_monotonic();
atomic_store(&server->stream->run->http_last_request_ts, us_get_now_monotonic());
if (server->allow_origin[0] != '\0') {
const char *const cors_headers = _http_get_header(request, "Access-Control-Request-Headers");
const char *const cors_method = _http_get_header(request, "Access-Control-Request-Method");
ADD_HEADER("Access-Control-Allow-Origin", server->allow_origin);
ADD_HEADER("Access-Control-Allow-Credentials", "true");
_A_ADD_HEADER(request, "Access-Control-Allow-Origin", server->allow_origin);
_A_ADD_HEADER(request, "Access-Control-Allow-Credentials", "true");
if (cors_headers != NULL) {
ADD_HEADER("Access-Control-Allow-Headers", cors_headers);
_A_ADD_HEADER(request, "Access-Control-Allow-Headers", cors_headers);
}
if (cors_method != NULL) {
ADD_HEADER("Access-Control-Allow-Methods", cors_method);
_A_ADD_HEADER(request, "Access-Control-Allow-Methods", cors_method);
}
}
@@ -246,9 +299,8 @@ static int _http_preprocess_request(struct evhttp_request *request, us_server_s
if (run->auth_token != NULL) {
const char *const token = _http_get_header(request, "Authorization");
if (token == NULL || strcmp(token, run->auth_token) != 0) {
ADD_HEADER("WWW-Authenticate", "Basic realm=\"Restricted area\"");
_A_ADD_HEADER(request, "WWW-Authenticate", "Basic realm=\"Restricted area\"");
evhttp_send_reply(request, 401, "Unauthorized", NULL);
return -1;
}
@@ -258,7 +310,6 @@ static int _http_preprocess_request(struct evhttp_request *request, us_server_s
evhttp_send_reply(request, HTTP_OK, "OK", NULL);
return -1;
}
return 0;
}
@@ -296,7 +347,7 @@ static int _http_check_run_compat_action(struct evhttp_request *request, void *v
}
static void _http_callback_root(struct evhttp_request *request, void *v_server) {
us_server_s *const server = (us_server_s *)v_server;
us_server_s *const server = v_server;
PREPROCESS_REQUEST;
COMPAT_REQUEST;
@@ -304,28 +355,28 @@ static void _http_callback_root(struct evhttp_request *request, void *v_server)
struct evbuffer *buf;
_A_EVBUFFER_NEW(buf);
_A_EVBUFFER_ADD_PRINTF(buf, "%s", US_HTML_INDEX_PAGE);
ADD_HEADER("Content-Type", "text/html");
_A_ADD_HEADER(request, "Content-Type", "text/html");
evhttp_send_reply(request, HTTP_OK, "OK", buf);
evbuffer_free(buf);
}
static void _http_callback_favicon(struct evhttp_request *request, void *v_server) {
us_server_s *const server = (us_server_s *)v_server;
us_server_s *const server = v_server;
PREPROCESS_REQUEST;
struct evbuffer *buf;
_A_EVBUFFER_NEW(buf);
_A_EVBUFFER_ADD(buf, (const void *)US_FAVICON_ICO_DATA, US_FAVICON_ICO_DATA_SIZE);
ADD_HEADER("Content-Type", "image/x-icon");
_A_EVBUFFER_ADD(buf, (const void*)US_FAVICON_ICO_DATA, US_FAVICON_ICO_DATA_SIZE);
_A_ADD_HEADER(request, "Content-Type", "image/x-icon");
evhttp_send_reply(request, HTTP_OK, "OK", buf);
evbuffer_free(buf);
}
static void _http_callback_static(struct evhttp_request *request, void *v_server) {
us_server_s *const server = (us_server_s *)v_server;
us_server_s *const server = v_server;
PREPROCESS_REQUEST;
COMPAT_REQUEST;
@@ -341,7 +392,7 @@ static void _http_callback_static(struct evhttp_request *request, void *v_server
if ((uri = evhttp_uri_parse(evhttp_request_get_uri(request))) == NULL) {
goto bad_request;
}
if ((uri_path = (char *)evhttp_uri_get_path(uri)) == NULL) {
if ((uri_path = (char*)evhttp_uri_get_path(uri)) == NULL) {
uri_path = "/";
}
if ((decoded_path = evhttp_uridecode(uri_path, 0, NULL)) == NULL) {
@@ -356,19 +407,18 @@ static void _http_callback_static(struct evhttp_request *request, void *v_server
}
if ((fd = open(static_path, O_RDONLY)) < 0) {
US_LOG_PERROR("HTTP: Can't open found static file %s", static_path);
_S_LOG_PERROR("Can't open found static file %s", static_path);
goto not_found;
}
{
struct stat st;
if (fstat(fd, &st) < 0) {
US_LOG_PERROR("HTTP: Can't stat() found static file %s", static_path);
_S_LOG_PERROR("Can't stat() found static file %s", static_path);
goto not_found;
}
if (st.st_size > 0 && evbuffer_add_file(buf, fd, 0, st.st_size) < 0) {
US_LOG_ERROR("HTTP: Can't serve static file %s", static_path);
_S_LOG_ERROR("Can't serve static file %s", static_path);
goto not_found;
}
@@ -376,7 +426,7 @@ static void _http_callback_static(struct evhttp_request *request, void *v_server
// and will close it when finished transferring data
fd = -1;
ADD_HEADER("Content-Type", us_guess_mime_type(static_path));
_A_ADD_HEADER(request, "Content-Type", us_guess_mime_type(static_path));
evhttp_send_reply(request, HTTP_OK, "OK", buf);
goto cleanup;
}
@@ -400,7 +450,7 @@ cleanup:
#undef COMPAT_REQUEST
static void _http_callback_state(struct evhttp_request *request, void *v_server) {
us_server_s *const server = (us_server_s *)v_server;
us_server_s *const server = v_server;
us_server_runtime_s *const run = server->run;
us_server_exposed_s *const ex = run->exposed;
us_stream_s *const stream = server->stream;
@@ -408,7 +458,7 @@ static void _http_callback_state(struct evhttp_request *request, void *v_server)
PREPROCESS_REQUEST;
us_encoder_type_e enc_type;
unsigned enc_quality;
uint enc_quality;
us_encoder_get_runtime_params(stream->enc, &enc_type, &enc_quality);
struct evbuffer *buf;
@@ -432,33 +482,38 @@ static void _http_callback_state(struct evhttp_request *request, void *v_server)
);
}
if (stream->sink != NULL || stream->h264_sink != NULL) {
if (stream->jpeg_sink != NULL || stream->h264_sink != NULL) {
_A_EVBUFFER_ADD_PRINTF(buf, " \"sinks\": {");
if (stream->sink != NULL) {
if (stream->jpeg_sink != NULL) {
_A_EVBUFFER_ADD_PRINTF(buf,
"\"jpeg\": {\"has_clients\": %s}",
us_bool_to_string(atomic_load(&stream->sink->has_clients))
us_bool_to_string(atomic_load(&stream->jpeg_sink->has_clients))
);
}
if (stream->h264_sink != NULL) {
_A_EVBUFFER_ADD_PRINTF(buf,
"%s\"h264\": {\"has_clients\": %s}",
(stream->sink ? ", " : ""),
(stream->jpeg_sink ? ", " : ""),
us_bool_to_string(atomic_load(&stream->h264_sink->has_clients))
);
}
_A_EVBUFFER_ADD_PRINTF(buf, "},");
}
uint width;
uint height;
bool online;
uint captured_fps;
us_stream_get_capture_state(stream, &width, &height, &online, &captured_fps);
_A_EVBUFFER_ADD_PRINTF(buf,
" \"source\": {\"resolution\": {\"width\": %u, \"height\": %u},"
" \"online\": %s, \"desired_fps\": %u, \"captured_fps\": %u},"
" \"stream\": {\"queued_fps\": %u, \"clients\": %u, \"clients_stat\": {",
(server->fake_width ? server->fake_width : ex->frame->width),
(server->fake_height ? server->fake_height : ex->frame->height),
us_bool_to_string(ex->frame->online),
(server->fake_width ? server->fake_width : width),
(server->fake_height ? server->fake_height : height),
us_bool_to_string(online),
stream->dev->desired_fps,
atomic_load(&stream->run->captured_fps),
captured_fps,
ex->queued_fps,
run->stream_clients_count
);
@@ -480,62 +535,26 @@ static void _http_callback_state(struct evhttp_request *request, void *v_server)
_A_EVBUFFER_ADD_PRINTF(buf, "}}}}");
ADD_HEADER("Content-Type", "application/json");
_A_ADD_HEADER(request, "Content-Type", "application/json");
evhttp_send_reply(request, HTTP_OK, "OK", buf);
evbuffer_free(buf);
}
static void _http_callback_snapshot(struct evhttp_request *request, void *v_server) {
us_server_s *const server = (us_server_s *)v_server;
us_server_exposed_s *const ex = server->run->exposed;
us_server_s *const server = v_server;
PREPROCESS_REQUEST;
struct evbuffer *buf;
_A_EVBUFFER_NEW(buf);
_A_EVBUFFER_ADD(buf, (const void*)ex->frame->data, ex->frame->used);
us_snapshot_client_s *client;
US_CALLOC(client, 1);
client->server = server;
client->request = request;
client->request_ts = us_get_now_monotonic();
ADD_HEADER("Cache-Control", "no-store, no-cache, must-revalidate, proxy-revalidate, pre-check=0, post-check=0, max-age=0");
ADD_HEADER("Pragma", "no-cache");
ADD_HEADER("Expires", "Mon, 3 Jan 2000 12:34:56 GMT");
char header_buf[256];
# define ADD_TIME_HEADER(x_key, x_value) { \
US_SNPRINTF(header_buf, 255, "%.06Lf", x_value); \
ADD_HEADER(x_key, header_buf); \
}
# define ADD_UNSIGNED_HEADER(x_key, x_value) { \
US_SNPRINTF(header_buf, 255, "%u", x_value); \
ADD_HEADER(x_key, header_buf); \
}
ADD_TIME_HEADER("X-Timestamp", us_get_now_real());
ADD_HEADER("X-UStreamer-Online", us_bool_to_string(ex->frame->online));
ADD_UNSIGNED_HEADER("X-UStreamer-Dropped", ex->dropped);
ADD_UNSIGNED_HEADER("X-UStreamer-Width", ex->frame->width);
ADD_UNSIGNED_HEADER("X-UStreamer-Height", ex->frame->height);
ADD_TIME_HEADER("X-UStreamer-Grab-Timestamp", ex->frame->grab_ts);
ADD_TIME_HEADER("X-UStreamer-Encode-Begin-Timestamp", ex->frame->encode_begin_ts);
ADD_TIME_HEADER("X-UStreamer-Encode-End-Timestamp", ex->frame->encode_end_ts);
ADD_TIME_HEADER("X-UStreamer-Expose-Begin-Timestamp", ex->expose_begin_ts);
ADD_TIME_HEADER("X-UStreamer-Expose-Cmp-Timestamp", ex->expose_cmp_ts);
ADD_TIME_HEADER("X-UStreamer-Expose-End-Timestamp", ex->expose_end_ts);
ADD_TIME_HEADER("X-UStreamer-Send-Timestamp", us_get_now_monotonic());
# undef ADD_UNSUGNED_HEADER
# undef ADD_TIME_HEADER
ADD_HEADER("Content-Type", "image/jpeg");
evhttp_send_reply(request, HTTP_OK, "OK", buf);
evbuffer_free(buf);
atomic_fetch_add(&server->stream->run->http_snapshot_requested, 1);
US_LIST_APPEND(server->run->snapshot_clients, client);
}
#undef ADD_HEADER
static void _http_callback_stream(struct evhttp_request *request, void *v_server) {
// https://github.com/libevent/libevent/blob/29cc8386a2f7911eaa9336692a2c5544d8b4734f/http.c#L2814
// https://github.com/libevent/libevent/blob/29cc8386a2f7911eaa9336692a2c5544d8b4734f/http.c#L2789
@@ -543,7 +562,7 @@ static void _http_callback_stream(struct evhttp_request *request, void *v_server
// https://github.com/libevent/libevent/blob/29cc8386a2f7911eaa9336692a2c5544d8b4734f/http.c#L791
// https://github.com/libevent/libevent/blob/29cc8386a2f7911eaa9336692a2c5544d8b4734f/http.c#L1458
us_server_s *const server = (us_server_s *)v_server;
us_server_s *const server = v_server;
us_server_runtime_s *const run = server->run;
PREPROCESS_REQUEST;
@@ -580,20 +599,20 @@ static void _http_callback_stream(struct evhttp_request *request, void *v_server
# endif
}
US_LOG_INFO("HTTP: NEW client (now=%u): %s, id=%" PRIx64,
_S_LOG_INFO("NEW client (now=%u): %s, id=%" PRIx64,
run->stream_clients_count, client->hostport, client->id);
struct bufferevent *const buf_event = evhttp_connection_get_bufferevent(conn);
if (server->tcp_nodelay && run->ext_fd >= 0) {
US_LOG_DEBUG("HTTP: Setting up TCP_NODELAY to the client %s ...", client->hostport);
_S_LOG_DEBUG("Setting up TCP_NODELAY to the client %s ...", client->hostport);
const evutil_socket_t fd = bufferevent_getfd(buf_event);
assert(fd >= 0);
int on = 1;
if (setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, (void *)&on, sizeof(on)) != 0) {
US_LOG_PERROR("HTTP: Can't set TCP_NODELAY to the client %s", client->hostport);
if (setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, (void*)&on, sizeof(on)) != 0) {
_S_LOG_PERROR("Can't set TCP_NODELAY to the client %s", client->hostport);
}
}
bufferevent_setcb(buf_event, NULL, NULL, _http_callback_stream_error, (void *)client);
bufferevent_setcb(buf_event, NULL, NULL, _http_callback_stream_error, (void*)client);
bufferevent_enable(buf_event, EV_READ);
} else {
evhttp_request_free(request);
@@ -603,17 +622,17 @@ static void _http_callback_stream(struct evhttp_request *request, void *v_server
#undef PREPROCESS_REQUEST
static void _http_callback_stream_write(struct bufferevent *buf_event, void *v_client) {
us_stream_client_s *const client = (us_stream_client_s *)v_client;
us_stream_client_s *const client = v_client;
us_server_s *const server = client->server;
us_server_exposed_s *const ex = server->run->exposed;
const long double now = us_get_now_monotonic();
const long long now_second = us_floor_ms(now);
const ldf now_ts = us_get_now_monotonic();
const sll now_sec_ts = us_floor_ms(now_ts);
if (now_second != client->fps_accum_second) {
if (now_sec_ts != client->fps_ts) {
client->fps = client->fps_accum;
client->fps_accum = 0;
client->fps_accum_second = now_second;
client->fps_ts = now_sec_ts;
}
client->fps_accum += 1;
@@ -724,8 +743,8 @@ static void _http_callback_stream_write(struct bufferevent *buf_event, void *v_c
ex->expose_begin_ts,
ex->expose_cmp_ts,
ex->expose_end_ts,
now,
now - ex->frame->grab_ts
now_ts,
now_ts - ex->frame->grab_ts
);
}
}
@@ -742,7 +761,7 @@ static void _http_callback_stream_write(struct bufferevent *buf_event, void *v_c
assert(!bufferevent_write_buffer(buf_event, buf));
evbuffer_free(buf);
bufferevent_setcb(buf_event, NULL, NULL, _http_callback_stream_error, (void *)client);
bufferevent_setcb(buf_event, NULL, NULL, _http_callback_stream_error, (void*)client);
bufferevent_enable(buf_event, EV_READ);
# undef ADD_ADVANCE_HEADERS
@@ -753,7 +772,7 @@ static void _http_callback_stream_error(struct bufferevent *buf_event, short wha
(void)buf_event;
(void)what;
us_stream_client_s *const client = (us_stream_client_s *)v_client;
us_stream_client_s *const client = v_client;
us_server_s *const server = client->server;
us_server_runtime_s *const run = server->run;
@@ -767,7 +786,7 @@ static void _http_callback_stream_error(struct bufferevent *buf_event, short wha
}
char *const reason = us_bufferevent_format_reason(what);
US_LOG_INFO("HTTP: DEL client (now=%u): %s, id=%" PRIx64 ", %s",
_S_LOG_INFO("DEL client (now=%u): %s, id=%" PRIx64 ", %s",
run->stream_clients_count, client->hostport, client->id, reason);
free(reason);
@@ -779,7 +798,7 @@ static void _http_callback_stream_error(struct bufferevent *buf_event, short wha
free(client);
}
static void _http_queue_send_stream(us_server_s *server, bool stream_updated, bool frame_updated) {
static void _http_send_stream(us_server_s *server, bool stream_updated, bool frame_updated) {
us_server_runtime_s *const run = server->run;
us_server_exposed_s *const ex = run->exposed;
@@ -805,7 +824,7 @@ static void _http_queue_send_stream(us_server_s *server, bool stream_updated, bo
if (dual_update || frame_updated || client->need_first_frame) {
struct bufferevent *const buf_event = evhttp_connection_get_bufferevent(conn);
bufferevent_setcb(buf_event, NULL, _http_callback_stream_write, _http_callback_stream_error, (void *)client);
bufferevent_setcb(buf_event, NULL, _http_callback_stream_write, _http_callback_stream_error, (void*)client);
bufferevent_enable(buf_event, EV_READ|EV_WRITE);
client->need_first_frame = false;
@@ -820,13 +839,13 @@ static void _http_queue_send_stream(us_server_s *server, bool stream_updated, bo
});
if (queued) {
static unsigned queued_fps_accum = 0;
static long long queued_fps_second = 0;
const long long now = us_floor_ms(us_get_now_monotonic());
if (now != queued_fps_second) {
static uint queued_fps_accum = 0;
static sll queued_fps_ts = 0;
const sll now_sec_ts = us_floor_ms(us_get_now_monotonic());
if (now_sec_ts != queued_fps_ts) {
ex->queued_fps = queued_fps_accum;
queued_fps_accum = 0;
queued_fps_second = now;
queued_fps_ts = now_sec_ts;
}
queued_fps_accum += 1;
} else if (!has_clients) {
@@ -834,29 +853,82 @@ static void _http_queue_send_stream(us_server_s *server, bool stream_updated, bo
}
}
static void _http_request_watcher(int fd, short what, void *v_server) {
(void)fd;
(void)what;
static void _http_send_snapshot(us_server_s *server) {
us_server_exposed_s *const ex = server->run->exposed;
us_blank_s *blank = NULL;
us_server_s *const server = (us_server_s *)v_server;
us_server_runtime_s *const run = server->run;
const long double now = us_get_now_monotonic();
# define ADD_TIME_HEADER(x_key, x_value) { \
US_SNPRINTF(header_buf, 255, "%.06Lf", x_value); \
_A_ADD_HEADER(request, x_key, header_buf); \
}
if (us_stream_has_clients(server->stream)) {
run->last_request_ts = now;
} else if (run->last_request_ts + server->exit_on_no_clients < now) {
US_LOG_INFO("HTTP: No requests or HTTP/sink clients found in last %u seconds, exiting ...",
server->exit_on_no_clients);
us_process_suicide();
run->last_request_ts = now;
}
# define ADD_UNSIGNED_HEADER(x_key, x_value) { \
US_SNPRINTF(header_buf, 255, "%u", x_value); \
_A_ADD_HEADER(request, x_key, header_buf); \
}
uint width;
uint height;
uint captured_fps; // Unused
bool online;
us_stream_get_capture_state(server->stream, &width, &height, &online, &captured_fps);
US_LIST_ITERATE(server->run->snapshot_clients, client, { // cppcheck-suppress constStatement
struct evhttp_request *request = client->request;
const bool has_fresh_snapshot = (atomic_load(&server->stream->run->http_snapshot_requested) == 0);
const bool timed_out = (client->request_ts + US_MAX((uint)1, server->stream->error_delay * 3) < us_get_now_monotonic());
if (has_fresh_snapshot || timed_out) {
us_frame_s *frame = ex->frame;
if (!online) {
if (blank == NULL) {
blank = us_blank_init();
us_blank_draw(blank, "< NO SIGNAL >", width, height);
}
frame = blank->jpeg;
}
struct evbuffer *buf;
_A_EVBUFFER_NEW(buf);
_A_EVBUFFER_ADD(buf, (const void*)frame->data, frame->used);
_A_ADD_HEADER(request, "Cache-Control", "no-store, no-cache, must-revalidate, proxy-revalidate, pre-check=0, post-check=0, max-age=0");
_A_ADD_HEADER(request, "Pragma", "no-cache");
_A_ADD_HEADER(request, "Expires", "Mon, 3 Jan 2000 12:34:56 GMT");
char header_buf[256];
ADD_TIME_HEADER("X-Timestamp", us_get_now_real());
_A_ADD_HEADER(request, "X-UStreamer-Online", us_bool_to_string(frame->online));
ADD_UNSIGNED_HEADER("X-UStreamer-Width", frame->width);
ADD_UNSIGNED_HEADER("X-UStreamer-Height", frame->height);
ADD_TIME_HEADER("X-UStreamer-Grab-Timestamp", frame->grab_ts);
ADD_TIME_HEADER("X-UStreamer-Encode-Begin-Timestamp", frame->encode_begin_ts);
ADD_TIME_HEADER("X-UStreamer-Encode-End-Timestamp", frame->encode_end_ts);
ADD_TIME_HEADER("X-UStreamer-Send-Timestamp", us_get_now_monotonic());
_A_ADD_HEADER(request, "Content-Type", "image/jpeg");
evhttp_send_reply(request, HTTP_OK, "OK", buf);
evbuffer_free(buf);
US_LIST_REMOVE(server->run->snapshot_clients, client);
free(client);
}
});
# undef ADD_UNSUGNED_HEADER
# undef ADD_TIME_HEADER
US_DELETE(blank, us_blank_destroy);
}
static void _http_refresher(int fd, short what, void *v_server) {
(void)fd;
(void)what;
us_server_s *server = (us_server_s *)v_server;
us_server_s *server = v_server;
us_server_exposed_s *ex = server->run->exposed;
us_ring_s *const ring = server->stream->run->http_jpeg_ring;
@@ -870,7 +942,7 @@ static void _http_refresher(int fd, short what, void *v_server) {
stream_updated = true;
us_ring_consumer_release(ring, ri);
} else if (ex->expose_end_ts + 1 < us_get_now_monotonic()) {
US_LOG_DEBUG("HTTP: Repeating exposed ...");
_S_LOG_DEBUG("Repeating exposed ...");
ex->expose_begin_ts = us_get_now_monotonic();
ex->expose_cmp_ts = ex->expose_begin_ts;
ex->expose_end_ts = ex->expose_begin_ts;
@@ -878,7 +950,8 @@ static void _http_refresher(int fd, short what, void *v_server) {
stream_updated = true;
}
_http_queue_send_stream(server, stream_updated, frame_updated);
_http_send_stream(server, stream_updated, frame_updated);
_http_send_snapshot(server);
if (
frame_updated
@@ -899,7 +972,7 @@ static void _http_refresher(int fd, short what, void *v_server) {
static bool _expose_frame(us_server_s *server, const us_frame_s *frame) {
us_server_exposed_s *const ex = server->run->exposed;
US_LOG_DEBUG("HTTP: Updating exposed frame (online=%d) ...", frame->online);
_S_LOG_DEBUG("Updating exposed frame (online=%d) ...", frame->online);
ex->expose_begin_ts = us_get_now_monotonic();
if (server->drop_same_frames && frame->online) {
@@ -911,13 +984,13 @@ static bool _expose_frame(us_server_s *server, const us_frame_s *frame) {
) {
ex->expose_cmp_ts = us_get_now_monotonic();
ex->expose_end_ts = ex->expose_cmp_ts;
US_LOG_VERBOSE("HTTP: Dropped same frame number %u; cmp_time=%.06Lf",
_S_LOG_VERBOSE("Dropped same frame number %u; cmp_time=%.06Lf",
ex->dropped, (ex->expose_cmp_ts - ex->expose_begin_ts));
ex->dropped += 1;
return false; // Not updated
} else {
ex->expose_cmp_ts = us_get_now_monotonic();
US_LOG_VERBOSE("HTTP: Passed same frame check (need_drop=%d, maybe_same=%d); cmp_time=%.06Lf",
_S_LOG_VERBOSE("Passed same frame check (need_drop=%d, maybe_same=%d); cmp_time=%.06Lf",
need_drop, maybe_same, (ex->expose_cmp_ts - ex->expose_begin_ts));
}
}
@@ -934,7 +1007,7 @@ static bool _expose_frame(us_server_s *server, const us_frame_s *frame) {
ex->expose_cmp_ts = ex->expose_begin_ts;
ex->expose_end_ts = us_get_now_monotonic();
US_LOG_VERBOSE("HTTP: Exposed frame: online=%d, exp_time=%.06Lf",
_S_LOG_VERBOSE("Exposed frame: online=%d, exp_time=%.06Lf",
ex->frame->online, (ex->expose_end_ts - ex->expose_begin_ts));
return true; // Updated
}
@@ -957,7 +1030,7 @@ static char *_http_get_client_hostport(struct evhttp_request *request) {
if (xff != NULL) {
US_DELETE(addr, free);
assert((addr = strndup(xff, 1024)) != NULL);
for (unsigned index = 0; addr[index]; ++index) {
for (uint index = 0; addr[index]; ++index) {
if (addr[index] == ',') {
addr[index] = '\0';
break;

View File

@@ -22,95 +22,61 @@
#pragma once
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stdbool.h>
#include <stdatomic.h>
#include <string.h>
#include <inttypes.h>
#include <unistd.h>
#include <fcntl.h>
#include <assert.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <netinet/tcp.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <event2/util.h>
#include <event2/event.h>
#include <event2/thread.h>
#include <event2/http.h>
#include <event2/buffer.h>
#include <event2/bufferevent.h>
#include <event2/keyvalq_struct.h>
#ifndef EVTHREAD_USE_PTHREADS_IMPLEMENTED
# error Required libevent-pthreads support
#endif
#include "../../libs/tools.h"
#include "../../libs/threading.h"
#include "../../libs/logging.h"
#include "../../libs/process.h"
#include "../../libs/types.h"
#include "../../libs/frame.h"
#include "../../libs/base64.h"
#include "../../libs/list.h"
#include "../data/index_html.h"
#include "../data/favicon_ico.h"
#include "../encoder.h"
#include "../stream.h"
#ifdef WITH_GPIO
# include "../gpio/gpio.h"
#endif
#include "bev.h"
#include "unix.h"
#include "uri.h"
#include "mime.h"
#include "static.h"
#ifdef WITH_SYSTEMD
# include "systemd/systemd.h"
#endif
typedef struct us_stream_client_sx {
struct us_server_sx *server;
struct evhttp_request *request;
char *key;
bool extra_headers;
bool advance_headers;
bool dual_final_frames;
bool zero_data;
char *key;
bool extra_headers;
bool advance_headers;
bool dual_final_frames;
bool zero_data;
char *hostport;
uint64_t id;
bool need_initial;
bool need_first_frame;
bool updated_prev;
unsigned fps;
unsigned fps_accum;
long long fps_accum_second;
char *hostport;
u64 id;
bool need_initial;
bool need_first_frame;
bool updated_prev;
uint fps_accum;
sll fps_ts;
uint fps;
US_LIST_STRUCT(struct us_stream_client_sx);
} us_stream_client_s;
typedef struct {
us_frame_s *frame;
unsigned captured_fps;
unsigned queued_fps;
unsigned dropped;
long double expose_begin_ts;
long double expose_cmp_ts;
long double expose_end_ts;
typedef struct us_snapshot_client_sx {
struct us_server_sx *server;
struct evhttp_request *request;
ldf request_ts;
bool notify_last_online;
unsigned notify_last_width;
unsigned notify_last_height;
US_LIST_STRUCT(struct us_snapshot_client_sx);
} us_snapshot_client_s;
typedef struct {
us_frame_s *frame;
uint captured_fps;
uint queued_fps;
uint dropped;
ldf expose_begin_ts;
ldf expose_cmp_ts;
ldf expose_end_ts;
bool notify_last_online;
uint notify_last_width;
uint notify_last_height;
} us_server_exposed_s;
typedef struct {
@@ -120,45 +86,43 @@ typedef struct {
char *auth_token;
struct event *request_watcher;
long double last_request_ts;
struct event *refresher;
us_server_exposed_s *exposed;
us_stream_client_s *stream_clients;
unsigned stream_clients_count;
uint stream_clients_count;
us_snapshot_client_s *snapshot_clients;
} us_server_runtime_s;
typedef struct us_server_sx {
char *host;
unsigned port;
us_stream_s *stream;
char *unix_path;
bool unix_rm;
mode_t unix_mode;
char *host;
uint port;
char *unix_path;
bool unix_rm;
mode_t unix_mode;
# ifdef WITH_SYSTEMD
bool systemd;
bool systemd;
# endif
bool tcp_nodelay;
unsigned timeout;
bool tcp_nodelay;
uint timeout;
char *user;
char *passwd;
char *static_path;
char *allow_origin;
char *instance_id;
char *user;
char *passwd;
char *static_path;
char *allow_origin;
char *instance_id;
unsigned drop_same_frames;
unsigned fake_width;
unsigned fake_height;
uint drop_same_frames;
uint fake_width;
uint fake_height;
bool notify_parent;
unsigned exit_on_no_clients;
us_stream_s *stream;
bool notify_parent;
us_server_runtime_s *run;
} us_server_s;

View File

@@ -22,6 +22,19 @@
#include "static.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <assert.h>
#include <sys/stat.h>
#include "../../libs/tools.h"
#include "../../libs/logging.h"
#include "path.h"
char *us_find_static_file_path(const char *root_path, const char *request_path) {
char *path = NULL;

View File

@@ -22,18 +22,5 @@
#pragma once
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <assert.h>
#include <sys/stat.h>
#include "../../libs/tools.h"
#include "../../libs/logging.h"
#include "path.h"
char *us_find_static_file_path(const char *root_path, const char *request_path);

View File

@@ -22,11 +22,22 @@
#include "systemd.h"
#include <unistd.h>
#include <assert.h>
#include <event2/http.h>
#include <event2/util.h>
#include <systemd/sd-daemon.h>
#include "../../../libs/tools.h"
#include "../../../libs/logging.h"
evutil_socket_t us_evhttp_bind_systemd(struct evhttp *http) {
const int fds = sd_listen_fds(1);
if (fds < 1) {
US_LOG_ERROR("No available systemd sockets");
US_LOG_ERROR("HTTP: No available systemd sockets");
return -1;
}
@@ -39,7 +50,7 @@ evutil_socket_t us_evhttp_bind_systemd(struct evhttp *http) {
assert(!evutil_make_socket_nonblocking(fd));
if (evhttp_accept_socket(http, fd) < 0) {
US_LOG_PERROR("Can't evhttp_accept_socket() systemd socket");
US_LOG_PERROR("HTTP: Can't evhttp_accept_socket() systemd socket");
return -1;
}
return fd;

View File

@@ -22,16 +22,8 @@
#pragma once
#include <unistd.h>
#include <assert.h>
#include <event2/http.h>
#include <event2/util.h>
#include <systemd/sd-daemon.h>
#include "../../../libs/tools.h"
#include "../../../libs/logging.h"
evutil_socket_t us_evhttp_bind_systemd(struct evhttp *http);

View File

@@ -22,13 +22,29 @@
#include "unix.h"
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <assert.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <sys/stat.h>
#include <event2/http.h>
#include <event2/util.h>
#include "../../libs/types.h"
#include "../../libs/tools.h"
#include "../../libs/logging.h"
evutil_socket_t us_evhttp_bind_unix(struct evhttp *http, const char *path, bool rm, mode_t mode) {
struct sockaddr_un addr = {0};
const size_t max_sun_path = sizeof(addr.sun_path) - 1;
const uz max_sun_path = sizeof(addr.sun_path) - 1;
if (strlen(path) > max_sun_path) {
US_LOG_ERROR("UNIX socket path is too long; max=%zu", max_sun_path);
US_LOG_ERROR("HTTP: UNIX socket path is too long; max=%zu", max_sun_path);
return -1;
}
@@ -41,24 +57,24 @@ evutil_socket_t us_evhttp_bind_unix(struct evhttp *http, const char *path, bool
if (rm && unlink(path) < 0) {
if (errno != ENOENT) {
US_LOG_PERROR("Can't remove old UNIX socket '%s'", path);
US_LOG_PERROR("HTTP: Can't remove old UNIX socket '%s'", path);
return -1;
}
}
if (bind(fd, (struct sockaddr *)&addr, sizeof(struct sockaddr_un)) < 0) {
US_LOG_PERROR("Can't bind HTTP to UNIX socket '%s'", path);
if (bind(fd, (struct sockaddr*)&addr, sizeof(struct sockaddr_un)) < 0) {
US_LOG_PERROR("HTTP: Can't bind HTTP to UNIX socket '%s'", path);
return -1;
}
if (mode && chmod(path, mode) < 0) {
US_LOG_PERROR("Can't set permissions %o to UNIX socket '%s'", mode, path);
US_LOG_PERROR("HTTP: Can't set permissions %o to UNIX socket '%s'", mode, path);
return -1;
}
if (listen(fd, 128) < 0) {
US_LOG_PERROR("Can't listen UNIX socket '%s'", path);
US_LOG_PERROR("HTTP: Can't listen UNIX socket '%s'", path);
return -1;
}
if (evhttp_accept_socket(http, fd) < 0) {
US_LOG_PERROR("Can't evhttp_accept_socket() UNIX socket '%s'", path);
US_LOG_PERROR("HTTP: Can't evhttp_accept_socket() UNIX socket '%s'", path);
return -1;
}
return fd;

View File

@@ -22,21 +22,12 @@
#pragma once
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <assert.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <sys/stat.h>
#include <event2/http.h>
#include <event2/util.h>
#include "../../libs/tools.h"
#include "../../libs/logging.h"
#include "../../libs/types.h"
evutil_socket_t us_evhttp_bind_unix(struct evhttp *http, const char *path, bool rm, mode_t mode);

View File

@@ -22,6 +22,12 @@
#include "uri.h"
#include <event2/util.h>
#include <event2/http.h>
#include <event2/keyvalq_struct.h>
#include "../../libs/types.h"
bool us_uri_get_true(struct evkeyvalq *params, const char *key) {
const char *value_str = evhttp_find_header(params, key);

View File

@@ -22,12 +22,10 @@
#pragma once
#include <stdbool.h>
#include <event2/util.h>
#include <event2/http.h>
#include <event2/keyvalq_struct.h>
#include "../../libs/types.h"
bool us_uri_get_true(struct evkeyvalq *params, const char *key);
char *us_uri_get_string(struct evkeyvalq *params, const char *key);

View File

@@ -22,16 +22,34 @@
#include "m2m.h"
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <fcntl.h>
#include <poll.h>
#include <errno.h>
#include <assert.h>
#include <sys/mman.h>
#include <linux/videodev2.h>
#include "../libs/types.h"
#include "../libs/tools.h"
#include "../libs/logging.h"
#include "../libs/frame.h"
#include "../libs/xioctl.h"
static us_m2m_encoder_s *_m2m_encoder_init(
const char *name, const char *path, unsigned output_format,
unsigned fps, unsigned bitrate, unsigned gop, unsigned quality, bool allow_dma);
const char *name, const char *path, uint output_format,
uint bitrate, uint gop, uint quality, bool allow_dma);
static void _m2m_encoder_prepare(us_m2m_encoder_s *enc, const us_frame_s *frame);
static void _m2m_encoder_ensure(us_m2m_encoder_s *enc, const us_frame_s *frame);
static int _m2m_encoder_init_buffers(
us_m2m_encoder_s *enc, const char *name, enum v4l2_buf_type type,
us_m2m_buffer_s **bufs_ptr, unsigned *n_bufs_ptr, bool dma);
us_m2m_buffer_s **bufs_ptr, uint *n_bufs_ptr, bool dma);
static void _m2m_encoder_cleanup(us_m2m_encoder_s *enc);
@@ -44,18 +62,13 @@ static int _m2m_encoder_compress_raw(us_m2m_encoder_s *enc, const us_frame_s *sr
#define _E_LOG_VERBOSE(x_msg, ...) US_LOG_VERBOSE("%s: " x_msg, enc->name, ##__VA_ARGS__)
#define _E_LOG_DEBUG(x_msg, ...) US_LOG_DEBUG("%s: " x_msg, enc->name, ##__VA_ARGS__)
#define _RUN(x_next) enc->run->x_next
us_m2m_encoder_s *us_m2m_h264_encoder_init(const char *name, const char *path, unsigned bitrate, unsigned gop) {
// FIXME: 30 or 0? https://github.com/6by9/yavta/blob/master/yavta.c#L2100
// По логике вещей правильно 0, но почему-то на низких разрешениях типа 640x480
// енкодер через несколько секунд перестает производить корректные фреймы.
us_m2m_encoder_s *us_m2m_h264_encoder_init(const char *name, const char *path, uint bitrate, uint gop) {
bitrate *= 1000; // From Kbps
return _m2m_encoder_init(name, path, V4L2_PIX_FMT_H264, 30, bitrate, gop, 0, true);
return _m2m_encoder_init(name, path, V4L2_PIX_FMT_H264, bitrate, gop, 0, true);
}
us_m2m_encoder_s *us_m2m_mjpeg_encoder_init(const char *name, const char *path, unsigned quality) {
us_m2m_encoder_s *us_m2m_mjpeg_encoder_init(const char *name, const char *path, uint quality) {
const double b_min = 25;
const double b_max = 20000;
const double step = 25;
@@ -63,13 +76,12 @@ us_m2m_encoder_s *us_m2m_mjpeg_encoder_init(const char *name, const char *path,
bitrate = step * round(bitrate / step);
bitrate *= 1000; // From Kbps
assert(bitrate > 0);
// FIXME: То же самое про 30 or 0, но еще даже не проверено на низких разрешениях
return _m2m_encoder_init(name, path, V4L2_PIX_FMT_MJPEG, 30, bitrate, 0, 0, true);
return _m2m_encoder_init(name, path, V4L2_PIX_FMT_MJPEG, bitrate, 0, 0, true);
}
us_m2m_encoder_s *us_m2m_jpeg_encoder_init(const char *name, const char *path, unsigned quality) {
us_m2m_encoder_s *us_m2m_jpeg_encoder_init(const char *name, const char *path, uint quality) {
// FIXME: DMA не работает
return _m2m_encoder_init(name, path, V4L2_PIX_FMT_JPEG, 30, 0, 0, quality, false);
return _m2m_encoder_init(name, path, V4L2_PIX_FMT_JPEG, 0, 0, quality, false);
}
void us_m2m_encoder_destroy(us_m2m_encoder_s *enc) {
@@ -81,22 +93,18 @@ void us_m2m_encoder_destroy(us_m2m_encoder_s *enc) {
}
int us_m2m_encoder_compress(us_m2m_encoder_s *enc, const us_frame_s *src, us_frame_s *dest, bool force_key) {
us_m2m_encoder_runtime_s *const run = enc->run;
us_frame_encoding_begin(src, dest, (enc->output_format == V4L2_PIX_FMT_MJPEG ? V4L2_PIX_FMT_JPEG : enc->output_format));
if (
_RUN(width) != src->width
|| _RUN(height) != src->height
|| _RUN(input_format) != src->format
|| _RUN(stride) != src->stride
|| _RUN(dma) != (enc->allow_dma && src->dma_fd >= 0)
) {
_m2m_encoder_prepare(enc, src);
}
if (!_RUN(ready)) { // Already prepared but failed
_m2m_encoder_ensure(enc, src);
if (!run->ready) { // Already prepared but failed
return -1;
}
force_key = (enc->output_format == V4L2_PIX_FMT_H264 && (force_key || _RUN(last_online) != src->online));
force_key = (enc->output_format == V4L2_PIX_FMT_H264 && (force_key || run->last_online != src->online));
_E_LOG_DEBUG("Compressing new frame; force_key=%d ...", force_key);
if (_m2m_encoder_compress_raw(enc, src, dest, force_key) < 0) {
_m2m_encoder_cleanup(enc);
@@ -109,13 +117,13 @@ int us_m2m_encoder_compress(us_m2m_encoder_s *enc, const us_frame_s *src, us_fra
_E_LOG_VERBOSE("Compressed new frame: size=%zu, time=%0.3Lf, force_key=%d",
dest->used, dest->encode_end_ts - dest->encode_begin_ts, force_key);
_RUN(last_online) = src->online;
run->last_online = src->online;
return 0;
}
static us_m2m_encoder_s *_m2m_encoder_init(
const char *name, const char *path, unsigned output_format,
unsigned fps, unsigned bitrate, unsigned gop, unsigned quality, bool allow_dma) {
const char *name, const char *path, uint output_format,
uint bitrate, uint gop, uint quality, bool allow_dma) {
US_LOG_INFO("%s: Initializing encoder ...", name);
@@ -133,7 +141,6 @@ static us_m2m_encoder_s *_m2m_encoder_init(
enc->path = us_strdup(path);
}
enc->output_format = output_format;
enc->fps = fps;
enc->bitrate = bitrate;
enc->gop = gop;
enc->quality = quality;
@@ -143,30 +150,49 @@ static us_m2m_encoder_s *_m2m_encoder_init(
}
#define _E_XIOCTL(x_request, x_value, x_msg, ...) { \
if (us_xioctl(_RUN(fd), x_request, x_value) < 0) { \
if (us_xioctl(run->fd, x_request, x_value) < 0) { \
_E_LOG_PERROR(x_msg, ##__VA_ARGS__); \
goto error; \
} \
}
static void _m2m_encoder_prepare(us_m2m_encoder_s *enc, const us_frame_s *frame) {
static void _m2m_encoder_ensure(us_m2m_encoder_s *enc, const us_frame_s *frame) {
us_m2m_encoder_runtime_s *const run = enc->run;
const bool dma = (enc->allow_dma && frame->dma_fd >= 0);
if (
run->p_width == frame->width
&& run->p_height == frame->height
&& run->p_input_format == frame->format
&& run->p_stride == frame->stride
&& run->p_dma == dma
) {
return; // Configured already
}
_E_LOG_INFO("Configuring encoder: DMA=%d ...", dma);
_E_LOG_DEBUG("Encoder changes: width=%u->%u, height=%u->%u, input_format=%u->%u, stride=%u->%u, dma=%u->%u",
run->p_width, frame->width,
run->p_height, frame->height,
run->p_input_format, frame->format,
run->p_stride, frame->stride,
run->p_dma, dma);
_m2m_encoder_cleanup(enc);
_RUN(width) = frame->width;
_RUN(height) = frame->height;
_RUN(input_format) = frame->format;
_RUN(stride) = frame->stride;
_RUN(dma) = dma;
run->p_width = frame->width;
run->p_height = frame->height;
run->p_input_format = frame->format;
run->p_stride = frame->stride;
run->p_dma = dma;
if ((_RUN(fd) = open(enc->path, O_RDWR)) < 0) {
_E_LOG_DEBUG("Opening encoder device ...");
if ((run->fd = open(enc->path, O_RDWR)) < 0) {
_E_LOG_PERROR("Can't open encoder device");
goto error;
}
_E_LOG_DEBUG("Encoder device fd=%d opened", _RUN(fd));
_E_LOG_DEBUG("Encoder device fd=%d opened", run->fd);
# define SET_OPTION(x_cid, x_value) { \
struct v4l2_control m_ctl = {0}; \
@@ -175,12 +201,11 @@ static void _m2m_encoder_prepare(us_m2m_encoder_s *enc, const us_frame_s *frame)
_E_LOG_DEBUG("Configuring option " #x_cid " ..."); \
_E_XIOCTL(VIDIOC_S_CTRL, &m_ctl, "Can't set option " #x_cid); \
}
if (enc->output_format == V4L2_PIX_FMT_H264) {
SET_OPTION(V4L2_CID_MPEG_VIDEO_BITRATE, enc->bitrate);
SET_OPTION(V4L2_CID_MPEG_VIDEO_H264_I_PERIOD, enc->gop);
SET_OPTION(V4L2_CID_MPEG_VIDEO_H264_PROFILE, V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE);
if (_RUN(width) * _RUN(height) <= 1920 * 1080) { // https://forums.raspberrypi.com/viewtopic.php?t=291447#p1762296
if (run->p_width * run->p_height <= 1920 * 1080) { // https://forums.raspberrypi.com/viewtopic.php?t=291447#p1762296
SET_OPTION(V4L2_CID_MPEG_VIDEO_H264_LEVEL, V4L2_MPEG_VIDEO_H264_LEVEL_4_0);
} else {
SET_OPTION(V4L2_CID_MPEG_VIDEO_H264_LEVEL, V4L2_MPEG_VIDEO_H264_LEVEL_5_1);
@@ -193,19 +218,18 @@ static void _m2m_encoder_prepare(us_m2m_encoder_s *enc, const us_frame_s *frame)
} else if (enc->output_format == V4L2_PIX_FMT_JPEG) {
SET_OPTION(V4L2_CID_JPEG_COMPRESSION_QUALITY, enc->quality);
}
# undef SET_OPTION
{
struct v4l2_format fmt = {0};
fmt.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
fmt.fmt.pix_mp.width = _RUN(width);
fmt.fmt.pix_mp.height = _RUN(height);
fmt.fmt.pix_mp.pixelformat = _RUN(input_format);
fmt.fmt.pix_mp.width = run->p_width;
fmt.fmt.pix_mp.height = run->p_height;
fmt.fmt.pix_mp.pixelformat = run->p_input_format;
fmt.fmt.pix_mp.field = V4L2_FIELD_ANY;
fmt.fmt.pix_mp.colorspace = V4L2_COLORSPACE_JPEG; // libcamera currently has no means to request the right colour space
fmt.fmt.pix_mp.num_planes = 1;
// fmt.fmt.pix_mp.plane_fmt[0].bytesperline = _RUN(stride);
// fmt.fmt.pix_mp.plane_fmt[0].bytesperline = run->p_stride;
_E_LOG_DEBUG("Configuring INPUT format ...");
_E_XIOCTL(VIDIOC_S_FMT, &fmt, "Can't set INPUT format");
}
@@ -213,8 +237,8 @@ static void _m2m_encoder_prepare(us_m2m_encoder_s *enc, const us_frame_s *frame)
{
struct v4l2_format fmt = {0};
fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
fmt.fmt.pix_mp.width = _RUN(width);
fmt.fmt.pix_mp.height = _RUN(height);
fmt.fmt.pix_mp.width = run->p_width;
fmt.fmt.pix_mp.height = run->p_height;
fmt.fmt.pix_mp.pixelformat = enc->output_format;
fmt.fmt.pix_mp.field = V4L2_FIELD_ANY;
fmt.fmt.pix_mp.colorspace = V4L2_COLORSPACE_DEFAULT;
@@ -236,21 +260,37 @@ static void _m2m_encoder_prepare(us_m2m_encoder_s *enc, const us_frame_s *frame)
}
}
if (enc->fps > 0) { // TODO: Check this for MJPEG
if (run->p_width * run->p_height <= 1280 * 720) {
// H264 требует каких-то лимитов. Больше 30 не поддерживается, а при 0
// через какое-то время начинает производить некорректные фреймы.
// Если же привысить fps, то резко увеличивается время кодирования.
run->fps_limit = 60;
} else {
run->fps_limit = 30;
}
// H264: 30 or 0? https://github.com/6by9/yavta/blob/master/yavta.c#L2100
// По логике вещей правильно 0, но почему-то на низких разрешениях типа 640x480
// енкодер через несколько секунд перестает производить корректные фреймы.
// JPEG: То же самое про 30 or 0, но еще даже не проверено на низких разрешениях.
{
struct v4l2_streamparm setfps = {0};
setfps.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
setfps.parm.output.timeperframe.numerator = 1;
setfps.parm.output.timeperframe.denominator = enc->fps;
setfps.parm.output.timeperframe.denominator = run->fps_limit;
_E_LOG_DEBUG("Configuring INPUT FPS ...");
_E_XIOCTL(VIDIOC_S_PARM, &setfps, "Can't set INPUT FPS");
}
if (_m2m_encoder_init_buffers(enc, (dma ? "INPUT-DMA" : "INPUT"), V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
&_RUN(input_bufs), &_RUN(n_input_bufs), dma) < 0) {
if (_m2m_encoder_init_buffers(
enc, (dma ? "INPUT-DMA" : "INPUT"), V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
&run->input_bufs, &run->n_input_bufs, dma
) < 0) {
goto error;
}
if (_m2m_encoder_init_buffers(enc, "OUTPUT", V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
&_RUN(output_bufs), &_RUN(n_output_bufs), false) < 0) {
if (_m2m_encoder_init_buffers(
enc, "OUTPUT", V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
&run->output_bufs, &run->n_output_bufs, false
) < 0) {
goto error;
}
@@ -264,18 +304,20 @@ static void _m2m_encoder_prepare(us_m2m_encoder_s *enc, const us_frame_s *frame)
_E_XIOCTL(VIDIOC_STREAMON, &type, "Can't start OUTPUT");
}
_RUN(ready) = true;
_E_LOG_DEBUG("Encoder state: *** READY ***");
run->ready = true;
_E_LOG_INFO("Encoder is ready");
return;
error:
_m2m_encoder_cleanup(enc);
_E_LOG_ERROR("Encoder destroyed due an error (prepare)");
error:
_m2m_encoder_cleanup(enc);
_E_LOG_ERROR("Encoder destroyed due an error (prepare)");
}
static int _m2m_encoder_init_buffers(
us_m2m_encoder_s *enc, const char *name, enum v4l2_buf_type type,
us_m2m_buffer_s **bufs_ptr, unsigned *n_bufs_ptr, bool dma) {
us_m2m_buffer_s **bufs_ptr, uint *n_bufs_ptr, bool dma) {
us_m2m_encoder_runtime_s *const run = enc->run;
_E_LOG_DEBUG("Initializing %s buffers ...", name);
@@ -294,98 +336,102 @@ static int _m2m_encoder_init_buffers(
if (dma) {
*n_bufs_ptr = req.count;
} else {
US_CALLOC(*bufs_ptr, req.count);
for (*n_bufs_ptr = 0; *n_bufs_ptr < req.count; ++(*n_bufs_ptr)) {
struct v4l2_buffer buf = {0};
struct v4l2_plane plane = {0};
buf.type = type;
buf.memory = V4L2_MEMORY_MMAP;
buf.index = *n_bufs_ptr;
buf.length = 1;
buf.m.planes = &plane;
_E_LOG_DEBUG("Querying %s buffer=%u ...", name, *n_bufs_ptr);
_E_XIOCTL(VIDIOC_QUERYBUF, &buf, "Can't query %s buffer=%u", name, *n_bufs_ptr);
_E_LOG_DEBUG("Mapping %s buffer=%u ...", name, *n_bufs_ptr);
if (((*bufs_ptr)[*n_bufs_ptr].data = mmap(
NULL,
plane.length,
PROT_READ | PROT_WRITE,
MAP_SHARED,
_RUN(fd),
plane.m.mem_offset
)) == MAP_FAILED) {
_E_LOG_PERROR("Can't map %s buffer=%u", name, *n_bufs_ptr);
goto error;
}
assert((*bufs_ptr)[*n_bufs_ptr].data != NULL);
(*bufs_ptr)[*n_bufs_ptr].allocated = plane.length;
_E_LOG_DEBUG("Queuing %s buffer=%u ...", name, *n_bufs_ptr);
_E_XIOCTL(VIDIOC_QBUF, &buf, "Can't queue %s buffer=%u", name, *n_bufs_ptr);
}
return 0;
}
US_CALLOC(*bufs_ptr, req.count);
for (*n_bufs_ptr = 0; *n_bufs_ptr < req.count; ++(*n_bufs_ptr)) {
struct v4l2_buffer buf = {0};
struct v4l2_plane plane = {0};
buf.type = type;
buf.memory = V4L2_MEMORY_MMAP;
buf.index = *n_bufs_ptr;
buf.length = 1;
buf.m.planes = &plane;
_E_LOG_DEBUG("Querying %s buffer=%u ...", name, *n_bufs_ptr);
_E_XIOCTL(VIDIOC_QUERYBUF, &buf, "Can't query %s buffer=%u", name, *n_bufs_ptr);
_E_LOG_DEBUG("Mapping %s buffer=%u ...", name, *n_bufs_ptr);
if (((*bufs_ptr)[*n_bufs_ptr].data = mmap(
NULL, plane.length,
PROT_READ | PROT_WRITE, MAP_SHARED,
run->fd, plane.m.mem_offset
)) == MAP_FAILED) {
_E_LOG_PERROR("Can't map %s buffer=%u", name, *n_bufs_ptr);
goto error;
}
assert((*bufs_ptr)[*n_bufs_ptr].data != NULL);
(*bufs_ptr)[*n_bufs_ptr].allocated = plane.length;
_E_LOG_DEBUG("Queuing %s buffer=%u ...", name, *n_bufs_ptr);
_E_XIOCTL(VIDIOC_QBUF, &buf, "Can't queue %s buffer=%u", name, *n_bufs_ptr);
}
_E_LOG_DEBUG("All %s buffers are ready", name);
return 0;
error:
return -1;
error: // Mostly for _E_XIOCTL
return -1;
}
static void _m2m_encoder_cleanup(us_m2m_encoder_s *enc) {
if (_RUN(ready)) {
us_m2m_encoder_runtime_s *const run = enc->run;
bool say = false;
if (run->ready) {
say = true;
# define STOP_STREAM(x_name, x_type) { \
enum v4l2_buf_type m_type_var = x_type; \
_E_LOG_DEBUG("Stopping %s ...", x_name); \
if (us_xioctl(_RUN(fd), VIDIOC_STREAMOFF, &m_type_var) < 0) { \
if (us_xioctl(run->fd, VIDIOC_STREAMOFF, &m_type_var) < 0) { \
_E_LOG_PERROR("Can't stop %s", x_name); \
} \
}
STOP_STREAM("OUTPUT", V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
STOP_STREAM("INPUT", V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
# undef STOP_STREAM
}
# define DESTROY_BUFFERS(x_name, x_target) { \
if (_RUN(x_target##_bufs) != NULL) { \
for (unsigned m_index = 0; m_index < _RUN(n_##x_target##_bufs); ++m_index) { \
if (_RUN(x_target##_bufs[m_index].allocated) > 0 && _RUN(x_target##_bufs[m_index].data) != NULL) { \
if (munmap(_RUN(x_target##_bufs[m_index].data), _RUN(x_target##_bufs[m_index].allocated)) < 0) { \
# define DELETE_BUFFERS(x_name, x_target) { \
if (run->x_target##_bufs != NULL) { \
say = true; \
for (uint m_index = 0; m_index < run->n_##x_target##_bufs; ++m_index) { \
us_m2m_buffer_s *m_buf = &run->x_target##_bufs[m_index]; \
if (m_buf->allocated > 0 && m_buf->data != NULL) { \
if (munmap(m_buf->data, m_buf->allocated) < 0) { \
_E_LOG_PERROR("Can't unmap %s buffer=%u", #x_name, m_index); \
} \
} \
} \
free(_RUN(x_target##_bufs)); \
_RUN(x_target##_bufs) = NULL; \
US_DELETE(run->x_target##_bufs, free); \
} \
_RUN(n_##x_target##_bufs) = 0; \
run->n_##x_target##_bufs = 0; \
}
DELETE_BUFFERS("OUTPUT", output);
DELETE_BUFFERS("INPUT", input);
# undef DELETE_BUFFERS
DESTROY_BUFFERS("OUTPUT", output);
DESTROY_BUFFERS("INPUT", input);
# undef DESTROY_BUFFERS
if (_RUN(fd) >= 0) {
if (close(_RUN(fd)) < 0) {
if (run->fd >= 0) {
say = true;
if (close(run->fd) < 0) {
_E_LOG_PERROR("Can't close encoder device");
}
_RUN(fd) = -1;
run->fd = -1;
}
_RUN(last_online) = -1;
_RUN(ready) = false;
run->last_online = -1;
run->ready = false;
_E_LOG_DEBUG("Encoder state: ~~~ NOT READY ~~~");
if (say) {
_E_LOG_INFO("Encoder closed");
}
}
static int _m2m_encoder_compress_raw(us_m2m_encoder_s *enc, const us_frame_s *src, us_frame_s *dest, bool force_key) {
assert(_RUN(ready));
us_m2m_encoder_runtime_s *const run = enc->run;
_E_LOG_DEBUG("Compressing new frame; force_key=%d ...", force_key);
assert(run->ready);
if (force_key) {
struct v4l2_control ctl = {0};
@@ -401,7 +447,7 @@ static int _m2m_encoder_compress_raw(us_m2m_encoder_s *enc, const us_frame_s *sr
input_buf.length = 1;
input_buf.m.planes = &input_plane;
if (_RUN(dma)) {
if (run->p_dma) {
input_buf.index = 0;
input_buf.memory = V4L2_MEMORY_DMABUF;
input_buf.field = V4L2_FIELD_NONE;
@@ -411,39 +457,47 @@ static int _m2m_encoder_compress_raw(us_m2m_encoder_s *enc, const us_frame_s *sr
input_buf.memory = V4L2_MEMORY_MMAP;
_E_LOG_DEBUG("Grabbing INPUT buffer ...");
_E_XIOCTL(VIDIOC_DQBUF, &input_buf, "Can't grab INPUT buffer");
if (input_buf.index >= _RUN(n_input_bufs)) {
if (input_buf.index >= run->n_input_bufs) {
_E_LOG_ERROR("V4L2 error: grabbed invalid INPUT: buffer=%u, n_bufs=%u",
input_buf.index, _RUN(n_input_bufs));
input_buf.index, run->n_input_bufs);
goto error;
}
_E_LOG_DEBUG("Grabbed INPUT buffer=%u", input_buf.index);
}
const uint64_t now = us_get_now_monotonic_u64();
const u64 now_ts = us_get_now_monotonic_u64();
struct timeval ts = {
.tv_sec = now / 1000000,
.tv_usec = now % 1000000,
.tv_sec = now_ts / 1000000,
.tv_usec = now_ts % 1000000,
};
input_buf.timestamp.tv_sec = ts.tv_sec;
input_buf.timestamp.tv_usec = ts.tv_usec;
input_plane.bytesused = src->used;
input_plane.length = src->used;
if (!_RUN(dma)) {
memcpy(_RUN(input_bufs[input_buf.index].data), src->data, src->used);
if (!run->p_dma) {
memcpy(run->input_bufs[input_buf.index].data, src->data, src->used);
}
const char *input_name = (_RUN(dma) ? "INPUT-DMA" : "INPUT");
const char *input_name = (run->p_dma ? "INPUT-DMA" : "INPUT");
_E_LOG_DEBUG("Sending%s %s buffer ...", (!_RUN(dma) ? " (releasing)" : ""), input_name);
_E_LOG_DEBUG("Sending%s %s buffer ...", (!run->p_dma ? " (releasing)" : ""), input_name);
_E_XIOCTL(VIDIOC_QBUF, &input_buf, "Can't send %s buffer", input_name);
// Для не-DMA отправка буфера по факту являтся освобождением этого буфера
bool input_released = !_RUN(dma);
bool input_released = !run->p_dma;
// https://github.com/pikvm/ustreamer/issues/253
// За секунду точно должно закодироваться.
const ldf deadline_ts = us_get_now_monotonic() + 1;
while (true) {
struct pollfd enc_poll = {_RUN(fd), POLLIN, 0};
if (us_get_now_monotonic() > deadline_ts) {
_E_LOG_ERROR("Waiting for the encoder is too long");
goto error;
}
struct pollfd enc_poll = {run->fd, POLLIN, 0};
_E_LOG_DEBUG("Polling encoder ...");
if (poll(&enc_poll, 1, 1000) < 0 && errno != EINTR) {
_E_LOG_PERROR("Can't poll encoder");
@@ -474,7 +528,7 @@ static int _m2m_encoder_compress_raw(us_m2m_encoder_s *enc, const us_frame_s *sr
// входному (с тем же таймстампом).
_E_LOG_DEBUG("Need to retry OUTPUT buffer due timestamp mismatch");
} else {
us_frame_set_data(dest, _RUN(output_bufs[output_buf.index].data), output_plane.bytesused);
us_frame_set_data(dest, run->output_bufs[output_buf.index].data, output_plane.bytesused);
dest->key = output_buf.flags & V4L2_BUF_FLAG_KEYFRAME;
dest->gop = enc->gop;
done = true;
@@ -488,10 +542,10 @@ static int _m2m_encoder_compress_raw(us_m2m_encoder_s *enc, const us_frame_s *sr
}
}
}
return 0;
error:
return -1;
error: // Mostly for _E_XIOCTL
return -1;
}
#undef _E_XIOCTL

View File

@@ -22,65 +22,49 @@
#pragma once
#include <stdlib.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <math.h>
#include <fcntl.h>
#include <poll.h>
#include <errno.h>
#include <assert.h>
#include <sys/mman.h>
#include <linux/videodev2.h>
#include "../libs/tools.h"
#include "../libs/logging.h"
#include "../libs/types.h"
#include "../libs/frame.h"
#include "../libs/xioctl.h"
typedef struct {
uint8_t *data;
size_t allocated;
u8 *data;
uz allocated;
} us_m2m_buffer_s;
typedef struct {
int fd;
uint fps_limit;
us_m2m_buffer_s *input_bufs;
unsigned n_input_bufs;
uint n_input_bufs;
us_m2m_buffer_s *output_bufs;
unsigned n_output_bufs;
uint n_output_bufs;
unsigned width;
unsigned height;
unsigned input_format;
unsigned stride;
bool dma;
bool ready;
uint p_width;
uint p_height;
uint p_input_format;
uint p_stride;
bool p_dma;
int last_online;
bool ready;
int last_online;
} us_m2m_encoder_runtime_s;
typedef struct {
char *name;
char *path;
unsigned output_format;
unsigned fps;
unsigned bitrate;
unsigned gop;
unsigned quality;
bool allow_dma;
char *name;
char *path;
uint output_format;
uint bitrate;
uint gop;
uint quality;
bool allow_dma;
us_m2m_encoder_runtime_s *run;
} us_m2m_encoder_s;
us_m2m_encoder_s *us_m2m_h264_encoder_init(const char *name, const char *path, unsigned bitrate, unsigned gop);
us_m2m_encoder_s *us_m2m_mjpeg_encoder_init(const char *name, const char *path, unsigned quality);
us_m2m_encoder_s *us_m2m_jpeg_encoder_init(const char *name, const char *path, unsigned quality);
us_m2m_encoder_s *us_m2m_h264_encoder_init(const char *name, const char *path, uint bitrate, uint gop);
us_m2m_encoder_s *us_m2m_mjpeg_encoder_init(const char *name, const char *path, uint quality);
us_m2m_encoder_s *us_m2m_jpeg_encoder_init(const char *name, const char *path, uint quality);
void us_m2m_encoder_destroy(us_m2m_encoder_s *enc);
int us_m2m_encoder_compress(us_m2m_encoder_s *enc, const us_frame_s *src, us_frame_s *dest, bool force_key);

View File

@@ -22,7 +22,6 @@
#include <stdio.h>
#include <stdbool.h>
#include <signal.h>
#include <pthread.h>
@@ -30,6 +29,7 @@
#include "../libs/threading.h"
#include "../libs/logging.h"
#include "../libs/device.h"
#include "../libs/signal.h"
#include "options.h"
#include "encoder.h"
@@ -54,7 +54,7 @@ static void _block_thread_signals(void) {
static void *_stream_loop_thread(void *arg) {
(void)arg;
US_THREAD_RENAME("stream");
US_THREAD_SETTLE("stream");
_block_thread_signals();
us_stream_loop(_g_stream);
return NULL;
@@ -62,7 +62,7 @@ static void *_stream_loop_thread(void *arg) {
static void *_server_loop_thread(void *arg) {
(void)arg;
US_THREAD_RENAME("http");
US_THREAD_SETTLE("http");
_block_thread_signals();
us_server_loop(_g_server);
return NULL;
@@ -76,24 +76,6 @@ static void _signal_handler(int signum) {
us_server_loop_break(_g_server);
}
static void _install_signal_handlers(void) {
struct sigaction sig_act = {0};
assert(!sigemptyset(&sig_act.sa_mask));
sig_act.sa_handler = _signal_handler;
assert(!sigaddset(&sig_act.sa_mask, SIGINT));
assert(!sigaddset(&sig_act.sa_mask, SIGTERM));
US_LOG_DEBUG("Installing SIGINT handler ...");
assert(!sigaction(SIGINT, &sig_act, NULL));
US_LOG_DEBUG("Installing SIGTERM handler ...");
assert(!sigaction(SIGTERM, &sig_act, NULL));
US_LOG_DEBUG("Ignoring SIGPIPE ...");
assert(signal(SIGPIPE, SIG_IGN) != SIG_ERR);
}
int main(int argc, char *argv[]) {
assert(argc >= 0);
int exit_code = 0;
@@ -112,7 +94,7 @@ int main(int argc, char *argv[]) {
us_gpio_init();
# endif
_install_signal_handlers();
us_install_signals_handler(_signal_handler, true);
if ((exit_code = us_server_listen(_g_server)) == 0) {
# ifdef WITH_GPIO

View File

@@ -92,7 +92,7 @@ enum _US_OPT_VALUES {
_O_##x_prefix##_RM, \
_O_##x_prefix##_CLIENT_TTL, \
_O_##x_prefix##_TIMEOUT,
ADD_SINK(SINK)
ADD_SINK(JPEG_SINK)
ADD_SINK(RAW_SINK)
ADD_SINK(H264_SINK)
_O_H264_BITRATE,
@@ -184,18 +184,25 @@ static const struct option _LONG_OPTS[] = {
{"server-timeout", required_argument, NULL, _O_SERVER_TIMEOUT},
# define ADD_SINK(x_opt, x_prefix) \
{x_opt "sink", required_argument, NULL, _O_##x_prefix}, \
{x_opt "sink-mode", required_argument, NULL, _O_##x_prefix##_MODE}, \
{x_opt "sink-rm", no_argument, NULL, _O_##x_prefix##_RM}, \
{x_opt "sink-client-ttl", required_argument, NULL, _O_##x_prefix##_CLIENT_TTL}, \
{x_opt "sink-timeout", required_argument, NULL, _O_##x_prefix##_TIMEOUT},
ADD_SINK("", SINK)
ADD_SINK("raw-", RAW_SINK)
ADD_SINK("h264-", H264_SINK)
{x_opt "-sink", required_argument, NULL, _O_##x_prefix}, \
{x_opt "-sink-mode", required_argument, NULL, _O_##x_prefix##_MODE}, \
{x_opt "-sink-rm", no_argument, NULL, _O_##x_prefix##_RM}, \
{x_opt "-sink-client-ttl", required_argument, NULL, _O_##x_prefix##_CLIENT_TTL}, \
{x_opt "-sink-timeout", required_argument, NULL, _O_##x_prefix##_TIMEOUT},
ADD_SINK("jpeg", JPEG_SINK)
ADD_SINK("raw", RAW_SINK)
ADD_SINK("h264", H264_SINK)
# undef ADD_SINK
// Extra opts for H.264
{"h264-bitrate", required_argument, NULL, _O_H264_BITRATE},
{"h264-gop", required_argument, NULL, _O_H264_GOP},
{"h264-m2m-device", required_argument, NULL, _O_H264_M2M_DEVICE},
# undef ADD_SINK
// Compatibility
{"sink", required_argument, NULL, _O_JPEG_SINK},
{"sink-mode", required_argument, NULL, _O_JPEG_SINK_MODE},
{"sink-rm", no_argument, NULL, _O_JPEG_SINK_RM},
{"sink-client-ttl", required_argument, NULL, _O_JPEG_SINK_CLIENT_TTL},
{"sink-timeout", required_argument, NULL, _O_JPEG_SINK_TIMEOUT},
# ifdef WITH_GPIO
{"gpio-device", required_argument, NULL, _O_GPIO_DEVICE},
@@ -250,7 +257,7 @@ us_options_s *us_options_init(unsigned argc, char *argv[]) {
}
void us_options_destroy(us_options_s *options) {
US_DELETE(options->sink, us_memsink_destroy);
US_DELETE(options->jpeg_sink, us_memsink_destroy);
US_DELETE(options->raw_sink, us_memsink_destroy);
US_DELETE(options->h264_sink, us_memsink_destroy);
@@ -296,11 +303,13 @@ int options_parse(us_options_s *options, us_device_s *dev, us_encoder_s *enc, us
break; \
}
# define OPT_PARSE(x_name, x_dest, x_func, x_invalid, x_available) { \
if ((x_dest = x_func(optarg)) == x_invalid) { \
# define OPT_PARSE_ENUM(x_name, x_dest, x_func, x_available) { \
const int m_value = x_func(optarg); \
if (m_value < 0) { \
printf("Unknown " x_name ": %s; available: %s\n", optarg, x_available); \
return -1; \
} \
x_dest = m_value; \
break; \
}
@@ -336,7 +345,7 @@ int options_parse(us_options_s *options, us_device_s *dev, us_encoder_s *enc, us
bool x_prefix##_rm = false; \
unsigned x_prefix##_client_ttl = 10; \
unsigned x_prefix##_timeout = 1;
ADD_SINK(sink);
ADD_SINK(jpeg_sink);
ADD_SINK(raw_sink);
ADD_SINK(h264_sink);
# undef ADD_SINK
@@ -355,10 +364,10 @@ int options_parse(us_options_s *options, us_device_s *dev, us_encoder_s *enc, us
case _O_RESOLUTION: OPT_RESOLUTION("--resolution", dev->width, dev->height, true);
# pragma GCC diagnostic ignored "-Wsign-compare"
# pragma GCC diagnostic push
case _O_FORMAT: OPT_PARSE("pixel format", dev->format, us_device_parse_format, US_FORMAT_UNKNOWN, US_FORMATS_STR);
case _O_FORMAT: OPT_PARSE_ENUM("pixel format", dev->format, us_device_parse_format, US_FORMATS_STR);
# pragma GCC diagnostic pop
case _O_TV_STANDARD: OPT_PARSE("TV standard", dev->standard, us_device_parse_standard, US_STANDARD_UNKNOWN, US_STANDARDS_STR);
case _O_IO_METHOD: OPT_PARSE("IO method", dev->io_method, us_device_parse_io_method, US_IO_METHOD_UNKNOWN, US_IO_METHODS_STR);
case _O_TV_STANDARD: OPT_PARSE_ENUM("TV standard", dev->standard, us_device_parse_standard, US_STANDARDS_STR);
case _O_IO_METHOD: OPT_PARSE_ENUM("IO method", dev->io_method, us_device_parse_io_method, US_IO_METHODS_STR);
case _O_DESIRED_FPS: OPT_NUMBER("--desired-fps", dev->desired_fps, 0, US_VIDEO_MAX_FPS, 0);
case _O_MIN_FRAME_SIZE: OPT_NUMBER("--min-frame-size", dev->min_frame_size, 1, 8192, 0);
case _O_PERSISTENT: OPT_SET(dev->persistent, true);
@@ -366,10 +375,10 @@ int options_parse(us_options_s *options, us_device_s *dev, us_encoder_s *enc, us
case _O_BUFFERS: OPT_NUMBER("--buffers", dev->n_bufs, 1, 32, 0);
case _O_WORKERS: OPT_NUMBER("--workers", enc->n_workers, 1, 32, 0);
case _O_QUALITY: OPT_NUMBER("--quality", dev->jpeg_quality, 1, 100, 0);
case _O_ENCODER: OPT_PARSE("encoder type", enc->type, us_encoder_parse_type, US_ENCODER_TYPE_UNKNOWN, ENCODER_TYPES_STR);
case _O_ENCODER: OPT_PARSE_ENUM("encoder type", enc->type, us_encoder_parse_type, ENCODER_TYPES_STR);
case _O_GLITCHED_RESOLUTIONS: break; // Deprecated
case _O_BLANK: break; // Deprecated
case _O_LAST_AS_BLANK: OPT_NUMBER("--last-as-blank", stream->last_as_blank, 0, 86400, 0);
case _O_LAST_AS_BLANK: break; // Deprecated
case _O_SLOWDOWN: OPT_SET(stream->slowdown, true);
case _O_DEVICE_TIMEOUT: OPT_NUMBER("--device-timeout", dev->timeout, 1, 60, 0);
case _O_DEVICE_ERROR_DELAY: OPT_NUMBER("--device-error-delay", stream->error_delay, 1, 60, 0);
@@ -430,17 +439,17 @@ int options_parse(us_options_s *options, us_device_s *dev, us_encoder_s *enc, us
# define ADD_SINK(x_opt, x_lp, x_up) \
case _O_##x_up: OPT_SET(x_lp##_name, optarg); \
case _O_##x_up##_MODE: OPT_NUMBER("--" #x_opt "sink-mode", x_lp##_mode, INT_MIN, INT_MAX, 8); \
case _O_##x_up##_MODE: OPT_NUMBER("--" #x_opt "-sink-mode", x_lp##_mode, INT_MIN, INT_MAX, 8); \
case _O_##x_up##_RM: OPT_SET(x_lp##_rm, true); \
case _O_##x_up##_CLIENT_TTL: OPT_NUMBER("--" #x_opt "sink-client-ttl", x_lp##_client_ttl, 1, 60, 0); \
case _O_##x_up##_TIMEOUT: OPT_NUMBER("--" #x_opt "sink-timeout", x_lp##_timeout, 1, 60, 0);
ADD_SINK("", sink, SINK)
ADD_SINK("raw-", raw_sink, RAW_SINK)
ADD_SINK("h264-", h264_sink, H264_SINK)
case _O_##x_up##_CLIENT_TTL: OPT_NUMBER("--" #x_opt "-sink-client-ttl", x_lp##_client_ttl, 1, 60, 0); \
case _O_##x_up##_TIMEOUT: OPT_NUMBER("--" #x_opt "-sink-timeout", x_lp##_timeout, 1, 60, 0);
ADD_SINK("jpeg", jpeg_sink, JPEG_SINK)
ADD_SINK("raw", raw_sink, RAW_SINK)
ADD_SINK("h264", h264_sink, H264_SINK)
# undef ADD_SINK
case _O_H264_BITRATE: OPT_NUMBER("--h264-bitrate", stream->h264_bitrate, 25, 20000, 0);
case _O_H264_GOP: OPT_NUMBER("--h264-gop", stream->h264_gop, 0, 60, 0);
case _O_H264_M2M_DEVICE: OPT_SET(stream->h264_m2m_path, optarg);
# undef ADD_SINK
# ifdef WITH_GPIO
case _O_GPIO_DEVICE: OPT_SET(us_g_gpio.path, optarg);
@@ -457,7 +466,7 @@ int options_parse(us_options_s *options, us_device_s *dev, us_encoder_s *enc, us
};
break;
# endif
case _O_EXIT_ON_NO_CLIENTS: OPT_NUMBER("--exit-on-no-clients", server->exit_on_no_clients, 0, 86400, 0);
case _O_EXIT_ON_NO_CLIENTS: OPT_NUMBER("--exit-on-no-clients", stream->exit_on_no_clients, 0, 86400, 0);
# ifdef WITH_SETPROCTITLE
case _O_PROCESS_NAME_PREFIX: OPT_SET(process_name_prefix, optarg);
# endif
@@ -495,7 +504,7 @@ int options_parse(us_options_s *options, us_device_s *dev, us_encoder_s *enc, us
} \
stream->x_prefix = options->x_prefix; \
}
ADD_SINK("JPEG", sink);
ADD_SINK("JPEG", jpeg_sink);
ADD_SINK("RAW", raw_sink);
ADD_SINK("H264", h264_sink);
# undef ADD_SINK
@@ -622,13 +631,8 @@ static void _help(FILE *fp, const us_device_s *dev, const us_encoder_s *enc, con
SAY(" * M2M-IMAGE ── GPU-accelerated JPEG encoding using V4L2 M2M image interface;");
SAY(" * NOOP ─────── Don't compress MJPEG stream (do nothing).\n");
SAY(" -g|--glitched-resolutions <WxH,...> ─ It doesn't do anything. Still here for compatibility.\n");
SAY(" -k|--blank <path> ─────────────────── It doesn't do anything. Still here for compatibility..\n");
SAY(" during the streaming. Default: black screen 640x480 with 'NO SIGNAL'.\n");
SAY(" -K|--last-as-blank <sec> ──────────── Show the last frame received from the camera after it was disconnected,");
SAY(" but no more than specified time (or endlessly if 0 is specified).");
SAY(" If the device has not yet been online, display some error text.");
SAY(" Default: disabled.");
SAY(" Note: currently this option has no effect on memory sinks.\n");
SAY(" -k|--blank <path> ─────────────────── It doesn't do anything. Still here for compatibility.\n");
SAY(" -K|--last-as-blank <sec> ──────────── It doesn't do anything. Still here for compatibility.\n");
SAY(" -l|--slowdown ─────────────────────── Slowdown capturing to 1 FPS or less when no stream or sink clients");
SAY(" are connected. Useful to reduce CPU consumption. Default: disabled.\n");
SAY(" --device-timeout <sec> ────────────── Timeout for device querying. Default: %u.\n", dev->timeout);
@@ -680,18 +684,20 @@ static void _help(FILE *fp, const us_device_s *dev, const us_encoder_s *enc, con
# define ADD_SINK(x_name, x_opt) \
SAY(x_name " sink options:"); \
SAY("══════════════════"); \
SAY(" --" x_opt "sink <name> ──────────── Use the shared memory to sink " x_name " frames. Default: disabled.\n"); \
SAY(" --" x_opt "sink-mode <mode> ─────── Set " x_name " sink permissions (like 777). Default: 660.\n"); \
SAY(" --" x_opt "sink-rm ──────────────── Remove shared memory on stop. Default: disabled.\n"); \
SAY(" --" x_opt "sink-client-ttl <sec> ── Client TTL. Default: 10.\n"); \
SAY(" --" x_opt "sink-timeout <sec> ───── Timeout for lock. Default: 1.\n");
ADD_SINK("JPEG", "")
ADD_SINK("RAW", "raw-")
ADD_SINK("H264", "h264-")
SAY(" --" x_opt "-sink <name> ──────────── Use the shared memory to sink " x_name " frames. Default: disabled."); \
SAY(" The name should end with a suffix \"." x_opt "\" or \"." x_opt "\"."); \
SAY(" Default: disabled.\n"); \
SAY(" --" x_opt "-sink-mode <mode> ─────── Set " x_name " sink permissions (like 777). Default: 660.\n"); \
SAY(" --" x_opt "-sink-rm ──────────────── Remove shared memory on stop. Default: disabled.\n"); \
SAY(" --" x_opt "-sink-client-ttl <sec> ── Client TTL. Default: 10.\n"); \
SAY(" --" x_opt "-sink-timeout <sec> ───── Timeout for lock. Default: 1.\n");
ADD_SINK("JPEG", "jpeg")
ADD_SINK("RAW", "raw")
ADD_SINK("H264", "h264")
# undef ADD_SINK
SAY(" --h264-bitrate <kbps> ───────── H264 bitrate in Kbps. Default: %u.\n", stream->h264_bitrate);
SAY(" --h264-gop <N> ──────────────── Interval between keyframes. Default: %u.\n", stream->h264_gop);
SAY(" --h264-m2m-device </dev/path> ─ Path to V4L2 M2M encoder device. Default: auto select.\n");
# undef ADD_SINK
# ifdef WITH_GPIO
SAY("GPIO options:");
SAY("═════════════");

View File

@@ -53,7 +53,7 @@ typedef struct {
unsigned argc;
char **argv;
char **argv_copy;
us_memsink_s *sink;
us_memsink_s *jpeg_sink;
us_memsink_s *raw_sink;
us_memsink_s *h264_sink;
} us_options_s;

View File

@@ -22,25 +22,64 @@
#include "stream.h"
#include <stdlib.h>
#include <stdatomic.h>
#include <unistd.h>
#include <errno.h>
#include <assert.h>
static us_workers_pool_s *_stream_init_loop(us_stream_s *stream);
static void _stream_expose_frame(us_stream_s *stream, us_frame_s *frame);
#include <pthread.h>
#include "../libs/types.h"
#include "../libs/tools.h"
#include "../libs/threading.h"
#include "../libs/process.h"
#include "../libs/logging.h"
#include "../libs/ring.h"
#include "../libs/frame.h"
#include "../libs/memsink.h"
#include "../libs/device.h"
#include "blank.h"
#include "encoder.h"
#include "workers.h"
#include "h264.h"
#ifdef WITH_GPIO
# include "gpio/gpio.h"
#endif
#define _RUN(x_next) stream->run->x_next
typedef struct {
pthread_t tid;
us_device_s *dev;
us_queue_s *queue;
pthread_mutex_t *mutex;
atomic_bool *stop;
} _releaser_context_s;
#define _SINK_PUT(x_sink, x_frame) { \
if (stream->x_sink && us_memsink_server_check(stream->x_sink, x_frame)) {\
bool m_key_requested; /* Unused */ \
us_memsink_server_put(stream->x_sink, x_frame, &m_key_requested); \
} \
}
typedef struct {
pthread_t tid;
us_queue_s *queue;
us_stream_s *stream;
atomic_bool *stop;
} _worker_context_s;
#define _H264_PUT(x_frame, x_force_key) { \
if (_RUN(h264)) { \
us_h264_stream_process(_RUN(h264), x_frame, x_force_key); \
} \
}
static void _stream_set_capture_state(us_stream_s *stream, uint width, uint height, bool online, uint captured_fps);
static void *_releaser_thread(void *v_ctx);
static void *_jpeg_thread(void *v_ctx);
static void *_h264_thread(void *v_ctx);
static void *_raw_thread(void *v_ctx);
static us_hw_buffer_s *_get_latest_hw(us_queue_s *queue);
static bool _stream_has_jpeg_clients_cached(us_stream_s *stream);
static bool _stream_has_any_clients_cached(us_stream_s *stream);
static int _stream_init_loop(us_stream_s *stream);
static void _stream_expose_jpeg(us_stream_s *stream, const us_frame_s *frame);
static void _stream_expose_raw(us_stream_s *stream, const us_frame_s *frame);
static void _stream_check_suicide(us_stream_s *stream);
us_stream_s *us_stream_init(us_device_s *dev, us_encoder_s *enc) {
@@ -48,7 +87,9 @@ us_stream_s *us_stream_init(us_device_s *dev, us_encoder_s *enc) {
US_CALLOC(run, 1);
US_RING_INIT_WITH_ITEMS(run->http_jpeg_ring, 4, us_frame_init);
atomic_init(&run->http_has_clients, false);
atomic_init(&run->captured_fps, 0);
atomic_init(&run->http_snapshot_requested, 0);
atomic_init(&run->http_last_request_ts, 0);
atomic_init(&run->http_capture_state, 0);
atomic_init(&run->stop, false);
run->blank = us_blank_init();
@@ -56,11 +97,13 @@ us_stream_s *us_stream_init(us_device_s *dev, us_encoder_s *enc) {
US_CALLOC(stream, 1);
stream->dev = dev;
stream->enc = enc;
stream->last_as_blank = -1;
stream->error_delay = 1;
stream->h264_bitrate = 5000; // Kbps
stream->h264_gop = 30;
stream->run = run;
us_blank_draw(run->blank, "< NO SIGNAL >", dev->width, dev->height);
_stream_set_capture_state(stream, dev->width, dev->height, false, 0);
return stream;
}
@@ -72,257 +115,464 @@ void us_stream_destroy(us_stream_s *stream) {
}
void us_stream_loop(us_stream_s *stream) {
US_LOG_INFO("Using V4L2 device: %s", stream->dev->path);
US_LOG_INFO("Using desired FPS: %u", stream->dev->desired_fps);
us_stream_runtime_s *const run = stream->run;
us_device_s *const dev = stream->dev;
US_LOG_INFO("Using V4L2 device: %s", dev->path);
US_LOG_INFO("Using desired FPS: %u", dev->desired_fps);
atomic_store(&run->http_last_request_ts, us_get_now_monotonic());
if (stream->h264_sink != NULL) {
_RUN(h264) = us_h264_stream_init(stream->h264_sink, stream->h264_m2m_path, stream->h264_bitrate, stream->h264_gop);
run->h264 = us_h264_stream_init(stream->h264_sink, stream->h264_m2m_path, stream->h264_bitrate, stream->h264_gop);
}
for (us_workers_pool_s *pool; (pool = _stream_init_loop(stream)) != NULL;) {
long double grab_after = 0;
unsigned fluency_passed = 0;
unsigned captured_fps_accum = 0;
long long captured_fps_second = 0;
while (!_stream_init_loop(stream)) {
atomic_bool threads_stop;
atomic_init(&threads_stop, false);
pthread_mutex_t release_mutex;
US_MUTEX_INIT(release_mutex);
const uint n_releasers = dev->run->n_bufs;
_releaser_context_s *releasers;
US_CALLOC(releasers, n_releasers);
for (uint index = 0; index < n_releasers; ++index) {
_releaser_context_s *ctx = &releasers[index];
ctx->dev = dev;
ctx->queue = us_queue_init(1);
ctx->mutex = &release_mutex;
ctx->stop = &threads_stop;
US_THREAD_CREATE(ctx->tid, _releaser_thread, ctx);
}
_worker_context_s jpeg_ctx = {
.queue = us_queue_init(dev->run->n_bufs),
.stream = stream,
.stop = &threads_stop,
};
US_THREAD_CREATE(jpeg_ctx.tid, _jpeg_thread, &jpeg_ctx);
_worker_context_s h264_ctx;
if (run->h264 != NULL) {
h264_ctx.queue = us_queue_init(dev->run->n_bufs);
h264_ctx.stream = stream;
h264_ctx.stop = &threads_stop;
US_THREAD_CREATE(h264_ctx.tid, _h264_thread, &h264_ctx);
}
_worker_context_s raw_ctx;
if (stream->raw_sink != NULL) {
raw_ctx.queue = us_queue_init(2);
raw_ctx.stream = stream;
raw_ctx.stop = &threads_stop;
US_THREAD_CREATE(raw_ctx.tid, _raw_thread, &raw_ctx);
}
uint captured_fps_accum = 0;
sll captured_fps_ts = 0;
uint captured_fps = 0;
US_LOG_INFO("Capturing ...");
while (!atomic_load(&_RUN(stop))) {
US_SEP_DEBUG('-');
US_LOG_DEBUG("Waiting for worker ...");
us_worker_s *const ready_wr = us_workers_pool_wait(pool);
us_encoder_job_s *const ready_job = (us_encoder_job_s *)(ready_wr->job);
if (ready_job->hw != NULL) {
if (us_device_release_buffer(stream->dev, ready_job->hw) < 0) {
ready_wr->job_failed = true;
}
ready_job->hw = NULL;
if (!ready_wr->job_failed) {
if (ready_wr->job_timely) {
_stream_expose_frame(stream, ready_job->dest);
US_LOG_PERF("##### Encoded JPEG exposed; worker=%s, latency=%.3Lf",
ready_wr->name, us_get_now_monotonic() - ready_job->dest->grab_ts);
} else {
US_LOG_PERF("----- Encoded JPEG dropped; worker=%s", ready_wr->name);
}
} else {
break;
}
uint slowdown_count = 0;
while (!atomic_load(&run->stop) && !atomic_load(&threads_stop)) {
us_hw_buffer_s *hw;
switch (us_device_grab_buffer(dev, &hw)) {
case -2: continue; // Broken frame
case -1: goto close; // Error
default: break; // Grabbed on >= 0
}
bool h264_force_key = false;
if (stream->slowdown) {
unsigned slc = 0;
for (; slc < 10 && !atomic_load(&_RUN(stop)) && !us_stream_has_clients(stream); ++slc) {
usleep(100000);
}
h264_force_key = (slc == 10);
const sll now_sec_ts = us_floor_ms(us_get_now_monotonic());
if (now_sec_ts != captured_fps_ts) {
captured_fps = captured_fps_accum;
captured_fps_accum = 0;
captured_fps_ts = now_sec_ts;
US_LOG_PERF_FPS("A new second has come; captured_fps=%u", captured_fps);
}
captured_fps_accum += 1;
if (atomic_load(&_RUN(stop))) {
break;
_stream_set_capture_state(stream, dev->run->width, dev->run->height, true, captured_fps);
# ifdef WITH_GPIO
us_gpio_set_stream_online(true);
# endif
us_device_buffer_incref(hw); // JPEG
us_queue_put(jpeg_ctx.queue, hw, 0);
if (run->h264 != NULL) {
us_device_buffer_incref(hw); // H264
us_queue_put(h264_ctx.queue, hw, 0);
}
if (stream->raw_sink != NULL) {
us_device_buffer_incref(hw); // RAW
us_queue_put(raw_ctx.queue, hw, 0);
}
us_queue_put(releasers[hw->buf.index].queue, hw, 0); // Plan to release
bool has_read;
bool has_error;
const int selected = us_device_select(stream->dev, &has_read, &has_error);
if (selected < 0) {
if (errno != EINTR) {
US_LOG_PERROR("Mainloop select() error");
break;
}
} else if (selected == 0) { // Persistent timeout
# ifdef WITH_GPIO
us_gpio_set_stream_online(false);
# endif
} else {
if (has_read) {
# ifdef WITH_GPIO
us_gpio_set_stream_online(true);
# endif
const long double now = us_get_now_monotonic();
const long long now_second = us_floor_ms(now);
us_hw_buffer_s *hw;
const int buf_index = us_device_grab_buffer(stream->dev, &hw);
if (buf_index >= 0) {
if (now < grab_after) {
fluency_passed += 1;
US_LOG_VERBOSE("Passed %u frames for fluency: now=%.03Lf, grab_after=%.03Lf",
fluency_passed, now, grab_after);
if (us_device_release_buffer(stream->dev, hw) < 0) {
break;
}
} else {
fluency_passed = 0;
if (now_second != captured_fps_second) {
US_LOG_PERF_FPS("A new second has come; captured_fps=%u", captured_fps_accum);
atomic_store(&stream->run->captured_fps, captured_fps_accum);
captured_fps_accum = 0;
captured_fps_second = now_second;
}
captured_fps_accum += 1;
const long double fluency_delay = us_workers_pool_get_fluency_delay(pool, ready_wr);
grab_after = now + fluency_delay;
US_LOG_VERBOSE("Fluency: delay=%.03Lf, grab_after=%.03Lf", fluency_delay, grab_after);
ready_job->hw = hw;
us_workers_pool_assign(pool, ready_wr);
US_LOG_DEBUG("Assigned new frame in buffer=%d to worker=%s", buf_index, ready_wr->name);
_SINK_PUT(raw_sink, &hw->raw);
_H264_PUT(&hw->raw, h264_force_key);
}
} else if (buf_index != -2) { // -2 for broken frame
break;
}
}
if (has_error && us_device_consume_event(stream->dev) < 0) {
break;
// Мы не обновляем здесь состояние синков, потому что это происходит внутри обслуживающих их потоков
_stream_check_suicide(stream);
if (stream->slowdown && !_stream_has_any_clients_cached(stream)) {
usleep(100 * 1000);
slowdown_count = (slowdown_count + 1) % 10;
if (slowdown_count > 0) {
continue;
}
}
}
us_workers_pool_destroy(pool);
us_device_close(stream->dev);
close:
atomic_store(&threads_stop, true);
# ifdef WITH_GPIO
us_gpio_set_stream_online(false);
# endif
if (stream->raw_sink != NULL) {
US_THREAD_JOIN(raw_ctx.tid);
us_queue_destroy(raw_ctx.queue);
}
if (run->h264 != NULL) {
US_THREAD_JOIN(h264_ctx.tid);
us_queue_destroy(h264_ctx.queue);
}
US_THREAD_JOIN(jpeg_ctx.tid);
us_queue_destroy(jpeg_ctx.queue);
for (uint index = 0; index < n_releasers; ++index) {
US_THREAD_JOIN(releasers[index].tid);
us_queue_destroy(releasers[index].queue);
}
free(releasers);
US_MUTEX_DESTROY(release_mutex);
atomic_store(&threads_stop, false);
us_encoder_close(stream->enc);
us_device_close(dev);
if (!atomic_load(&run->stop)) {
US_SEP_INFO('=');
}
}
US_DELETE(_RUN(h264), us_h264_stream_destroy);
US_DELETE(run->h264, us_h264_stream_destroy);
}
// Asks us_stream_loop() to exit: the grabber loop and the worker threads
// poll run->stop and terminate once it becomes true.
void us_stream_loop_break(us_stream_s *stream) {
	// The flag was previously stored twice (once through the legacy _RUN()
	// macro and once directly on the same field) — one store is sufficient.
	atomic_store(&stream->run->stop, true);
}
bool us_stream_has_clients(us_stream_s *stream) {
return (
atomic_load(&_RUN(http_has_clients))
// has_clients синков НЕ обновляются в реальном времени
|| (stream->sink != NULL && atomic_load(&stream->sink->has_clients))
|| (_RUN(h264) != NULL && /*_RUN(h264->sink) == NULL ||*/ atomic_load(&_RUN(h264->sink->has_clients)))
// Unpacks the atomically published capture state word
// (see _stream_set_capture_state() for the packing layout):
//   bits 0..15  - width
//   bits 16..31 - height
//   bits 32..47 - captured FPS
//   bit  48     - online flag
void us_stream_get_capture_state(us_stream_s *stream, uint *width, uint *height, bool *online, uint *captured_fps) {
	const u64 packed = atomic_load(&stream->run->http_capture_state);
	*width = (uint)(packed & 0xFFFF);
	*height = (uint)((packed >> 16) & 0xFFFF);
	*captured_fps = (uint)((packed >> 32) & 0xFFFF);
	*online = (bool)((packed >> 48) & 1);
}
// Packs the current capture state into a single u64 word and publishes it
// atomically, so HTTP handlers can read a consistent snapshot lock-free
// via us_stream_get_capture_state(). Layout: width in bits 0..15, height
// in 16..31, captured FPS in 32..47, online flag in bit 48.
void _stream_set_capture_state(us_stream_s *stream, uint width, uint height, bool online, uint captured_fps) {
	u64 packed = (u64)(width & 0xFFFF);
	packed |= ((u64)(height & 0xFFFF) << 16);
	packed |= ((u64)(captured_fps & 0xFFFF) << 32);
	packed |= ((u64)(online ? 1 : 0) << 48);
	atomic_store(&stream->run->http_capture_state, packed);
}
static us_workers_pool_s *_stream_init_loop(us_stream_s *stream) {
int access_errno = 0;
while (!atomic_load(&_RUN(stop))) {
atomic_store(&stream->run->captured_fps, 0);
_stream_expose_frame(stream, NULL);
static void *_releaser_thread(void *v_ctx) {
US_THREAD_SETTLE("str_rel")
_releaser_context_s *ctx = v_ctx;
if (access(stream->dev->path, R_OK|W_OK) < 0) {
if (access_errno != errno) {
US_SEP_INFO('=');
US_LOG_PERROR("Can't access device");
US_LOG_INFO("Waiting for the device access ...");
access_errno = errno;
while (!atomic_load(ctx->stop)) {
us_hw_buffer_s *hw;
if (us_queue_get(ctx->queue, (void**)&hw, 0.1) < 0) {
continue;
}
while (atomic_load(&hw->refs) > 0) {
if (atomic_load(ctx->stop)) {
goto done;
}
goto sleep_and_retry;
usleep(5 * 1000);
}
US_SEP_INFO('=');
access_errno = 0;
stream->dev->dma_export = (
stream->enc->type == US_ENCODER_TYPE_M2M_VIDEO
|| stream->enc->type == US_ENCODER_TYPE_M2M_IMAGE
|| _RUN(h264) != NULL
);
if (us_device_open(stream->dev) == 0) {
return us_encoder_workers_pool_init(stream->enc, stream->dev);
US_MUTEX_LOCK(*ctx->mutex);
const int released = us_device_release_buffer(ctx->dev, hw);
US_MUTEX_UNLOCK(*ctx->mutex);
if (released < 0) {
goto done;
}
US_LOG_INFO("Sleeping %u seconds before new stream init ...", stream->error_delay);
}
sleep_and_retry:
sleep(stream->error_delay);
done:
atomic_store(ctx->stop, true); // Stop all other guys on error
return NULL;
}
// JPEG worker thread: pulls raw hw buffers from its queue, schedules them
// on the encoder workers pool and publishes finished JPEGs to the HTTP
// ring and the JPEG memsink. Implements "fluency" pacing: frames grabbed
// faster than the encoder can timely serve are dropped.
static void *_jpeg_thread(void *v_ctx) {
	US_THREAD_SETTLE("str_jpeg")
	_worker_context_s *ctx = v_ctx;
	us_stream_s *stream = ctx->stream;

	ldf grab_after_ts = 0; // Don't assign new frames to workers before this moment
	uint fluency_passed = 0;

	while (!atomic_load(ctx->stop)) {
		// Wait for any worker to become free; collect its previous job first.
		us_worker_s *const ready_wr = us_workers_pool_wait(stream->enc->run->pool);
		us_encoder_job_s *const ready_job = ready_wr->job;

		if (ready_job->hw != NULL) {
			// Done with the previous hw buffer - drop our reference.
			us_device_buffer_decref(ready_job->hw);
			ready_job->hw = NULL;
			if (ready_wr->job_failed) {
				// pass
			} else if (ready_wr->job_timely) {
				_stream_expose_jpeg(stream, ready_job->dest);
				if (atomic_load(&stream->run->http_snapshot_requested) > 0) { // Process real snapshots
					atomic_fetch_sub(&stream->run->http_snapshot_requested, 1);
				}
				US_LOG_PERF("JPEG: ##### Encoded JPEG exposed; worker=%s, latency=%.3Lf",
					ready_wr->name, us_get_now_monotonic() - ready_job->dest->grab_ts);
			} else {
				US_LOG_PERF("JPEG: ----- Encoded JPEG dropped; worker=%s", ready_wr->name);
			}
		}

		us_hw_buffer_s *hw = _get_latest_hw(ctx->queue);
		if (hw == NULL) {
			continue; // Queue timeout, try again
		}

		// Skip encoding entirely when nobody consumes JPEGs: no HTTP clients,
		// no pending snapshot requests, no JPEG sink clients. The sink check
		// also refreshes the sink's cached has_clients flag.
		const bool update_required = (stream->jpeg_sink != NULL && us_memsink_server_check(stream->jpeg_sink, NULL));
		if (!update_required && !_stream_has_jpeg_clients_cached(stream)) {
			US_LOG_VERBOSE("JPEG: Passed encoding because nobody is watching");
			us_device_buffer_decref(hw);
			continue;
		}

		// Fluency pacing: drop frames that arrived before the estimated
		// moment when the pool can deliver a timely result again.
		const ldf now_ts = us_get_now_monotonic();
		if (now_ts < grab_after_ts) {
			fluency_passed += 1;
			US_LOG_VERBOSE("JPEG: Passed %u frames for fluency: now=%.03Lf, grab_after=%.03Lf",
				fluency_passed, now_ts, grab_after_ts);
			us_device_buffer_decref(hw);
			continue;
		}
		fluency_passed = 0;

		const ldf fluency_delay = us_workers_pool_get_fluency_delay(stream->enc->run->pool, ready_wr);
		grab_after_ts = now_ts + fluency_delay;
		US_LOG_VERBOSE("JPEG: Fluency: delay=%.03Lf, grab_after=%.03Lf", fluency_delay, grab_after_ts);

		// Hand the buffer over to the worker; the job now owns our reference.
		ready_job->hw = hw;
		us_workers_pool_assign(stream->enc->run->pool, ready_wr);
		US_LOG_DEBUG("JPEG: Assigned new frame in buffer=%d to worker=%s", hw->buf.index, ready_wr->name);
	}
	return NULL;
}
static void _stream_expose_frame(us_stream_s *stream, us_frame_s *frame) {
// H264 worker thread: pulls raw hw buffers from its queue and feeds them
// to the H264 encoder stream, respecting the encoder's FPS limit.
static void *_h264_thread(void *v_ctx) {
	US_THREAD_SETTLE("str_h264");
	_worker_context_s *ctx = v_ctx;
	us_h264_stream_s *h264 = ctx->stream->run->h264;

	ldf grab_after_ts = 0; // Don't encode frames grabbed before this moment
	ldf last_encode_ts = us_get_now_monotonic();

	while (!atomic_load(ctx->stop)) {
		us_hw_buffer_s *hw = _get_latest_hw(ctx->queue);
		if (hw == NULL) {
			continue; // Queue timeout, try again
		}

		// Skip encoding when the H264 sink has no clients
		// (the check also refreshes the sink's cached has_clients flag).
		if (!us_memsink_server_check(h264->sink, NULL)) {
			us_device_buffer_decref(hw);
			US_LOG_VERBOSE("H264: Passed encoding because nobody is watching");
			continue;
		}

		// Drop frames that would exceed the encoder's FPS limit.
		if (hw->raw.grab_ts < grab_after_ts) {
			us_device_buffer_decref(hw);
			US_LOG_VERBOSE("H264: Passed encoding for FPS limit: %u", h264->enc->run->fps_limit);
			continue;
		}

		// Force a keyframe if the capture produced no frames for a while
		const ldf now_ts = us_get_now_monotonic();
		const bool force_key = (last_encode_ts + 0.5 < now_ts);
		us_h264_stream_process(h264, &hw->raw, force_key);
		last_encode_ts = now_ts;

		// The M2M encoder adds ~100ms of latency at 1080p if it's fed more
		// than 30 FPS. So there are two modes: 60 FPS for small videos and
		// 30 for 1920x1080(1200). The next frame is grabbed no earlier than
		// required by the FPS limit, minus a small tolerance for uneven
		// capture timing - a bit less than 1/60, roughly a third of 1/30.
		const ldf frame_interval = (ldf)1 / h264->enc->run->fps_limit;
		grab_after_ts = hw->raw.grab_ts + frame_interval - 0.01;

		us_device_buffer_decref(hw);
	}
	return NULL;
}
// RAW worker thread: publishes raw (unencoded) frames to the RAW memsink
// while it has clients. No pacing here: _get_latest_hw() already collapses
// the queue to the most recent frame.
static void *_raw_thread(void *v_ctx) {
	US_THREAD_SETTLE("str_raw");
	_worker_context_s *ctx = v_ctx;

	while (!atomic_load(ctx->stop)) {
		us_hw_buffer_s *hw = _get_latest_hw(ctx->queue);
		if (hw == NULL) {
			continue; // Queue timeout, try again
		}

		// Skip publishing when the RAW sink has no clients
		// (the check also refreshes the sink's cached has_clients flag).
		if (!us_memsink_server_check(ctx->stream->raw_sink, NULL)) {
			us_device_buffer_decref(hw);
			US_LOG_VERBOSE("RAW: Passed publishing because nobody is watching");
			continue;
		}

		// The last argument is the optional key-request output pointer:
		// pass NULL (not the boolean `false`, which only worked because
		// false == 0 is a null pointer constant), consistent with the other
		// us_memsink_server_put() call sites in this file.
		us_memsink_server_put(ctx->stream->raw_sink, &hw->raw, NULL);
		us_device_buffer_decref(hw);
	}
	return NULL;
}
// Pops one hw buffer from the queue (0.1s timeout), then drains the queue
// keeping only the most recent frame; the references of all skipped frames
// are released. Returns NULL on queue timeout.
static us_hw_buffer_s *_get_latest_hw(us_queue_s *queue) {
	us_hw_buffer_s *hw;
	if (us_queue_get(queue, (void**)&hw, 0.1) < 0) {
		return NULL;
	}
	while (!us_queue_is_empty(queue)) { // Take only the most recent frame
		us_device_buffer_decref(hw);
		// Don't call us_queue_get() inside assert(): under NDEBUG the whole
		// expression - including the dequeue side effect - is compiled out,
		// which would make this loop spin forever on a non-empty queue.
		const int got = us_queue_get(queue, (void**)&hw, 0);
		assert(got == 0); // Can't fail: we are the only consumer of this queue
		(void)got; // Silence the unused-variable warning under NDEBUG
	}
	return hw;
}
// Returns true if anyone is interested in JPEG output: active HTTP
// streamers, pending snapshot requests, or JPEG sink clients.
// Note: the sink's has_clients flag is a cached value - it is refreshed
// by the worker threads, not here.
static bool _stream_has_jpeg_clients_cached(us_stream_s *stream) {
	const us_stream_runtime_s *const rt = stream->run;
	if (atomic_load(&rt->http_has_clients)) {
		return true;
	}
	if (atomic_load(&rt->http_snapshot_requested) > 0) {
		return true;
	}
	return (stream->jpeg_sink != NULL && atomic_load(&stream->jpeg_sink->has_clients));
}
// Returns true if any consumer is watching at all: JPEG (HTTP, snapshot
// or sink), the H264 sink, or the RAW sink. All flags are cached values
// maintained by the worker threads.
static bool _stream_has_any_clients_cached(us_stream_s *stream) {
	const us_stream_runtime_s *const rt = stream->run;
	if (_stream_has_jpeg_clients_cached(stream)) {
		return true;
	}
	if (rt->h264 != NULL && atomic_load(&rt->h264->sink->has_clients)) {
		return true;
	}
	return (stream->raw_sink != NULL && atomic_load(&stream->raw_sink->has_clients));
}
static int _stream_init_loop(us_stream_s *stream) {
us_stream_runtime_s *const run = stream->run;
us_blank_s *const blank = run->blank;
us_frame_s *new = NULL;
bool waiting_reported = false;
while (!atomic_load(&stream->run->stop)) {
# ifdef WITH_GPIO
us_gpio_set_stream_online(false);
# endif
if (frame != NULL) {
new = frame;
_RUN(last_as_blank_ts) = 0; // Останавливаем таймер
US_LOG_DEBUG("Exposed ALIVE video frame");
} else {
unsigned width = stream->dev->run->width;
unsigned height = stream->dev->run->height;
if (width == 0 || height == 0) {
width = stream->dev->width;
height = stream->dev->height;
// Флаги has_clients у синков не обновляются сами по себе, поэтому обновим их
// на каждой итерации старта стрима. После старта этим будут заниматься воркеры.
if (stream->jpeg_sink != NULL) {
us_memsink_server_check(stream->jpeg_sink, NULL);
}
if (stream->run->h264 != NULL) {
us_memsink_server_check(stream->run->h264->sink, NULL);
}
if (stream->raw_sink != NULL) {
us_memsink_server_check(stream->raw_sink, NULL);
}
us_blank_draw(blank, "< NO SIGNAL >", width, height);
if (run->last_online) { // Если переходим из online в offline
if (stream->last_as_blank < 0) { // Если last_as_blank выключен, просто покажем старую картинку
new = blank->jpeg;
US_LOG_INFO("Changed video frame to BLANK");
} else if (stream->last_as_blank > 0) { // // Если нужен таймер - запустим
_RUN(last_as_blank_ts) = us_get_now_monotonic() + stream->last_as_blank;
US_LOG_INFO("Freezed last ALIVE video frame for %d seconds", stream->last_as_blank);
} else { // last_as_blank == 0 - показываем последний фрейм вечно
US_LOG_INFO("Freezed last ALIVE video frame forever");
_stream_check_suicide(stream);
stream->dev->dma_export = (
stream->enc->type == US_ENCODER_TYPE_M2M_VIDEO
|| stream->enc->type == US_ENCODER_TYPE_M2M_IMAGE
|| run->h264 != NULL
);
switch (us_device_open(stream->dev)) {
case -2:
if (!waiting_reported) {
waiting_reported = true;
US_LOG_INFO("Waiting for the capture device ...");
}
goto offline_and_retry;
case -1:
waiting_reported = false;
goto offline_and_retry;
default: break;
}
us_encoder_open(stream->enc, stream->dev);
return 0;
offline_and_retry:
for (uint count = 0; count < stream->error_delay * 10; ++count) {
if (atomic_load(&run->stop)) {
break;
}
} else if (stream->last_as_blank < 0) {
new = blank->jpeg;
// US_LOG_INFO("Changed video frame to BLANK");
}
if (count % 10 == 0) {
// Каждую секунду повторяем blank
uint width = stream->dev->run->width;
uint height = stream->dev->run->height;
if (width == 0 || height == 0) {
width = stream->dev->width;
height = stream->dev->height;
}
us_blank_draw(run->blank, "< NO SIGNAL >", width, height);
if ( // Если уже оффлайн, включена фича last_as_blank с таймером и он запущен
stream->last_as_blank > 0
&& _RUN(last_as_blank_ts) != 0
&& _RUN(last_as_blank_ts) < us_get_now_monotonic()
) {
new = blank->jpeg;
_RUN(last_as_blank_ts) = 0; // Останавливаем таймер
US_LOG_INFO("Changed last ALIVE video frame to BLANK");
_stream_set_capture_state(stream, width, height, false, 0);
_stream_expose_jpeg(stream, run->blank->jpeg);
if (run->h264 != NULL) {
us_h264_stream_process(run->h264, run->blank->raw, true);
}
_stream_expose_raw(stream, run->blank->raw);
}
usleep(100 * 1000);
}
}
return -1;
}
int ri = -1;
while (
!atomic_load(&_RUN(stop))
&& ((ri = us_ring_producer_acquire(run->http_jpeg_ring, 0)) < 0)
) {
US_LOG_ERROR("Can't push JPEG to HTTP ring (no free slots)");
static void _stream_expose_jpeg(us_stream_s *stream, const us_frame_s *frame) {
us_stream_runtime_s *const run = stream->run;
int ri;
while ((ri = us_ring_producer_acquire(run->http_jpeg_ring, 0)) < 0) {
if (atomic_load(&run->stop)) {
return;
}
}
if (ri < 0) {
return;
}
us_frame_s *const dest = run->http_jpeg_ring->items[ri];
if (new == NULL) {
dest->used = 0;
dest->online = false;
} else {
us_frame_copy(new, dest);
dest->online = true;
}
run->last_online = (frame != NULL);
us_frame_copy(frame, dest);
us_ring_producer_release(run->http_jpeg_ring, ri);
_SINK_PUT(sink, (frame != NULL ? frame : blank->jpeg));
if (frame == NULL) {
_SINK_PUT(raw_sink, blank->raw);
_H264_PUT(blank->raw, false);
if (stream->jpeg_sink != NULL) {
us_memsink_server_put(stream->jpeg_sink, dest, NULL);
}
}
// Publishes the frame to the RAW memsink, if one is configured.
static void _stream_expose_raw(us_stream_s *stream, const us_frame_s *frame) {
	if (stream->raw_sink == NULL) {
		return; // RAW sink is disabled
	}
	us_memsink_server_put(stream->raw_sink, frame, NULL);
}
// Implements the exit-on-no-clients feature: requests process termination
// if nobody (HTTP or sink clients) was watching for longer than the
// configured number of seconds. No-op when the feature is disabled (0).
static void _stream_check_suicide(us_stream_s *stream) {
	if (stream->exit_on_no_clients == 0) {
		return;
	}
	us_stream_runtime_s *const run = stream->run;
	const ldf now_ts = us_get_now_monotonic();
	const ull http_last_request_ts = atomic_load(&run->http_last_request_ts); // Seconds
	if (_stream_has_any_clients_cached(stream)) {
		// Someone is watching - bump the activity timestamp.
		atomic_store(&run->http_last_request_ts, now_ts);
	} else if (http_last_request_ts + stream->exit_on_no_clients < now_ts) {
		US_LOG_INFO("No requests or HTTP/sink clients found in last %u seconds, exiting ...",
			stream->exit_on_no_clients);
		us_process_suicide();
		// Reset the timer so the suicide isn't re-requested on every iteration.
		atomic_store(&run->http_last_request_ts, now_ts);
	}
}

View File

@@ -22,42 +22,29 @@
#pragma once
#include <stdlib.h>
#include <stdbool.h>
#include <stdatomic.h>
#include <unistd.h>
#include <errno.h>
#include <assert.h>
#include <pthread.h>
#include <linux/videodev2.h>
#include "../libs/tools.h"
#include "../libs/threading.h"
#include "../libs/logging.h"
#include "../libs/types.h"
#include "../libs/queue.h"
#include "../libs/ring.h"
#include "../libs/frame.h"
#include "../libs/memsink.h"
#include "../libs/device.h"
#include "blank.h"
#include "encoder.h"
#include "workers.h"
#include "h264.h"
#ifdef WITH_GPIO
# include "gpio/gpio.h"
#endif
typedef struct {
us_h264_stream_s *h264;
us_ring_s *http_jpeg_ring;
atomic_bool http_has_clients;
atomic_uint captured_fps;
bool last_online;
long double last_as_blank_ts;
us_h264_stream_s *h264;
atomic_uint http_snapshot_requested;
atomic_ullong http_last_request_ts; // Seconds
atomic_ullong http_capture_state; // Bits
us_blank_s *blank;
@@ -70,14 +57,15 @@ typedef struct {
int last_as_blank;
bool slowdown;
unsigned error_delay;
uint error_delay;
uint exit_on_no_clients;
us_memsink_s *sink;
us_memsink_s *jpeg_sink;
us_memsink_s *raw_sink;
us_memsink_s *h264_sink;
unsigned h264_bitrate;
unsigned h264_gop;
uint h264_bitrate;
uint h264_gop;
char *h264_m2m_path;
us_stream_runtime_s *run;
@@ -90,4 +78,4 @@ void us_stream_destroy(us_stream_s *stream);
void us_stream_loop(us_stream_s *stream);
void us_stream_loop_break(us_stream_s *stream);
bool us_stream_has_clients(us_stream_s *stream);
void us_stream_get_capture_state(us_stream_s *stream, uint *width, uint *height, bool *online, uint *captured_fps);

View File

@@ -62,7 +62,7 @@ us_workers_pool_s *us_workers_pool_init(
WR(pool) = pool;
WR(job) = job_init(job_init_arg);
US_THREAD_CREATE(WR(tid), _worker_thread, (void *)&(pool->workers[number]));
US_THREAD_CREATE(WR(tid), _worker_thread, (void*)&(pool->workers[number]));
pool->free_workers += 1;
# undef WR
@@ -176,9 +176,9 @@ long double us_workers_pool_get_fluency_delay(us_workers_pool_s *pool, const us_
}
static void *_worker_thread(void *v_worker) {
us_worker_s *wr = (us_worker_s *)v_worker;
us_worker_s *wr = v_worker;
US_THREAD_RENAME("%s", wr->name);
US_THREAD_SETTLE("%s", wr->name);
US_LOG_DEBUG("Hello! I am a worker %s ^_^", wr->name);
while (!atomic_load(&wr->pool->stop)) {

View File

@@ -44,15 +44,14 @@
#include "../libs/frametext.h"
static void _drm_vsync_callback(int fd, uint n_frame, uint sec, uint usec, void *v_run);
static int _drm_expose_raw(us_drm_s *drm, const us_frame_s *frame);
static void _drm_cleanup(us_drm_s *drm);
static int _drm_ensure(us_drm_s *drm, const us_frame_s *frame, float hz);
static void _drm_vsync_callback(int fd, uint n_frame, uint sec, uint usec, void *v_buf);
static int _drm_check_status(us_drm_s *drm);
static void _drm_ensure_dpms_power(us_drm_s *drm, bool on);
static int _drm_init_buffers(us_drm_s *drm, const us_device_s *dev);
static int _drm_find_sink(us_drm_s *drm, uint width, uint height, float hz);
static int _drm_init_buffers(us_drm_s *drm);
static int _drm_start_video(us_drm_s *drm);
static drmModeModeInfo *_find_best_mode(drmModeConnector *conn, uint width, uint height, float hz);
static u32 _find_dpms(int fd, drmModeConnector *conn);
static u32 _find_crtc(int fd, drmModeRes *res, drmModeConnector *conn, u32 *taken_crtcs);
static const char *_connector_type_to_string(u32 type);
static float _get_refresh_rate(const drmModeModeInfo *mode);
@@ -70,32 +69,203 @@ us_drm_s *us_drm_init(void) {
US_CALLOC(run, 1);
run->fd = -1;
run->status_fd = -1;
run->dpms_state = -1;
run->has_vsync = true;
run->exposing_dma_fd = -1;
run->ft = us_frametext_init();
run->state = US_DRM_STATE_CLOSED;
us_drm_s *drm;
US_CALLOC(drm, 1);
drm->path = "/dev/dri/card0";
// drm->path = "/dev/dri/card0";
drm->path = "/dev/dri/by-path/platform-gpu-card";
drm->port = "HDMI-A-1";
drm->n_bufs = 4;
drm->timeout = 5;
drm->run = run;
return drm;
}
// Frees the DRM instance and its runtime state.
// NOTE(review): _drm_cleanup() is not visible in this chunk - presumably it
// releases device resources; confirm it is safe to call when the device
// was never opened.
void us_drm_destroy(us_drm_s *drm) {
	_drm_cleanup(drm);
	us_frametext_destroy(drm->run->ft);
	US_DELETE(drm->run, free);
	US_DELETE(drm, free); // cppcheck-suppress uselessAssignmentPtrArg
}
// Opens and configures the DRM device. When dev == NULL, or when the
// capture format/resolution can't be displayed directly, the device is
// opened in "STUB" mode (text placeholder instead of DMA video).
// Returns: 0 on success with DMA, a positive US_DRM_STUB_* reason code
// for STUB mode, -1 on error, -2 if the display is unplugged.
int us_drm_open(us_drm_s *drm, const us_device_s *dev) {
	us_drm_runtime_s *const run = drm->run;

	assert(run->fd < 0); // Must not be opened already

	switch (_drm_check_status(drm)) {
		case 0: break;
		case -2: goto unplugged;
		default: goto error;
	}

	_D_LOG_INFO("Configuring DRM device for %s ...", (dev == NULL ? "STUB" : "DMA"));

	if ((run->fd = open(drm->path, O_RDWR | O_CLOEXEC | O_NONBLOCK)) < 0) {
		_D_LOG_PERROR("Can't open DRM device");
		goto error;
	}
	_D_LOG_DEBUG("DRM device fd=%d opened", run->fd);

	// Decide between DMA and STUB mode.
	int stub = 0; // Open the real device with DMA
	if (dev == NULL) {
		stub = US_DRM_STUB_USER;
	} else if (dev->run->format != V4L2_PIX_FMT_RGB24) {
		stub = US_DRM_STUB_BAD_FORMAT;
		char fourcc_str[8];
		us_fourcc_to_string(dev->run->format, fourcc_str, 8);
		_D_LOG_ERROR("Input format %s is not supported, forcing to STUB ...", fourcc_str);
	}

	// Verify the required DRM capabilities.
#	define CHECK_CAP(x_cap) { \
		_D_LOG_DEBUG("Checking %s ...", #x_cap); \
		u64 m_check; \
		if (drmGetCap(run->fd, x_cap, &m_check) < 0) { \
			_D_LOG_PERROR("Can't check " #x_cap); \
			goto error; \
		} \
		if (!m_check) { \
			_D_LOG_ERROR(#x_cap " is not supported"); \
			goto error; \
		} \
	}
	CHECK_CAP(DRM_CAP_DUMB_BUFFER);
	if (stub == 0) {
		CHECK_CAP(DRM_CAP_PRIME); // PRIME is only needed for DMA import
	}
#	undef CHECK_CAP

	// In STUB mode pass zeros so the sink picks its preferred mode.
	const uint width = (stub > 0 ? 0 : dev->run->width);
	const uint height = (stub > 0 ? 0 : dev->run->height);
	const uint hz = (stub > 0 ? 0 : dev->run->hz);
	switch (_drm_find_sink(drm, width, height, hz)) {
		case 0: break;
		case -2: goto unplugged;
		default: goto error;
	}
	if ((stub == 0) && (width != run->mode.hdisplay || height < run->mode.vdisplay)) {
		// We'll try to show something instead of nothing if height != vdisplay
		stub = US_DRM_STUB_BAD_RESOLUTION;
		_D_LOG_ERROR("There is no appropriate modes for the capture, forcing to STUB ...");
	}

	if (_drm_init_buffers(drm, (stub > 0 ? NULL : dev)) < 0) {
		goto error;
	}

	// Save the current CRTC so it can be restored in us_drm_close().
	run->saved_crtc = drmModeGetCrtc(run->fd, run->crtc_id);
	_D_LOG_DEBUG("Setting up CRTC ...");
	if (drmModeSetCrtc(run->fd, run->crtc_id, run->bufs[0].id, 0, 0, &run->conn_id, 1, &run->mode) < 0) {
		_D_LOG_PERROR("Can't set CRTC");
		goto error;
	}

	run->opened_for_stub = (stub > 0);
	run->exposing_dma_fd = -1;
	run->unplugged_reported = false;
	_D_LOG_INFO("Opened for %s ...", (run->opened_for_stub ? "STUB" : "DMA"));
	return stub;

error:
	us_drm_close(drm);
	return -1;

unplugged:
	// Report "not plugged" only once to avoid log spam.
	if (!run->unplugged_reported) {
		_D_LOG_ERROR("Display is not plugged");
		run->unplugged_reported = true;
	}
	us_drm_close(drm);
	return -2;
}
// Closes the DRM device: waits for an in-flight DMA frame, restores the
// saved CRTC, releases all framebuffers and dumb buffers, and closes the
// file descriptors. Safe to call on a partially opened device.
void us_drm_close(us_drm_s *drm) {
	us_drm_runtime_s *const run = drm->run;

	if (run->exposing_dma_fd >= 0) {
		// Wait until the dma_fd is released before tearing things down.
		// Just in case.
		assert(run->fd >= 0);
		us_drm_wait_for_vsync(drm);
		run->exposing_dma_fd = -1;
	}

	if (run->saved_crtc != NULL) {
		_D_LOG_DEBUG("Restoring CRTC ...");
		if (drmModeSetCrtc(run->fd,
			run->saved_crtc->crtc_id, run->saved_crtc->buffer_id,
			run->saved_crtc->x, run->saved_crtc->y,
			&run->conn_id, 1, &run->saved_crtc->mode
		) < 0 && errno != ENOENT) {
			_D_LOG_PERROR("Can't restore CRTC");
		}
		drmModeFreeCrtc(run->saved_crtc);
		run->saved_crtc = NULL;
	}

	if (run->bufs != NULL) {
		_D_LOG_DEBUG("Releasing buffers ...");
		for (uint n_buf = 0; n_buf < run->n_bufs; ++n_buf) {
			us_drm_buffer_s *const buf = &run->bufs[n_buf];
			// Only tear down what was actually created (flags set during init).
			if (buf->fb_added && drmModeRmFB(run->fd, buf->id) < 0) {
				_D_LOG_PERROR("Can't remove buffer=%u", n_buf);
			}
			if (buf->dumb_created) {
				struct drm_mode_destroy_dumb destroy = {.handle = buf->handle};
				if (drmIoctl(run->fd, DRM_IOCTL_MODE_DESTROY_DUMB, &destroy) < 0) {
					_D_LOG_PERROR("Can't destroy dumb buffer=%u", n_buf);
				}
			}
			if (buf->data != NULL && munmap(buf->data, buf->allocated)) {
				_D_LOG_PERROR("Can't unmap buffer=%u", n_buf);
			}
		}
		US_DELETE(run->bufs, free);
		run->n_bufs = 0;
	}

	const bool say = (run->fd >= 0); // Log "Closed" only if it was actually open
	US_CLOSE_FD(run->status_fd);
	US_CLOSE_FD(run->fd);
	run->crtc_id = 0;
	run->dpms_state = -1;
	run->has_vsync = true;
	run->stub_n_buf = 0;

	if (say) {
		_D_LOG_INFO("Closed");
	}
}
// Powers off the display via DPMS. Returns 0 on success (or when the
// display is unplugged, which is fine for powering off), -1 on error.
int us_drm_dpms_power_off(us_drm_s *drm) {
	assert(drm->run->fd >= 0);
	switch (_drm_check_status(drm)) {
		case 0: break;
		case -2: return 0; // Unplugged, nice
		// While switching DPMS, the monitor blinks once with a disconnected
		// state and then for some reason appears connected again. So we just
		// treat a disconnected monitor at this stage as normal.
		default: return -1;
	}
	_drm_ensure_dpms_power(drm, false);
	return 0;
}
int us_drm_wait_for_vsync(us_drm_s *drm) {
us_drm_runtime_s *const run = drm->run;
if (_drm_ensure(drm, NULL, 0) < 0) {
return -1;
assert(run->fd >= 0);
switch (_drm_check_status(drm)) {
case 0: break;
case -2: return -2;
default: return -1;
}
_drm_ensure_dpms_power(drm, true);
if (run->has_vsync) {
return 0;
}
@@ -109,10 +279,10 @@ int us_drm_wait_for_vsync(us_drm_s *drm) {
const int result = select(run->fd + 1, &fds, NULL, NULL, &timeout);
if (result < 0) {
_D_LOG_PERROR("Can't select(%d) device for VSync", run->fd);
goto error;
return -1;
} else if (result == 0) {
_D_LOG_ERROR("Device timeout while waiting VSync");
goto error;
return -1;
}
drmEventContext ctx = {
@@ -122,274 +292,152 @@ int us_drm_wait_for_vsync(us_drm_s *drm) {
_D_LOG_DEBUG("Handling DRM event (maybe VSync) ...");
if (drmHandleEvent(run->fd, &ctx) < 0) {
_D_LOG_PERROR("Can't handle DRM event");
goto error;
}
return 0;
error:
_drm_cleanup(drm);
_D_LOG_ERROR("Device destroyed due an error (vsync)");
return -1;
}
int us_drm_expose(us_drm_s *drm, us_drm_expose_e ex, const us_frame_s *frame, float hz) {
us_drm_runtime_s *const run = drm->run;
if (_drm_ensure(drm, frame, hz) < 0) {
return -1;
}
const drmModeModeInfo *const mode = &run->mode;
bool msg_drawn = false;
# define DRAW_MSG(x_msg) { \
us_frametext_draw(run->ft, (x_msg), mode->hdisplay, mode->vdisplay); \
frame = run->ft->frame; \
msg_drawn = true; \
}
if (frame == NULL) {
switch (ex) {
case US_DRM_EXPOSE_NO_SIGNAL:
DRAW_MSG("=== PiKVM ===\n \n< NO SIGNAL >");
break;
case US_DRM_EXPOSE_BUSY:
DRAW_MSG("=== PiKVM ===\n \n< ONLINE IS ACTIVE >");
break;
default:
DRAW_MSG("=== PiKVM ===\n \n< ??? >");
}
} else if (mode->hdisplay != frame->width/* || mode->vdisplay != frame->height*/) {
// XXX: At least we'll try to show something instead of nothing ^^^
char msg[1024];
US_SNPRINTF(msg, 1023,
"=== PiKVM ==="
"\n \n< UNSUPPORTED RESOLUTION >"
"\n \n< %ux%up%.02f >"
"\n \nby this display",
frame->width, frame->height, hz);
DRAW_MSG(msg);
} else if (frame->format != V4L2_PIX_FMT_RGB24) {
DRAW_MSG(
"=== PiKVM ==="
"\n \n< UNSUPPORTED CAPTURE FORMAT >"
"\n \nIt shouldn't happen ever."
"\n \nPlease check the logs and report a bug:"
"\n \n- https://github.com/pikvm/pikvm -");
}
# undef DRAW_MSG
if (_drm_expose_raw(drm, frame) < 0) {
_drm_cleanup(drm);
_D_LOG_ERROR("Device destroyed due an error (expose)");
}
return (msg_drawn ? -1 : 0);
return 0;
}
static void _drm_vsync_callback(int fd, uint n_frame, uint sec, uint usec, void *v_run) {
static void _drm_vsync_callback(int fd, uint n_frame, uint sec, uint usec, void *v_buf) {
(void)fd;
(void)n_frame;
(void)sec;
(void)usec;
us_drm_runtime_s *const run = v_run;
run->has_vsync = true;
us_drm_buffer_s *const buf = v_buf;
*buf->ctx.has_vsync = true;
*buf->ctx.exposing_dma_fd = -1;
_D_LOG_DEBUG("Got VSync signal");
}
static int _drm_expose_raw(us_drm_s *drm, const us_frame_s *frame) {
us_drm_runtime_s *const run = drm->run;
us_drm_buffer_s *const buf = &run->bufs[run->next_n_buf];
_D_LOG_DEBUG("Exposing%s framebuffer n_buf=%u, vsync=%d ...",
(frame == NULL ? " EMPTY" : ""), run->next_n_buf, run->has_vsync);
if (frame == NULL) {
memset(buf->data, 0, buf->allocated);
} else {
memcpy(buf->data, frame->data, US_MIN(frame->used, buf->allocated));
}
run->has_vsync = false;
const int retval = drmModePageFlip(
run->fd, run->crtc_id, buf->id,
DRM_MODE_PAGE_FLIP_EVENT | DRM_MODE_PAGE_FLIP_ASYNC,
run);
run->next_n_buf = (run->next_n_buf + 1) % run->n_bufs;
return retval;
}
// Releases everything the DRM runtime acquired: restores the CRTC that was
// saved before we took over the display, unmaps and destroys all framebuffers,
// closes the device and sysfs status FDs, and resets the state to CLOSED.
// Safe to call on a partially initialized runtime.
// NOTE(review): the original text contained an interleaved stray signature
// line (`int us_drm_expose_stub(...)`) — diff-merge residue — which has been
// removed here; this function is and was `static void`.
static void _drm_cleanup(us_drm_s *drm) {
	us_drm_runtime_s *const run = drm->run;
	_D_LOG_DEBUG("Cleaning up ...");

	if (run->saved_crtc != NULL) {
		// Hand the display back to whatever configuration preceded us.
		// ENOENT is ignored: the connector may have vanished (unplugged).
		if (drmModeSetCrtc(run->fd,
			run->saved_crtc->crtc_id, run->saved_crtc->buffer_id,
			run->saved_crtc->x, run->saved_crtc->y,
			&run->conn_id, 1, &run->saved_crtc->mode
		) < 0 && errno != ENOENT) {
			_D_LOG_PERROR("Can't restore CRTC");
		}
		drmModeFreeCrtc(run->saved_crtc);
		run->saved_crtc = NULL;
	}

	if (run->bufs != NULL) {
		for (uint n_buf = 0; n_buf < run->n_bufs; ++n_buf) {
			us_drm_buffer_s *const buf = &run->bufs[n_buf];
			if (buf->data != NULL && munmap(buf->data, buf->allocated)) {
				_D_LOG_PERROR("Can't unmap buffer=%u", n_buf);
			}
			if (buf->fb_added && drmModeRmFB(run->fd, buf->id) < 0) {
				_D_LOG_PERROR("Can't remove buffer=%u", n_buf);
			}
			if (buf->dumb_created) {
				struct drm_mode_destroy_dumb destroy = {.handle = buf->handle};
				if (drmIoctl(run->fd, DRM_IOCTL_MODE_DESTROY_DUMB, &destroy) < 0) {
					_D_LOG_PERROR("Can't destroy dumb buffer=%u", n_buf);
				}
			}
		}
		US_DELETE(run->bufs, free);
		run->n_bufs = 0;
	}

	US_CLOSE_FD(run->status_fd);
	US_CLOSE_FD(run->fd);
	run->crtc_id = 0;
	run->next_n_buf = 0;
	run->has_vsync = false;

	if (run->state == US_DRM_STATE_OK) {
		_D_LOG_INFO("Stopped");
	}
	run->state = US_DRM_STATE_CLOSED;
}
static int _drm_ensure(us_drm_s *drm, const us_frame_s *frame, float hz) {
us_drm_runtime_s *const run = drm->run;
assert(run->fd >= 0);
assert(run->opened_for_stub);
switch (_drm_check_status(drm)) {
case 0: break;
case -2: goto unplugged;
default: goto error;
case -2: return -2;
default: return -1;
}
_drm_ensure_dpms_power(drm, true);
if (frame == NULL && run->state == US_DRM_STATE_OK) {
return 0;
} else if (
frame != NULL
&& run->p_width == frame->width
&& run->p_height == frame->height
&& run->p_hz == hz
&& run->state <= US_DRM_STATE_CLOSED
) {
return (run->state == US_DRM_STATE_OK ? 0 : -1);
# define DRAW_MSG(x_msg) us_frametext_draw(run->ft, (x_msg), run->mode.hdisplay, run->mode.vdisplay)
switch (stub) {
case US_DRM_STUB_BAD_RESOLUTION: {
assert(dev != NULL);
char msg[1024];
US_SNPRINTF(msg, 1023,
"=== PiKVM ==="
"\n \n< UNSUPPORTED RESOLUTION >"
"\n \n< %ux%up%.02f >"
"\n \nby this display",
dev->run->width, dev->run->height, dev->run->hz);
DRAW_MSG(msg);
break;
};
case US_DRM_STUB_BAD_FORMAT:
DRAW_MSG(
"=== PiKVM ==="
"\n \n< UNSUPPORTED CAPTURE FORMAT >"
"\n \nIt shouldn't happen ever."
"\n \nPlease check the logs and report a bug:"
"\n \n- https://github.com/pikvm/pikvm -");
break;
case US_DRM_STUB_NO_SIGNAL:
DRAW_MSG("=== PiKVM ===\n \n< NO SIGNAL >");
break;
case US_DRM_STUB_BUSY:
DRAW_MSG("=== PiKVM ===\n \n< ONLINE IS ACTIVE >");
break;
default:
DRAW_MSG("=== PiKVM ===\n \n< ??? >");
break;
}
# undef DRAW_MSG
const us_drm_state_e saved_state = run->state;
_drm_cleanup(drm);
if (saved_state > US_DRM_STATE_CLOSED) {
run->state = saved_state;
us_drm_buffer_s *const buf = &run->bufs[run->stub_n_buf];
run->has_vsync = false;
_D_LOG_DEBUG("Copying STUB frame ...")
memcpy(buf->data, run->ft->frame->data, US_MIN(run->ft->frame->used, buf->allocated));
_D_LOG_DEBUG("Exposing STUB framebuffer n_buf=%u ...", run->stub_n_buf);
const int retval = drmModePageFlip(
run->fd, run->crtc_id, buf->id,
DRM_MODE_PAGE_FLIP_EVENT | DRM_MODE_PAGE_FLIP_ASYNC,
buf);
if (retval < 0) {
_D_LOG_PERROR("Can't expose STUB framebuffer n_buf=%u ...", run->stub_n_buf);
}
_D_LOG_DEBUG("Exposed STUB framebuffer n_buf=%u", run->stub_n_buf);
run->p_width = (frame != NULL ? frame->width : 0); // 0 for find the native resolution
run->p_height = (frame != NULL ? frame->height : 0);
run->p_hz = hz;
run->stub_n_buf = (run->stub_n_buf + 1) % run->n_bufs;
return retval;
}
_D_LOG_INFO("Configuring DRM device ...");
int us_drm_expose_dma(us_drm_s *drm, const us_hw_buffer_s *hw) {
us_drm_runtime_s *const run = drm->run;
us_drm_buffer_s *const buf = &run->bufs[hw->buf.index];
if ((run->fd = open(drm->path, O_RDWR | O_CLOEXEC | O_NONBLOCK)) < 0) {
_D_LOG_PERROR("Can't open DRM device");
goto error;
}
assert(run->fd >= 0);
assert(!run->opened_for_stub);
# define CHECK_CAP(x_cap) { \
u64 m_check; \
if (drmGetCap(run->fd, x_cap, &m_check) < 0) { \
_D_LOG_PERROR("Can't check " #x_cap); \
goto error; \
} \
if (!m_check) { \
_D_LOG_ERROR(#x_cap " is not supported"); \
goto error; \
} \
}
CHECK_CAP(DRM_CAP_DUMB_BUFFER);
// CHECK_CAP(DRM_CAP_PRIME);
# undef CHECK_CAP
switch (_drm_find_sink(drm, run->p_width, run->p_height, run->p_hz)) {
switch (_drm_check_status(drm)) {
case 0: break;
case -2: goto unplugged;
default: goto error;
case -2: return -2;
default: return -1;
}
_drm_ensure_dpms_power(drm, true);
const float mode_hz = _get_refresh_rate(&run->mode);
if (frame == NULL) {
run->p_width = run->mode.hdisplay;
run->p_height = run->mode.vdisplay;
run->p_hz = mode_hz;
run->has_vsync = false;
_D_LOG_DEBUG("Exposing DMA framebuffer n_buf=%u ...", hw->buf.index);
const int retval = drmModePageFlip(
run->fd, run->crtc_id, buf->id,
DRM_MODE_PAGE_FLIP_EVENT | DRM_MODE_PAGE_FLIP_ASYNC,
buf);
if (retval < 0) {
_D_LOG_PERROR("Can't expose DMA framebuffer n_buf=%u ...", run->stub_n_buf);
}
_D_LOG_INFO("Using %s mode: %ux%up%.02f",
drm->port, run->mode.hdisplay, run->mode.vdisplay, mode_hz);
if (_drm_init_buffers(drm) < 0) {
goto error;
}
if (_drm_start_video(drm) < 0) {
goto error;
}
_D_LOG_INFO("Showing ...");
run->state = US_DRM_STATE_OK;
return 0;
error:
_drm_cleanup(drm);
_D_LOG_ERROR("Device destroyed due an error (ensure)");
return -1;
unplugged:
if (run->state != US_DRM_STATE_NO_DISPLAY) {
_D_LOG_INFO("Display %s is not plugged", drm->port);
}
_drm_cleanup(drm);
run->state = US_DRM_STATE_NO_DISPLAY;
return -2;
_D_LOG_DEBUG("Exposed DMA framebuffer n_buf=%u", run->stub_n_buf);
run->exposing_dma_fd = hw->dma_fd;
return retval;
}
// Polls the connector hotplug status via sysfs.
// Returns 0 when the display is connected, -2 when disconnected ('d'),
// and -1 on any error.
// NOTE(review): the original text contained both the old and the new variant
// of each error message (diff-merge residue); the newer, shorter messages are
// kept here, matching the v6.x refactoring.
static int _drm_check_status(us_drm_s *drm) {
	us_drm_runtime_s *run = drm->run;

	if (run->status_fd < 0) {
		// Lazily resolve /sys/class/drm/card<minor>-<port>/status
		// from the DRM device node's minor number
		_D_LOG_DEBUG("Trying to find status file ...");
		struct stat st;
		if (stat(drm->path, &st) < 0) {
			_D_LOG_PERROR("Can't stat() DRM device");
			goto error;
		}
		const uint mi = minor(st.st_rdev);
		_D_LOG_DEBUG("DRM device minor(st_rdev)=%u", mi);
		char path[128];
		US_SNPRINTF(path, 127, "/sys/class/drm/card%u-%s/status", mi, drm->port);
		_D_LOG_DEBUG("Opening status file %s ...", path);
		if ((run->status_fd = open(path, O_RDONLY | O_CLOEXEC)) < 0) {
			_D_LOG_PERROR("Can't open status file: %s", path);
			goto error;
		}
		_D_LOG_DEBUG("Status file fd=%d opened", run->status_fd);
	}

	char status_ch;
	if (read(run->status_fd, &status_ch, 1) != 1) {
		_D_LOG_PERROR("Can't read status file");
		goto error;
	}
	// Rewind so the next poll rereads the first byte
	if (lseek(run->status_fd, 0, SEEK_SET) != 0) {
		_D_LOG_PERROR("Can't rewind status file");
		goto error;
	}
	_D_LOG_DEBUG("Current display status: %c", status_ch);
	return (status_ch == 'd' ? -2 : 0);

error:
	// NOTE(review): a hunk header hid the original error-path body here;
	// consider closing run->status_fd so a transient failure retries the
	// sysfs lookup on the next call — verify against upstream.
	return -1;
}
// Switches the connector's DPMS power property to the requested state,
// if the connector exposes DPMS and the cached state differs.
// A failed property write is logged and ignored; the cached state is
// updated unconditionally.
static void _drm_ensure_dpms_power(us_drm_s *drm, bool on) {
	us_drm_runtime_s *const run = drm->run;
	const int target = (int)on;
	if (run->dpms_id > 0 && run->dpms_state != target) {
		_D_LOG_INFO("Changing DPMS power mode: %d -> %u ...", run->dpms_state, on);
		const u64 value = (on ? DRM_MODE_DPMS_ON : DRM_MODE_DPMS_OFF);
		if (drmModeConnectorSetProperty(run->fd, run->conn_id, run->dpms_id, value) < 0) {
			_D_LOG_PERROR("Can't set DPMS power=%u (ignored)", on);
		}
	}
	run->dpms_state = target;
}
// Allocates run->bufs and registers each buffer as a DRM framebuffer.
// Two modes:
//   dev == NULL -- create CPU-mappable "dumb" buffers (used for stub screens);
//   dev != NULL -- import the capture device's DMA-BUF fds for zero-copy output.
// Returns 0 on success, -1 on failure; partially created buffers are tracked
// via run->n_bufs so _drm_cleanup() can release them.
static int _drm_init_buffers(us_drm_s *drm, const us_device_s *dev) {
	us_drm_runtime_s *const run = drm->run;
	// 4 dumb buffers for stubs; in DMA mode mirror the capture ring size
	const uint n_bufs = (dev == NULL ? 4 : dev->run->n_bufs);
	const char *name = (dev == NULL ? "STUB" : "DMA");
	_D_LOG_DEBUG("Initializing %u %s buffers ...", n_bufs, name);
	US_CALLOC(run->bufs, n_bufs);
	// run->n_bufs is incremented as we go so cleanup frees only what exists
	for (run->n_bufs = 0; run->n_bufs < n_bufs; ++run->n_bufs) {
		const uint n_buf = run->n_bufs;
		us_drm_buffer_s *const buf = &run->bufs[n_buf];
		// The vsync callback receives the buffer pointer, so each buffer
		// carries back-pointers into the runtime to report flip completion
		buf->ctx.has_vsync = &run->has_vsync;
		buf->ctx.exposing_dma_fd = &run->exposing_dma_fd;
		u32 handles[4] = {0};
		u32 strides[4] = {0};
		u32 offsets[4] = {0};
		if (dev == NULL) {
			// Dumb buffer: allocate, map into our address space, zero-fill
			struct drm_mode_create_dumb create = {
				.width = run->mode.hdisplay,
				.height = run->mode.vdisplay,
				.bpp = 24, // Matches DRM_FORMAT_RGB888 below
			};
			if (drmIoctl(run->fd, DRM_IOCTL_MODE_CREATE_DUMB, &create) < 0) {
				_D_LOG_PERROR("Can't create %s buffer=%u", name, n_buf);
				return -1;
			}
			buf->handle = create.handle;
			buf->dumb_created = true;
			struct drm_mode_map_dumb map = {.handle = create.handle};
			if (drmIoctl(run->fd, DRM_IOCTL_MODE_MAP_DUMB, &map) < 0) {
				_D_LOG_PERROR("Can't prepare dumb buffer=%u to mapping", n_buf);
				return -1;
			}
			if ((buf->data = mmap(
				NULL, create.size,
				PROT_READ | PROT_WRITE, MAP_SHARED,
				run->fd, map.offset
			)) == MAP_FAILED) {
				_D_LOG_PERROR("Can't map buffer=%u", n_buf);
				return -1;
			}
			memset(buf->data, 0, create.size);
			buf->allocated = create.size;
			handles[0] = create.handle;
			strides[0] = create.pitch;
		} else {
			// DMA import: convert the capture buffer's fd into a DRM handle
			if (drmPrimeFDToHandle(run->fd, dev->run->hw_bufs[n_buf].dma_fd, &buf->handle) < 0) {
				_D_LOG_PERROR("Can't import DMA buffer=%u from capture device", n_buf);
				return -1;
			}
			handles[0] = buf->handle;
			strides[0] = dev->run->stride;
		}
		// Register the buffer as a framebuffer usable by the CRTC
		if (drmModeAddFB2(
			run->fd,
			run->mode.hdisplay, run->mode.vdisplay, DRM_FORMAT_RGB888,
			handles, strides, offsets, &buf->id, 0
		)) {
			_D_LOG_PERROR("Can't setup buffer=%u", n_buf);
			return -1;
		}
		buf->fb_added = true;
	}
	return 0;
}
static int _drm_find_sink(us_drm_s *drm, uint width, uint height, float hz) {
us_drm_runtime_s *const run = drm->run;
@@ -429,49 +565,29 @@ static int _drm_find_sink(us_drm_s *drm, uint width, uint height, float hz) {
drmModeFreeConnector(conn);
continue;
}
_D_LOG_DEBUG("Found connector for port %s: conn_type=%d, conn_type_id=%d",
_D_LOG_INFO("Using connector %s: conn_type=%d, conn_type_id=%d",
drm->port, conn->connector_type, conn->connector_type_id);
if (conn->connection != DRM_MODE_CONNECTED) {
_D_LOG_DEBUG("Display is not connected");
_D_LOG_ERROR("Connector for port %s has !DRM_MODE_CONNECTED", drm->port);
drmModeFreeConnector(conn);
goto done;
}
drmModeModeInfo *best = NULL;
drmModeModeInfo *closest = NULL;
drmModeModeInfo *pref = NULL;
for (int mi = 0; mi < conn->count_modes; ++mi) {
drmModeModeInfo *const mode = &conn->modes[mi];
if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
continue; // Paranoia for size and discard interlaced
}
const float mode_hz = _get_refresh_rate(mode);
if (mode->hdisplay == width && mode->vdisplay == height) {
best = mode; // Any mode with exact resolution
if (hz > 0 && mode_hz == hz) {
break; // Exact mode with same freq
}
}
if (mode->hdisplay == width && mode->vdisplay < height) {
if (closest == NULL || _get_refresh_rate(closest) != hz) {
closest = mode; // Something like 1920x1080p60 for 1920x1200p60 source
}
}
if (pref == NULL && (mode->type & DRM_MODE_TYPE_PREFERRED)) {
pref = mode; // Preferred mode if nothing is found
}
}
if (best == NULL) { best = closest; }
if (best == NULL) { best = pref; }
if (best == NULL) { best = (conn->count_modes > 0 ? &conn->modes[0] : NULL); }
if (best == NULL) {
_D_LOG_ERROR("Can't find any appropriate resolutions");
drmModeModeInfo *best;
if ((best = _find_best_mode(conn, width, height, hz)) == NULL) {
_D_LOG_ERROR("Can't find any appropriate display modes");
drmModeFreeConnector(conn);
goto unplugged;
}
assert(best->hdisplay > 0);
assert(best->vdisplay > 0);
_D_LOG_INFO("Using best mode: %ux%up%.02f",
best->hdisplay, best->vdisplay, _get_refresh_rate(best));
if ((run->dpms_id = _find_dpms(run->fd, conn)) > 0) {
_D_LOG_INFO("Using DPMS: id=%u", run->dpms_id);
} else {
_D_LOG_INFO("Using DPMS: None");
}
u32 taken_crtcs = 0; // Unused here
if ((run->crtc_id = _find_crtc(run->fd, res, conn, &taken_crtcs)) == 0) {
@@ -479,6 +595,8 @@ static int _drm_find_sink(us_drm_s *drm, uint width, uint height, float hz) {
drmModeFreeConnector(conn);
goto done;
}
_D_LOG_INFO("Using CRTC: id=%u", run->crtc_id);
run->conn_id = conn->connector_id;
memcpy(&run->mode, best, sizeof(drmModeModeInfo));
@@ -495,72 +613,58 @@ unplugged:
return -2;
}
static int _drm_init_buffers(us_drm_s *drm) {
us_drm_runtime_s *const run = drm->run;
static drmModeModeInfo *_find_best_mode(drmModeConnector *conn, uint width, uint height, float hz) {
drmModeModeInfo *best = NULL;
drmModeModeInfo *closest = NULL;
drmModeModeInfo *pref = NULL;
_D_LOG_DEBUG("Initializing %u buffers ...", drm->n_bufs);
US_CALLOC(run->bufs, drm->n_bufs);
for (run->n_bufs = 0; run->n_bufs < drm->n_bufs; ++run->n_bufs) {
const uint n_buf = run->n_bufs;
us_drm_buffer_s *const buf = &run->bufs[n_buf];
struct drm_mode_create_dumb create = {
.width = run->mode.hdisplay,
.height = run->mode.vdisplay,
.bpp = 24,
};
if (drmIoctl(run->fd, DRM_IOCTL_MODE_CREATE_DUMB, &create) < 0) {
_D_LOG_PERROR("Can't create dumb buffer=%u", n_buf);
return -1;
for (int mi = 0; mi < conn->count_modes; ++mi) {
drmModeModeInfo *const mode = &conn->modes[mi];
if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
continue; // Discard interlaced
}
buf->handle = create.handle;
buf->dumb_created = true;
u32 handles[4] = {create.handle};
u32 strides[4] = {create.pitch};
u32 offsets[4] = {0};
if (drmModeAddFB2(
run->fd,
run->mode.hdisplay, run->mode.vdisplay, DRM_FORMAT_RGB888,
handles, strides, offsets, &buf->id, 0
)) {
_D_LOG_PERROR("Can't setup buffer=%u", n_buf);
return -1;
const float mode_hz = _get_refresh_rate(mode);
if (mode->hdisplay == width && mode->vdisplay == height) {
best = mode; // Any mode with exact resolution
if (hz > 0 && mode_hz == hz) {
break; // Exact mode with same freq
}
}
buf->fb_added = true;
struct drm_mode_map_dumb map = {.handle = create.handle};
if (drmIoctl(run->fd, DRM_IOCTL_MODE_MAP_DUMB, &map) < 0) {
_D_LOG_PERROR("Can't prepare dumb buffer=%u to mapping", n_buf);
return -1;
if (mode->hdisplay == width && mode->vdisplay < height) {
if (closest == NULL || _get_refresh_rate(closest) != hz) {
closest = mode; // Something like 1920x1080p60 for 1920x1200p60 source
}
}
if ((buf->data = mmap(
NULL, create.size,
PROT_READ | PROT_WRITE, MAP_SHARED,
run->fd, map.offset
)) == MAP_FAILED) {
_D_LOG_PERROR("Can't map buffer=%u", n_buf);
return -1;
if (pref == NULL && (mode->type & DRM_MODE_TYPE_PREFERRED)) {
pref = mode; // Preferred mode if nothing is found
}
memset(buf->data, 0, create.size);
buf->allocated = create.size;
}
return 0;
if (best == NULL) {
best = closest;
}
if (best == NULL) {
best = pref;
}
if (best == NULL) {
best = (conn->count_modes > 0 ? &conn->modes[0] : NULL);
}
assert(best == NULL || best->hdisplay > 0);
assert(best == NULL || best->vdisplay > 0);
return best;
}
static int _drm_start_video(us_drm_s *drm) {
us_drm_runtime_s *const run = drm->run;
run->saved_crtc = drmModeGetCrtc(run->fd, run->crtc_id);
_D_LOG_DEBUG("Setting up CRTC ...");
if (drmModeSetCrtc(run->fd, run->crtc_id, run->bufs[0].id, 0, 0, &run->conn_id, 1, &run->mode) < 0) {
_D_LOG_PERROR("Can't set CRTC");
return -1;
}
if (_drm_expose_raw(drm, NULL) < 0) {
_D_LOG_PERROR("Can't flip the first page");
return -1;
// Scans the connector's properties for the "DPMS" property.
// Returns its property id, or 0 if the connector has no DPMS support.
static u32 _find_dpms(int fd, drmModeConnector *conn) {
	for (int pi = 0; pi < conn->count_props; pi++) {
		drmModePropertyPtr prop = drmModeGetProperty(fd, conn->props[pi]);
		if (prop == NULL) {
			continue; // Unreadable property, keep scanning
		}
		const bool found = !strcmp(prop->name, "DPMS");
		const u32 id = prop->prop_id;
		drmModeFreeProperty(prop);
		if (found) {
			return id;
		}
	}
	return 0;
}

View File

@@ -28,19 +28,16 @@
#include "../libs/types.h"
#include "../libs/frame.h"
#include "../libs/frametext.h"
#include "../libs/device.h"
typedef enum {
US_DRM_EXPOSE_FRAME = 0,
US_DRM_EXPOSE_NO_SIGNAL,
US_DRM_EXPOSE_BUSY,
} us_drm_expose_e;
typedef enum {
US_DRM_STATE_OK = 0,
US_DRM_STATE_CLOSED,
US_DRM_STATE_NO_DISPLAY,
} us_drm_state_e;
US_DRM_STUB_USER = 1,
US_DRM_STUB_BAD_RESOLUTION,
US_DRM_STUB_BAD_FORMAT,
US_DRM_STUB_NO_SIGNAL,
US_DRM_STUB_BUSY,
} us_drm_stub_e;
typedef struct {
u32 id;
@@ -49,34 +46,34 @@ typedef struct {
uz allocated;
bool dumb_created;
bool fb_added;
struct {
bool *has_vsync;
int *exposing_dma_fd;
} ctx;
} us_drm_buffer_s;
typedef struct {
int status_fd;
int fd;
u32 crtc_id;
u32 conn_id;
u32 dpms_id;
drmModeModeInfo mode;
us_drm_buffer_s *bufs;
uint n_bufs;
drmModeCrtc *saved_crtc;
uint next_n_buf;
int dpms_state;
bool opened_for_stub;
bool has_vsync;
int exposing_dma_fd;
uint stub_n_buf;
bool unplugged_reported;
us_frametext_s *ft;
uint p_width;
uint p_height;
float p_hz;
us_drm_state_e state;
} us_drm_runtime_s;
typedef struct {
char *path;
char *port;
uint n_bufs;
uint timeout;
us_drm_runtime_s *run;
@@ -86,5 +83,10 @@ typedef struct {
us_drm_s *us_drm_init(void);
void us_drm_destroy(us_drm_s *drm);
int us_drm_open(us_drm_s *drm, const us_device_s *dev);
void us_drm_close(us_drm_s *drm);
int us_drm_dpms_power_off(us_drm_s *drm);
int us_drm_wait_for_vsync(us_drm_s *drm);
int us_drm_expose(us_drm_s *drm, us_drm_expose_e ex, const us_frame_s *frame, float hz);
int us_drm_expose_stub(us_drm_s *drm, us_drm_stub_e stub, const us_device_s *dev);
int us_drm_expose_dma(us_drm_s *drm, const us_hw_buffer_s *hw);

View File

@@ -25,7 +25,6 @@
#include <stdatomic.h>
#include <string.h>
#include <unistd.h>
#include <signal.h>
#include <getopt.h>
#include <errno.h>
#include <assert.h>
@@ -41,6 +40,7 @@
#include "../libs/tools.h"
#include "../libs/logging.h"
#include "../libs/device.h"
#include "../libs/signal.h"
#include "../libs/options.h"
#include "drm.h"
@@ -82,7 +82,6 @@ atomic_bool _g_ustreamer_online = false;
static void _signal_handler(int signum);
static void _install_signal_handlers(void);
static void _main_loop();
static void *_follower_thread(void *v_unix_follow);
@@ -137,13 +136,13 @@ int main(int argc, char *argv[]) {
# undef OPT_NUMBER
# undef OPT_SET
_install_signal_handlers();
us_install_signals_handler(_signal_handler, false);
pthread_t follower_tid;
if (unix_follow != NULL) {
US_THREAD_CREATE(follower_tid, _follower_thread, unix_follow);
}
_main_loop(unix_follow);
_main_loop();
if (unix_follow != NULL) {
US_THREAD_JOIN(follower_tid);
}
@@ -159,103 +158,103 @@ static void _signal_handler(int signum) {
atomic_store(&_g_stop, true);
}
static void _install_signal_handlers(void) {
struct sigaction sig_act = {0};
assert(!sigemptyset(&sig_act.sa_mask));
sig_act.sa_handler = _signal_handler;
assert(!sigaddset(&sig_act.sa_mask, SIGINT));
assert(!sigaddset(&sig_act.sa_mask, SIGTERM));
assert(!sigaddset(&sig_act.sa_mask, SIGPIPE));
US_LOG_DEBUG("Installing SIGINT handler ...");
assert(!sigaction(SIGINT, &sig_act, NULL));
US_LOG_DEBUG("Installing SIGTERM handler ...");
assert(!sigaction(SIGTERM, &sig_act, NULL));
US_LOG_DEBUG("Installing SIGTERM handler ...");
assert(!sigaction(SIGPIPE, &sig_act, NULL));
}
static void _main_loop(void) {
us_drm_s *drm = us_drm_init();
drm->port = "HDMI-A-2";
us_device_s *dev = us_device_init();
dev->path = "/dev/kvmd-video";
dev->n_bufs = drm->n_bufs;
dev->n_bufs = 6;
dev->format = V4L2_PIX_FMT_RGB24;
dev->dv_timings = true;
dev->persistent = true;
dev->dma_export = true;
dev->dma_required = true;
int once = 0;
ldf blank_at_ts = 0;
int drm_opened = -1;
while (!atomic_load(&_g_stop)) {
# define CHECK(x_arg) if ((x_arg) < 0) { goto close; }
if (drm_opened <= 0) {
blank_at_ts = 0;
CHECK(drm_opened = us_drm_open(drm, NULL));
}
assert(drm_opened > 0);
if (atomic_load(&_g_ustreamer_online)) {
if (us_drm_wait_for_vsync(drm) == 0) {
us_drm_expose(drm, US_DRM_EXPOSE_BUSY, NULL, 0);
}
if (dev->run->fd >= 0) {
goto close;
} else {
_slowdown();
continue;
}
blank_at_ts = 0;
US_ONCE({ US_LOG_INFO("DRM: Online stream is active, stopping capture ..."); });
CHECK(us_drm_wait_for_vsync(drm));
CHECK(us_drm_expose_stub(drm, US_DRM_STUB_BUSY, NULL));
_slowdown();
continue;
}
if (us_device_open(dev) < 0) {
goto close;
ldf now_ts = us_get_now_monotonic();
if (blank_at_ts == 0) {
blank_at_ts = now_ts + 5;
}
if (now_ts <= blank_at_ts) {
CHECK(us_drm_wait_for_vsync(drm));
CHECK(us_drm_expose_stub(drm, US_DRM_STUB_NO_SIGNAL, NULL));
} else {
US_ONCE({ US_LOG_INFO("DRM: Turning off the display by timeout ..."); });
CHECK(us_drm_dpms_power_off(drm));
}
_slowdown();
continue;
}
once = 0;
blank_at_ts = 0;
us_drm_close(drm);
CHECK(drm_opened = us_drm_open(drm, dev));
us_hw_buffer_s *prev_hw = NULL;
while (!atomic_load(&_g_stop)) {
if (atomic_load(&_g_ustreamer_online)) {
goto close;
}
if (us_drm_wait_for_vsync(drm) < 0) {
_slowdown();
continue;
CHECK(us_drm_wait_for_vsync(drm));
if (prev_hw != NULL) {
CHECK(us_device_release_buffer(dev, prev_hw));
prev_hw = NULL;
}
bool has_read;
bool has_error;
const int selected = us_device_select(dev, &has_read, &has_error);
us_hw_buffer_s *hw;
switch (us_device_grab_buffer(dev, &hw)) {
case -2: continue; // Broken frame
case -1: goto close; // Any error
default: break; // Grabbed on >= 0
}
if (selected < 0) {
if (errno != EINTR) {
US_LOG_PERROR("Mainloop select() error");
goto close;
}
} else if (selected == 0) { // Persistent timeout
if (us_drm_expose(drm, US_DRM_EXPOSE_NO_SIGNAL, NULL, 0) < 0) {
_slowdown();
continue;
}
if (drm_opened == 0) {
CHECK(us_drm_expose_dma(drm, hw));
prev_hw = hw;
} else {
if (has_read) {
us_hw_buffer_s *hw;
const int buf_index = us_device_grab_buffer(dev, &hw);
if (buf_index >= 0) {
const int exposed = us_drm_expose(drm, US_DRM_EXPOSE_FRAME, &hw->raw, dev->run->hz);
if (us_device_release_buffer(dev, hw) < 0) {
goto close;
}
if (exposed < 0) {
_slowdown();
continue;
}
} else if (buf_index != -2) { // -2 for broken frame
goto close;
}
}
if (has_error && us_device_consume_event(dev) < 0) {
goto close;
}
CHECK(us_drm_expose_stub(drm, drm_opened, dev));
CHECK(us_device_release_buffer(dev, hw));
}
if (drm_opened > 0) {
_slowdown();
}
}
close:
us_drm_close(drm);
drm_opened = -1;
us_device_close(dev);
_slowdown();
# undef CHECK
}
us_device_destroy(dev);
@@ -263,11 +262,10 @@ static void _main_loop(void) {
}
static void *_follower_thread(void *v_unix_follow) {
US_THREAD_SETTLE("follower");
const char *path = v_unix_follow;
assert(path != NULL);
US_THREAD_RENAME("follower");
while (!atomic_load(&_g_stop)) {
int fd = socket(AF_UNIX, SOCK_STREAM, 0);
assert(fd >= 0);
@@ -276,7 +274,7 @@ static void *_follower_thread(void *v_unix_follow) {
strncpy(addr.sun_path, path, sizeof(addr.sun_path) - 1);
addr.sun_family = AF_UNIX;
const bool online = !connect(fd, (struct sockaddr *)&addr, sizeof(addr));
const bool online = !connect(fd, (struct sockaddr*)&addr, sizeof(addr));
atomic_store(&_g_ustreamer_online, online);
US_CLOSE_FD(fd); // cppcheck-suppress unreadVariable