aboutsummaryrefslogtreecommitdiff
path: root/src/plugins/ffmpeg/libavdevice
diff options
context:
space:
mode:
Diffstat (limited to 'src/plugins/ffmpeg/libavdevice')
-rw-r--r--src/plugins/ffmpeg/libavdevice/Makefile26
-rw-r--r--src/plugins/ffmpeg/libavdevice/alldevices.c52
-rw-r--r--src/plugins/ffmpeg/libavdevice/audio.c349
-rw-r--r--src/plugins/ffmpeg/libavdevice/avdevice.h41
-rw-r--r--src/plugins/ffmpeg/libavdevice/beosaudio.cpp465
-rw-r--r--src/plugins/ffmpeg/libavdevice/bktr.c321
-rw-r--r--src/plugins/ffmpeg/libavdevice/dv1394.c237
-rw-r--r--src/plugins/ffmpeg/libavdevice/dv1394.h356
-rw-r--r--src/plugins/ffmpeg/libavdevice/libdc1394.c372
-rw-r--r--src/plugins/ffmpeg/libavdevice/v4l.c355
-rw-r--r--src/plugins/ffmpeg/libavdevice/v4l2.c641
-rw-r--r--src/plugins/ffmpeg/libavdevice/vfwcap.c466
-rw-r--r--src/plugins/ffmpeg/libavdevice/x11grab.c529
13 files changed, 0 insertions, 4210 deletions
diff --git a/src/plugins/ffmpeg/libavdevice/Makefile b/src/plugins/ffmpeg/libavdevice/Makefile
deleted file mode 100644
index 361eda4..0000000
--- a/src/plugins/ffmpeg/libavdevice/Makefile
+++ /dev/null
@@ -1,26 +0,0 @@
1include $(SUBDIR)../config.mak
2
3NAME = avdevice
4FFLIBS = avformat avcodec avutil
5
6HEADERS = avdevice.h
7
8OBJS = alldevices.o
9
10# input/output devices
11OBJS-$(CONFIG_BKTR_DEMUXER) += bktr.o
12OBJS-$(CONFIG_DV1394_DEMUXER) += dv1394.o
13OBJS-$(CONFIG_OSS_DEMUXER) += audio.o
14OBJS-$(CONFIG_OSS_MUXER) += audio.o
15OBJS-$(CONFIG_V4L2_DEMUXER) += v4l2.o
16OBJS-$(CONFIG_V4L_DEMUXER) += v4l.o
17OBJS-$(CONFIG_VFWCAP_DEMUXER) += vfwcap.o
18OBJS-$(CONFIG_X11_GRAB_DEVICE_DEMUXER) += x11grab.o
19
20# external libraries
21OBJS-$(CONFIG_LIBDC1394_DEMUXER) += libdc1394.o
22
23CPP_OBJS-$(CONFIG_AUDIO_BEOS_DEMUXER) += beosaudio.o
24CPP_OBJS-$(CONFIG_AUDIO_BEOS_MUXER) += beosaudio.o
25
26include $(SUBDIR)../subdir.mak
diff --git a/src/plugins/ffmpeg/libavdevice/alldevices.c b/src/plugins/ffmpeg/libavdevice/alldevices.c
deleted file mode 100644
index 6dfd350..0000000
--- a/src/plugins/ffmpeg/libavdevice/alldevices.c
+++ /dev/null
@@ -1,52 +0,0 @@
1/*
2 * Register all the grabbing devices.
3 *
4 * This file is part of FFmpeg.
5 *
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
10 *
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19 */
20
21#include "config.h"
22#include "libavformat/avformat.h"
23
24#define REGISTER_MUXER(X,x) { \
25 extern AVOutputFormat x##_muxer; \
26 if(ENABLE_##X##_MUXER) av_register_output_format(&x##_muxer); }
27#define REGISTER_DEMUXER(X,x) { \
28 extern AVInputFormat x##_demuxer; \
29 if(ENABLE_##X##_DEMUXER) av_register_input_format(&x##_demuxer); }
30#define REGISTER_MUXDEMUX(X,x) REGISTER_MUXER(X,x); REGISTER_DEMUXER(X,x)
31
32void avdevice_register_all(void)
33{
34 static int initialized;
35
36 if (initialized)
37 return;
38 initialized = 1;
39
40 /* devices */
41 REGISTER_MUXDEMUX (AUDIO_BEOS, audio_beos);
42 REGISTER_DEMUXER (BKTR, bktr);
43 REGISTER_DEMUXER (DV1394, dv1394);
44 REGISTER_MUXDEMUX (OSS, oss);
45 REGISTER_DEMUXER (V4L2, v4l2);
46 REGISTER_DEMUXER (V4L, v4l);
47 REGISTER_DEMUXER (VFWCAP, vfwcap);
48 REGISTER_DEMUXER (X11_GRAB_DEVICE, x11_grab_device);
49
50 /* external libraries */
51 REGISTER_DEMUXER (LIBDC1394, libdc1394);
52}
diff --git a/src/plugins/ffmpeg/libavdevice/audio.c b/src/plugins/ffmpeg/libavdevice/audio.c
deleted file mode 100644
index 172c5f1..0000000
--- a/src/plugins/ffmpeg/libavdevice/audio.c
+++ /dev/null
@@ -1,349 +0,0 @@
1/*
2 * Linux audio play and grab interface
3 * Copyright (c) 2000, 2001 Fabrice Bellard.
4 *
5 * This file is part of FFmpeg.
6 *
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22#include "config.h"
23#include <stdlib.h>
24#include <stdio.h>
25#include <stdint.h>
26#include <string.h>
27#include <errno.h>
28#ifdef HAVE_SOUNDCARD_H
29#include <soundcard.h>
30#else
31#include <sys/soundcard.h>
32#endif
33#include <unistd.h>
34#include <fcntl.h>
35#include <sys/ioctl.h>
36#include <sys/time.h>
37
38#include "libavutil/log.h"
39#include "libavcodec/avcodec.h"
40#include "libavformat/avformat.h"
41
42#define AUDIO_BLOCK_SIZE 4096
43
44typedef struct {
45 int fd;
46 int sample_rate;
47 int channels;
48 int frame_size; /* in bytes ! */
49 int codec_id;
50 unsigned int flip_left : 1;
51 uint8_t buffer[AUDIO_BLOCK_SIZE];
52 int buffer_ptr;
53} AudioData;
54
55static int audio_open(AudioData *s, int is_output, const char *audio_device)
56{
57 int audio_fd;
58 int tmp, err;
59 char *flip = getenv("AUDIO_FLIP_LEFT");
60
61 if (is_output)
62 audio_fd = open(audio_device, O_WRONLY);
63 else
64 audio_fd = open(audio_device, O_RDONLY);
65 if (audio_fd < 0) {
66 av_log(NULL, AV_LOG_ERROR, "%s: %s\n", audio_device, strerror(errno));
67 return AVERROR(EIO);
68 }
69
70 if (flip && *flip == '1') {
71 s->flip_left = 1;
72 }
73
74 /* non blocking mode */
75 if (!is_output)
76 fcntl(audio_fd, F_SETFL, O_NONBLOCK);
77
78 s->frame_size = AUDIO_BLOCK_SIZE;
79#if 0
80 tmp = (NB_FRAGMENTS << 16) | FRAGMENT_BITS;
81 err = ioctl(audio_fd, SNDCTL_DSP_SETFRAGMENT, &tmp);
82 if (err < 0) {
83 perror("SNDCTL_DSP_SETFRAGMENT");
84 }
85#endif
86
87 /* select format : favour native format */
88 err = ioctl(audio_fd, SNDCTL_DSP_GETFMTS, &tmp);
89
90#ifdef WORDS_BIGENDIAN
91 if (tmp & AFMT_S16_BE) {
92 tmp = AFMT_S16_BE;
93 } else if (tmp & AFMT_S16_LE) {
94 tmp = AFMT_S16_LE;
95 } else {
96 tmp = 0;
97 }
98#else
99 if (tmp & AFMT_S16_LE) {
100 tmp = AFMT_S16_LE;
101 } else if (tmp & AFMT_S16_BE) {
102 tmp = AFMT_S16_BE;
103 } else {
104 tmp = 0;
105 }
106#endif
107
108 switch(tmp) {
109 case AFMT_S16_LE:
110 s->codec_id = CODEC_ID_PCM_S16LE;
111 break;
112 case AFMT_S16_BE:
113 s->codec_id = CODEC_ID_PCM_S16BE;
114 break;
115 default:
116 av_log(NULL, AV_LOG_ERROR, "Soundcard does not support 16 bit sample format\n");
117 close(audio_fd);
118 return AVERROR(EIO);
119 }
120 err=ioctl(audio_fd, SNDCTL_DSP_SETFMT, &tmp);
121 if (err < 0) {
122 av_log(NULL, AV_LOG_ERROR, "SNDCTL_DSP_SETFMT: %s\n", strerror(errno));
123 goto fail;
124 }
125
126 tmp = (s->channels == 2);
127 err = ioctl(audio_fd, SNDCTL_DSP_STEREO, &tmp);
128 if (err < 0) {
129 av_log(NULL, AV_LOG_ERROR, "SNDCTL_DSP_STEREO: %s\n", strerror(errno));
130 goto fail;
131 }
132 if (tmp)
133 s->channels = 2;
134
135 tmp = s->sample_rate;
136 err = ioctl(audio_fd, SNDCTL_DSP_SPEED, &tmp);
137 if (err < 0) {
138 av_log(NULL, AV_LOG_ERROR, "SNDCTL_DSP_SPEED: %s\n", strerror(errno));
139 goto fail;
140 }
141 s->sample_rate = tmp; /* store real sample rate */
142 s->fd = audio_fd;
143
144 return 0;
145 fail:
146 close(audio_fd);
147 return AVERROR(EIO);
148}
149
150static int audio_close(AudioData *s)
151{
152 close(s->fd);
153 return 0;
154}
155
156/* sound output support */
157static int audio_write_header(AVFormatContext *s1)
158{
159 AudioData *s = s1->priv_data;
160 AVStream *st;
161 int ret;
162
163 st = s1->streams[0];
164 s->sample_rate = st->codec->sample_rate;
165 s->channels = st->codec->channels;
166 ret = audio_open(s, 1, s1->filename);
167 if (ret < 0) {
168 return AVERROR(EIO);
169 } else {
170 return 0;
171 }
172}
173
174static int audio_write_packet(AVFormatContext *s1, AVPacket *pkt)
175{
176 AudioData *s = s1->priv_data;
177 int len, ret;
178 int size= pkt->size;
179 uint8_t *buf= pkt->data;
180
181 while (size > 0) {
182 len = AUDIO_BLOCK_SIZE - s->buffer_ptr;
183 if (len > size)
184 len = size;
185 memcpy(s->buffer + s->buffer_ptr, buf, len);
186 s->buffer_ptr += len;
187 if (s->buffer_ptr >= AUDIO_BLOCK_SIZE) {
188 for(;;) {
189 ret = write(s->fd, s->buffer, AUDIO_BLOCK_SIZE);
190 if (ret > 0)
191 break;
192 if (ret < 0 && (errno != EAGAIN && errno != EINTR))
193 return AVERROR(EIO);
194 }
195 s->buffer_ptr = 0;
196 }
197 buf += len;
198 size -= len;
199 }
200 return 0;
201}
202
203static int audio_write_trailer(AVFormatContext *s1)
204{
205 AudioData *s = s1->priv_data;
206
207 audio_close(s);
208 return 0;
209}
210
211/* grab support */
212
213static int audio_read_header(AVFormatContext *s1, AVFormatParameters *ap)
214{
215 AudioData *s = s1->priv_data;
216 AVStream *st;
217 int ret;
218
219 if (ap->sample_rate <= 0 || ap->channels <= 0)
220 return -1;
221
222 st = av_new_stream(s1, 0);
223 if (!st) {
224 return AVERROR(ENOMEM);
225 }
226 s->sample_rate = ap->sample_rate;
227 s->channels = ap->channels;
228
229 ret = audio_open(s, 0, s1->filename);
230 if (ret < 0) {
231 av_free(st);
232 return AVERROR(EIO);
233 }
234
235 /* take real parameters */
236 st->codec->codec_type = CODEC_TYPE_AUDIO;
237 st->codec->codec_id = s->codec_id;
238 st->codec->sample_rate = s->sample_rate;
239 st->codec->channels = s->channels;
240
241 av_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
242 return 0;
243}
244
245static int audio_read_packet(AVFormatContext *s1, AVPacket *pkt)
246{
247 AudioData *s = s1->priv_data;
248 int ret, bdelay;
249 int64_t cur_time;
250 struct audio_buf_info abufi;
251
252 if (av_new_packet(pkt, s->frame_size) < 0)
253 return AVERROR(EIO);
254 for(;;) {
255 struct timeval tv;
256 fd_set fds;
257
258 tv.tv_sec = 0;
259 tv.tv_usec = 30 * 1000; /* 30 msecs -- a bit shorter than 1 frame at 30fps */
260
261 FD_ZERO(&fds);
262 FD_SET(s->fd, &fds);
263
264 /* This will block until data is available or we get a timeout */
265 (void) select(s->fd + 1, &fds, 0, 0, &tv);
266
267 ret = read(s->fd, pkt->data, pkt->size);
268 if (ret > 0)
269 break;
270 if (ret == -1 && (errno == EAGAIN || errno == EINTR)) {
271 av_free_packet(pkt);
272 pkt->size = 0;
273 pkt->pts = av_gettime();
274 return 0;
275 }
276 if (!(ret == 0 || (ret == -1 && (errno == EAGAIN || errno == EINTR)))) {
277 av_free_packet(pkt);
278 return AVERROR(EIO);
279 }
280 }
281 pkt->size = ret;
282
283 /* compute pts of the start of the packet */
284 cur_time = av_gettime();
285 bdelay = ret;
286 if (ioctl(s->fd, SNDCTL_DSP_GETISPACE, &abufi) == 0) {
287 bdelay += abufi.bytes;
288 }
289 /* subtract time represented by the number of bytes in the audio fifo */
290 cur_time -= (bdelay * 1000000LL) / (s->sample_rate * s->channels);
291
292 /* convert to wanted units */
293 pkt->pts = cur_time;
294
295 if (s->flip_left && s->channels == 2) {
296 int i;
297 short *p = (short *) pkt->data;
298
299 for (i = 0; i < ret; i += 4) {
300 *p = ~*p;
301 p += 2;
302 }
303 }
304 return 0;
305}
306
307static int audio_read_close(AVFormatContext *s1)
308{
309 AudioData *s = s1->priv_data;
310
311 audio_close(s);
312 return 0;
313}
314
315#ifdef CONFIG_OSS_DEMUXER
316AVInputFormat oss_demuxer = {
317 "oss",
318 NULL_IF_CONFIG_SMALL("audio grab and output"),
319 sizeof(AudioData),
320 NULL,
321 audio_read_header,
322 audio_read_packet,
323 audio_read_close,
324 .flags = AVFMT_NOFILE,
325};
326#endif
327
328#ifdef CONFIG_OSS_MUXER
329AVOutputFormat oss_muxer = {
330 "oss",
331 NULL_IF_CONFIG_SMALL("audio grab and output"),
332 "",
333 "",
334 sizeof(AudioData),
335 /* XXX: we make the assumption that the soundcard accepts this format */
336 /* XXX: find better solution with "preinit" method, needed also in
337 other formats */
338#ifdef WORDS_BIGENDIAN
339 CODEC_ID_PCM_S16BE,
340#else
341 CODEC_ID_PCM_S16LE,
342#endif
343 CODEC_ID_NONE,
344 audio_write_header,
345 audio_write_packet,
346 audio_write_trailer,
347 .flags = AVFMT_NOFILE,
348};
349#endif
diff --git a/src/plugins/ffmpeg/libavdevice/avdevice.h b/src/plugins/ffmpeg/libavdevice/avdevice.h
deleted file mode 100644
index 3d4a1a3..0000000
--- a/src/plugins/ffmpeg/libavdevice/avdevice.h
+++ /dev/null
@@ -1,41 +0,0 @@
1/*
2 * This file is part of FFmpeg.
3 *
4 * FFmpeg is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU Lesser General Public
6 * License as published by the Free Software Foundation; either
7 * version 2.1 of the License, or (at your option) any later version.
8 *
9 * FFmpeg is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * Lesser General Public License for more details.
13 *
14 * You should have received a copy of the GNU Lesser General Public
15 * License along with FFmpeg; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18
19#ifndef FFMPEG_AVDEVICE_H
20#define FFMPEG_AVDEVICE_H
21
22#define LIBAVDEVICE_VERSION_MAJOR 52
23#define LIBAVDEVICE_VERSION_MINOR 0
24#define LIBAVDEVICE_VERSION_MICRO 0
25
26#define LIBAVDEVICE_VERSION_INT AV_VERSION_INT(LIBAVDEVICE_VERSION_MAJOR, \
27 LIBAVDEVICE_VERSION_MINOR, \
28 LIBAVDEVICE_VERSION_MICRO)
29#define LIBAVDEVICE_VERSION AV_VERSION(LIBAVDEVICE_VERSION_MAJOR, \
30 LIBAVDEVICE_VERSION_MINOR, \
31 LIBAVDEVICE_VERSION_MICRO)
32#define LIBAVDEVICE_BUILD LIBAVDEVICE_VERSION_INT
33
34/**
35 * Initialize libavdevice and register all the input and output devices.
36 * @warning This function is not thread safe.
37 */
38void avdevice_register_all(void);
39
40#endif /* FFMPEG_AVDEVICE_H */
41
diff --git a/src/plugins/ffmpeg/libavdevice/beosaudio.cpp b/src/plugins/ffmpeg/libavdevice/beosaudio.cpp
deleted file mode 100644
index def1fad..0000000
--- a/src/plugins/ffmpeg/libavdevice/beosaudio.cpp
+++ /dev/null
@@ -1,465 +0,0 @@
1/*
2 * BeOS audio play interface
3 * Copyright (c) 2000, 2001 Fabrice Bellard.
4 *
5 * This file is part of FFmpeg.
6 *
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22#include <signal.h>
23#include <stdlib.h>
24#include <stdio.h>
25#include <string.h>
26#include <unistd.h>
27#include <sys/time.h>
28
29#include <Application.h>
30#include <SoundPlayer.h>
31
32extern "C" {
33#include "libavformat/avformat.h"
34}
35
36#ifdef HAVE_BSOUNDRECORDER
37#include <SoundRecorder.h>
38using namespace BPrivate::Media::Experimental;
39#endif
40
41/* enable performance checks */
42//#define PERF_CHECK
43
44/* enable Media Kit latency checks */
45//#define LATENCY_CHECK
46
47#define AUDIO_BLOCK_SIZE 4096
48#define AUDIO_BLOCK_COUNT 8
49
50#define AUDIO_BUFFER_SIZE (AUDIO_BLOCK_SIZE*AUDIO_BLOCK_COUNT)
51
52typedef struct {
53 int fd; // UNUSED
54 int sample_rate;
55 int channels;
56 int frame_size; /* in bytes ! */
57 CodecID codec_id;
58 uint8_t buffer[AUDIO_BUFFER_SIZE];
59 int buffer_ptr;
60 /* ring buffer */
61 sem_id input_sem;
62 int input_index;
63 sem_id output_sem;
64 int output_index;
65 BSoundPlayer *player;
66#ifdef HAVE_BSOUNDRECORDER
67 BSoundRecorder *recorder;
68#endif
69 int has_quit; /* signal callbacks not to wait */
70 volatile bigtime_t starve_time;
71} AudioData;
72
73static thread_id main_thid;
74static thread_id bapp_thid;
75static int own_BApp_created = 0;
76static int refcount = 0;
77
78/* create the BApplication and Run() it */
79static int32 bapp_thread(void *arg)
80{
81 new BApplication("application/x-vnd.ffmpeg");
82 own_BApp_created = 1;
83 be_app->Run();
84 /* kill the process group */
85// kill(0, SIGINT);
86// kill(main_thid, SIGHUP);
87 return B_OK;
88}
89
90/* create the BApplication only if needed */
91static void create_bapp_if_needed(void)
92{
93 if (refcount++ == 0) {
94 /* needed by libmedia */
95 if (be_app == NULL) {
96 bapp_thid = spawn_thread(bapp_thread, "ffmpeg BApplication", B_NORMAL_PRIORITY, NULL);
97 resume_thread(bapp_thid);
98 while (!own_BApp_created)
99 snooze(50000);
100 }
101 }
102}
103
104static void destroy_bapp_if_needed(void)
105{
106 if (--refcount == 0 && own_BApp_created) {
107 be_app->Lock();
108 be_app->Quit();
109 be_app = NULL;
110 }
111}
112
113/* called back by BSoundPlayer */
114static void audioplay_callback(void *cookie, void *buffer, size_t bufferSize, const media_raw_audio_format &format)
115{
116 AudioData *s;
117 size_t len, amount;
118 unsigned char *buf = (unsigned char *)buffer;
119
120 s = (AudioData *)cookie;
121 if (s->has_quit)
122 return;
123 while (bufferSize > 0) {
124#ifdef PERF_CHECK
125 bigtime_t t;
126 t = system_time();
127#endif
128 len = MIN(AUDIO_BLOCK_SIZE, bufferSize);
129 if (acquire_sem_etc(s->output_sem, len, B_CAN_INTERRUPT, 0LL) < B_OK) {
130 s->has_quit = 1;
131 s->player->SetHasData(false);
132 return;
133 }
134 amount = MIN(len, (AUDIO_BUFFER_SIZE - s->output_index));
135 memcpy(buf, &s->buffer[s->output_index], amount);
136 s->output_index += amount;
137 if (s->output_index >= AUDIO_BUFFER_SIZE) {
138 s->output_index %= AUDIO_BUFFER_SIZE;
139 memcpy(buf + amount, &s->buffer[s->output_index], len - amount);
140 s->output_index += len-amount;
141 s->output_index %= AUDIO_BUFFER_SIZE;
142 }
143 release_sem_etc(s->input_sem, len, 0);
144#ifdef PERF_CHECK
145 t = system_time() - t;
146 s->starve_time = MAX(s->starve_time, t);
147#endif
148 buf += len;
149 bufferSize -= len;
150 }
151}
152
153#ifdef HAVE_BSOUNDRECORDER
154/* called back by BSoundRecorder */
155static void audiorecord_callback(void *cookie, bigtime_t timestamp, void *buffer, size_t bufferSize, const media_multi_audio_format &format)
156{
157 AudioData *s;
158 size_t len, amount;
159 unsigned char *buf = (unsigned char *)buffer;
160
161 s = (AudioData *)cookie;
162 if (s->has_quit)
163 return;
164
165 while (bufferSize > 0) {
166 len = MIN(bufferSize, AUDIO_BLOCK_SIZE);
167 //printf("acquire_sem(input, %d)\n", len);
168 if (acquire_sem_etc(s->input_sem, len, B_CAN_INTERRUPT, 0LL) < B_OK) {
169 s->has_quit = 1;
170 return;
171 }
172 amount = MIN(len, (AUDIO_BUFFER_SIZE - s->input_index));
173 memcpy(&s->buffer[s->input_index], buf, amount);
174 s->input_index += amount;
175 if (s->input_index >= AUDIO_BUFFER_SIZE) {
176 s->input_index %= AUDIO_BUFFER_SIZE;
177 memcpy(&s->buffer[s->input_index], buf + amount, len - amount);
178 s->input_index += len - amount;
179 }
180 release_sem_etc(s->output_sem, len, 0);
181 //printf("release_sem(output, %d)\n", len);
182 buf += len;
183 bufferSize -= len;
184 }
185}
186#endif
187
188static int audio_open(AudioData *s, int is_output, const char *audio_device)
189{
190 int p[2];
191 int ret;
192 media_raw_audio_format format;
193 media_multi_audio_format iformat;
194
195#ifndef HAVE_BSOUNDRECORDER
196 if (!is_output)
197 return AVERROR(EIO); /* not for now */
198#endif
199 s->input_sem = create_sem(AUDIO_BUFFER_SIZE, "ffmpeg_ringbuffer_input");
200 if (s->input_sem < B_OK)
201 return AVERROR(EIO);
202 s->output_sem = create_sem(0, "ffmpeg_ringbuffer_output");
203 if (s->output_sem < B_OK) {
204 delete_sem(s->input_sem);
205 return AVERROR(EIO);
206 }
207 s->input_index = 0;
208 s->output_index = 0;
209 create_bapp_if_needed();
210 s->frame_size = AUDIO_BLOCK_SIZE;
211 /* bump up the priority (avoid realtime though) */
212 set_thread_priority(find_thread(NULL), B_DISPLAY_PRIORITY+1);
213#ifdef HAVE_BSOUNDRECORDER
214 if (!is_output) {
215 bool wait_for_input = false;
216 if (audio_device && !strcmp(audio_device, "wait:"))
217 wait_for_input = true;
218 s->recorder = new BSoundRecorder(&iformat, wait_for_input, "ffmpeg input", audiorecord_callback);
219 if (wait_for_input && (s->recorder->InitCheck() == B_OK)) {
220 s->recorder->WaitForIncomingConnection(&iformat);
221 }
222 if (s->recorder->InitCheck() != B_OK || iformat.format != media_raw_audio_format::B_AUDIO_SHORT) {
223 delete s->recorder;
224 s->recorder = NULL;
225 if (s->input_sem)
226 delete_sem(s->input_sem);
227 if (s->output_sem)
228 delete_sem(s->output_sem);
229 return AVERROR(EIO);
230 }
231 s->codec_id = (iformat.byte_order == B_MEDIA_LITTLE_ENDIAN)?CODEC_ID_PCM_S16LE:CODEC_ID_PCM_S16BE;
232 s->channels = iformat.channel_count;
233 s->sample_rate = (int)iformat.frame_rate;
234 s->frame_size = iformat.buffer_size;
235 s->recorder->SetCookie(s);
236 s->recorder->SetVolume(1.0);
237 s->recorder->Start();
238 return 0;
239 }
240#endif
241 format = media_raw_audio_format::wildcard;
242 format.format = media_raw_audio_format::B_AUDIO_SHORT;
243 format.byte_order = B_HOST_IS_LENDIAN ? B_MEDIA_LITTLE_ENDIAN : B_MEDIA_BIG_ENDIAN;
244 format.channel_count = s->channels;
245 format.buffer_size = s->frame_size;
246 format.frame_rate = s->sample_rate;
247 s->player = new BSoundPlayer(&format, "ffmpeg output", audioplay_callback);
248 if (s->player->InitCheck() != B_OK) {
249 delete s->player;
250 s->player = NULL;
251 if (s->input_sem)
252 delete_sem(s->input_sem);
253 if (s->output_sem)
254 delete_sem(s->output_sem);
255 return AVERROR(EIO);
256 }
257 s->player->SetCookie(s);
258 s->player->SetVolume(1.0);
259 s->player->Start();
260 s->player->SetHasData(true);
261 return 0;
262}
263
264static int audio_close(AudioData *s)
265{
266 if (s->input_sem)
267 delete_sem(s->input_sem);
268 if (s->output_sem)
269 delete_sem(s->output_sem);
270 s->has_quit = 1;
271 if (s->player) {
272 s->player->Stop();
273 }
274 if (s->player)
275 delete s->player;
276#ifdef HAVE_BSOUNDRECORDER
277 if (s->recorder)
278 delete s->recorder;
279#endif
280 destroy_bapp_if_needed();
281 return 0;
282}
283
284/* sound output support */
285static int audio_write_header(AVFormatContext *s1)
286{
287 AudioData *s = (AudioData *)s1->priv_data;
288 AVStream *st;
289 int ret;
290
291 st = s1->streams[0];
292 s->sample_rate = st->codec->sample_rate;
293 s->channels = st->codec->channels;
294 ret = audio_open(s, 1, NULL);
295 if (ret < 0)
296 return AVERROR(EIO);
297 return 0;
298}
299
300static int audio_write_packet(AVFormatContext *s1, int stream_index,
301 const uint8_t *buf, int size, int64_t force_pts)
302{
303 AudioData *s = (AudioData *)s1->priv_data;
304 int len, ret;
305#ifdef LATENCY_CHECK
306bigtime_t lat1, lat2;
307lat1 = s->player->Latency();
308#endif
309#ifdef PERF_CHECK
310 bigtime_t t = s->starve_time;
311 s->starve_time = 0;
312 printf("starve_time: %lld \n", t);
313#endif
314 while (size > 0) {
315 int amount;
316 len = MIN(size, AUDIO_BLOCK_SIZE);
317 if (acquire_sem_etc(s->input_sem, len, B_CAN_INTERRUPT, 0LL) < B_OK)
318 return AVERROR(EIO);
319 amount = MIN(len, (AUDIO_BUFFER_SIZE - s->input_index));
320 memcpy(&s->buffer[s->input_index], buf, amount);
321 s->input_index += amount;
322 if (s->input_index >= AUDIO_BUFFER_SIZE) {
323 s->input_index %= AUDIO_BUFFER_SIZE;
324 memcpy(&s->buffer[s->input_index], buf + amount, len - amount);
325 s->input_index += len - amount;
326 }
327 release_sem_etc(s->output_sem, len, 0);
328 buf += len;
329 size -= len;
330 }
331#ifdef LATENCY_CHECK
332lat2 = s->player->Latency();
333printf("#### BSoundPlayer::Latency(): before= %lld, after= %lld\n", lat1, lat2);
334#endif
335 return 0;
336}
337
338static int audio_write_trailer(AVFormatContext *s1)
339{
340 AudioData *s = (AudioData *)s1->priv_data;
341
342 audio_close(s);
343 return 0;
344}
345
346/* grab support */
347
348static int audio_read_header(AVFormatContext *s1, AVFormatParameters *ap)
349{
350 AudioData *s = (AudioData *)s1->priv_data;
351 AVStream *st;
352 int ret;
353
354 if (!ap || ap->sample_rate <= 0 || ap->channels <= 0)
355 return -1;
356
357 st = av_new_stream(s1, 0);
358 if (!st) {
359 return AVERROR(ENOMEM);
360 }
361 s->sample_rate = ap->sample_rate;
362 s->channels = ap->channels;
363
364 ret = audio_open(s, 0, s1->filename);
365 if (ret < 0) {
366 av_free(st);
367 return AVERROR(EIO);
368 }
369 /* take real parameters */
370 st->codec->codec_type = CODEC_TYPE_AUDIO;
371 st->codec->codec_id = s->codec_id;
372 st->codec->sample_rate = s->sample_rate;
373 st->codec->channels = s->channels;
374 return 0;
375 av_set_pts_info(s1, 48, 1, 1000000); /* 48 bits pts in us */
376}
377
378static int audio_read_packet(AVFormatContext *s1, AVPacket *pkt)
379{
380 AudioData *s = (AudioData *)s1->priv_data;
381 int size;
382 size_t len, amount;
383 unsigned char *buf;
384 status_t err;
385
386 if (av_new_packet(pkt, s->frame_size) < 0)
387 return AVERROR(EIO);
388 buf = (unsigned char *)pkt->data;
389 size = pkt->size;
390 while (size > 0) {
391 len = MIN(AUDIO_BLOCK_SIZE, size);
392 //printf("acquire_sem(output, %d)\n", len);
393 while ((err=acquire_sem_etc(s->output_sem, len, B_CAN_INTERRUPT, 0LL)) == B_INTERRUPTED);
394 if (err < B_OK) {
395 av_free_packet(pkt);
396 return AVERROR(EIO);
397 }
398 amount = MIN(len, (AUDIO_BUFFER_SIZE - s->output_index));
399 memcpy(buf, &s->buffer[s->output_index], amount);
400 s->output_index += amount;
401 if (s->output_index >= AUDIO_BUFFER_SIZE) {
402 s->output_index %= AUDIO_BUFFER_SIZE;
403 memcpy(buf + amount, &s->buffer[s->output_index], len - amount);
404 s->output_index += len-amount;
405 s->output_index %= AUDIO_BUFFER_SIZE;
406 }
407 release_sem_etc(s->input_sem, len, 0);
408 //printf("release_sem(input, %d)\n", len);
409 buf += len;
410 size -= len;
411 }
412 //XXX: add pts info
413 return 0;
414}
415
416static int audio_read_close(AVFormatContext *s1)
417{
418 AudioData *s = (AudioData *)s1->priv_data;
419
420 audio_close(s);
421 return 0;
422}
423
424static AVInputFormat audio_beos_demuxer = {
425 "audio_beos",
426 NULL_IF_CONFIG_SMALL("audio grab and output"),
427 sizeof(AudioData),
428 NULL,
429 audio_read_header,
430 audio_read_packet,
431 audio_read_close,
432 NULL,
433 AVFMT_NOFILE,
434};
435
436AVOutputFormat audio_beos_muxer = {
437 "audio_beos",
438 NULL_IF_CONFIG_SMALL("audio grab and output"),
439 "",
440 "",
441 sizeof(AudioData),
442#ifdef WORDS_BIGENDIAN
443 CODEC_ID_PCM_S16BE,
444#else
445 CODEC_ID_PCM_S16LE,
446#endif
447 CODEC_ID_NONE,
448 audio_write_header,
449 audio_write_packet,
450 audio_write_trailer,
451 AVFMT_NOFILE,
452};
453
454extern "C" {
455
456int audio_init(void)
457{
458 main_thid = find_thread(NULL);
459 av_register_input_format(&audio_beos_demuxer);
460 av_register_output_format(&audio_beos_muxer);
461 return 0;
462}
463
464} // "C"
465
diff --git a/src/plugins/ffmpeg/libavdevice/bktr.c b/src/plugins/ffmpeg/libavdevice/bktr.c
deleted file mode 100644
index 7b37f11..0000000
--- a/src/plugins/ffmpeg/libavdevice/bktr.c
+++ /dev/null
@@ -1,321 +0,0 @@
1/*
2 * *BSD video grab interface
3 * Copyright (c) 2002 Steve O'Hara-Smith
4 * based on
5 * Linux video grab interface
6 * Copyright (c) 2000,2001 Gerard Lantau.
7 * and
8 * simple_grab.c Copyright (c) 1999 Roger Hardiman
9 *
10 * This file is part of FFmpeg.
11 *
12 * FFmpeg is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU Lesser General Public
14 * License as published by the Free Software Foundation; either
15 * version 2.1 of the License, or (at your option) any later version.
16 *
17 * FFmpeg is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * Lesser General Public License for more details.
21 *
22 * You should have received a copy of the GNU Lesser General Public
23 * License along with FFmpeg; if not, write to the Free Software
24 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
25 */
26
27#include "libavformat/avformat.h"
28#if defined (HAVE_DEV_BKTR_IOCTL_METEOR_H) && defined (HAVE_DEV_BKTR_IOCTL_BT848_H)
29# include <dev/bktr/ioctl_meteor.h>
30# include <dev/bktr/ioctl_bt848.h>
31#elif defined (HAVE_MACHINE_IOCTL_METEOR_H) && defined (HAVE_MACHINE_IOCTL_BT848_H)
32# include <machine/ioctl_meteor.h>
33# include <machine/ioctl_bt848.h>
34#elif defined (HAVE_DEV_VIDEO_METEOR_IOCTL_METEOR_H) && defined (HAVE_DEV_VIDEO_METEOR_IOCTL_BT848_H)
35# include <dev/video/meteor/ioctl_meteor.h>
36# include <dev/video/bktr/ioctl_bt848.h>
37#elif HAVE_DEV_IC_BT8XX_H
38# include <dev/ic/bt8xx.h>
39#endif
40#include <unistd.h>
41#include <fcntl.h>
42#include <sys/ioctl.h>
43#include <sys/mman.h>
44#include <sys/time.h>
45#include <signal.h>
46
/* Per-stream private state for the *BSD bktr grab device. */
typedef struct {
    int video_fd;        /* fd of the opened video capture node */
    int tuner_fd;        /* fd of /dev/tuner0 (may be < 0 if open failed) */
    int width, height;   /* capture geometry in pixels */
    int frame_rate;      /* frame rate numerator (ap->time_base.den) */
    int frame_rate_base; /* frame rate denominator (ap->time_base.num) */
    u_int64_t per_frame; /* frame period in microseconds */
} VideoData;
55
56
57#define PAL 1
58#define PALBDGHI 1
59#define NTSC 2
60#define NTSCM 2
61#define SECAM 3
62#define PALN 4
63#define PALM 5
64#define NTSCJ 6
65
66/* PAL is 768 x 576. NTSC is 640 x 480 */
67#define PAL_HEIGHT 576
68#define SECAM_HEIGHT 576
69#define NTSC_HEIGHT 480
70
71#ifndef VIDEO_FORMAT
72#define VIDEO_FORMAT NTSC
73#endif
74
/* Map the input-device index (0..4) onto the driver's source selector. */
static int bktr_dev[] = { METEOR_DEV0, METEOR_DEV1, METEOR_DEV2,
                          METEOR_DEV3, METEOR_DEV_SVIDEO };

/* NOTE(review): these globals are not static and are shared by all
 * instances — only one bktr capture per process can work. Confirm no
 * other translation unit references them before making them static. */
uint8_t *video_buf;             /* mmap'ed driver frame buffer */
size_t video_buf_size;          /* size of the mapping in bytes */
u_int64_t last_frame_time;      /* av_gettime() of the previous frame */
volatile sig_atomic_t nsignals; /* SIGUSR1s received since the last frame */
82
83
84static void catchsignal(int signal)
85{
86 nsignals++;
87 return;
88}
89
/*
 * Open and configure the bktr video node and (best-effort) the tuner.
 *
 * video_device: path of the capture node to open
 * width/height: requested capture geometry
 * format:       PAL/NTSC/SECAM/... selector (1-6); out-of-range values
 *               fall back to $BKTR_FORMAT, then VIDEO_FORMAT
 * video_fd/tuner_fd: out parameters receiving the opened descriptors
 * idev:         input selector index (0-4); out-of-range falls back to
 *               $BKTR_DEV, then input 1
 * frequency:    tuner frequency; <= 0 falls back to $BKTR_FREQUENCY
 *
 * Returns 0 on success, -1 on fatal error (tuner problems only warn).
 * Side effects: installs a SIGUSR1 handler, mmaps the driver's frame
 * buffer into the global video_buf, and starts continuous capture.
 */
static int bktr_init(const char *video_device, int width, int height,
    int format, int *video_fd, int *tuner_fd, int idev, double frequency)
{
    struct meteor_geomet geo;
    int h_max;
    long ioctl_frequency;
    char *arg;
    int c;
    struct sigaction act, old;

    /* resolve the input selector: argument, environment, then default */
    if (idev < 0 || idev > 4)
    {
        arg = getenv ("BKTR_DEV");
        if (arg)
            idev = atoi (arg);
        if (idev < 0 || idev > 4)
            idev = 1;
    }

    /* resolve the video standard the same way */
    if (format < 1 || format > 6)
    {
        arg = getenv ("BKTR_FORMAT");
        if (arg)
            format = atoi (arg);
        if (format < 1 || format > 6)
            format = VIDEO_FORMAT;
    }

    /* resolve the tuner frequency; 0.0 means "leave the tuner alone" */
    if (frequency <= 0)
    {
        arg = getenv ("BKTR_FREQUENCY");
        if (arg)
            frequency = atof (arg);
        if (frequency <= 0)
            frequency = 0.0;
    }

    /* the driver delivers SIGUSR1 per captured frame; count them */
    memset(&act, 0, sizeof(act));
    sigemptyset(&act.sa_mask);
    act.sa_handler = catchsignal;
    sigaction(SIGUSR1, &act, &old);

    /* tuner is optional: capture proceeds even if it cannot be opened */
    *tuner_fd = open("/dev/tuner0", O_RDONLY);
    if (*tuner_fd < 0)
        av_log(NULL, AV_LOG_ERROR, "Warning. Tuner not opened, continuing: %s\n", strerror(errno));

    /* NOTE(review): on the failure paths below, the opened tuner_fd and
     * the installed signal handler are not cleaned up — confirm callers
     * tolerate this before relying on repeated init attempts. */
    *video_fd = open(video_device, O_RDONLY);
    if (*video_fd < 0) {
        av_log(NULL, AV_LOG_ERROR, "%s: %s\n", video_device, strerror(errno));
        return -1;
    }

    geo.rows = height;
    geo.columns = width;
    geo.frames = 1;
    geo.oformat = METEOR_GEO_YUV_422 | METEOR_GEO_YUV_12;

    /* pick the per-standard maximum height and the bt848 input format */
    switch (format) {
    case PAL:   h_max = PAL_HEIGHT;   c = BT848_IFORM_F_PALBDGHI; break;
    case PALN:  h_max = PAL_HEIGHT;   c = BT848_IFORM_F_PALN;     break;
    case PALM:  h_max = PAL_HEIGHT;   c = BT848_IFORM_F_PALM;     break;
    case SECAM: h_max = SECAM_HEIGHT; c = BT848_IFORM_F_SECAM;    break;
    case NTSC:  h_max = NTSC_HEIGHT;  c = BT848_IFORM_F_NTSCM;    break;
    case NTSCJ: h_max = NTSC_HEIGHT;  c = BT848_IFORM_F_NTSCJ;    break;
    default:    h_max = PAL_HEIGHT;   c = BT848_IFORM_F_PALBDGHI; break;
    }

    /* small captures can use a single field instead of a full frame */
    if (height <= h_max / 2)
        geo.oformat |= METEOR_GEO_EVEN_ONLY;

    if (ioctl(*video_fd, METEORSETGEO, &geo) < 0) {
        av_log(NULL, AV_LOG_ERROR, "METEORSETGEO: %s\n", strerror(errno));
        return -1;
    }

    if (ioctl(*video_fd, BT848SFMT, &c) < 0) {
        av_log(NULL, AV_LOG_ERROR, "BT848SFMT: %s\n", strerror(errno));
        return -1;
    }

    c = bktr_dev[idev];
    if (ioctl(*video_fd, METEORSINPUT, &c) < 0) {
        av_log(NULL, AV_LOG_ERROR, "METEORSINPUT: %s\n", strerror(errno));
        return -1;
    }

    /* planar YUV420: 12 bits per pixel */
    video_buf_size = width * height * 12 / 8;

    video_buf = (uint8_t *)mmap((caddr_t)0, video_buf_size,
                                PROT_READ, MAP_SHARED, *video_fd, (off_t)0);
    if (video_buf == MAP_FAILED) {
        av_log(NULL, AV_LOG_ERROR, "mmap: %s\n", strerror(errno));
        return -1;
    }

    /* the tuner ioctl expects the frequency in 1/16 MHz units */
    if (frequency != 0.0) {
        ioctl_frequency  = (unsigned long)(frequency*16);
        if (ioctl(*tuner_fd, TVTUNER_SETFREQ, &ioctl_frequency) < 0)
            av_log(NULL, AV_LOG_ERROR, "TVTUNER_SETFREQ: %s\n", strerror(errno));
    }

    c = AUDIO_UNMUTE;
    if (ioctl(*tuner_fd, BT848_SAUDIO, &c) < 0)
        av_log(NULL, AV_LOG_ERROR, "TVTUNER_SAUDIO: %s\n", strerror(errno));

    /* start continuous capture and ask for SIGUSR1 on each frame */
    c = METEOR_CAP_CONTINOUS;
    ioctl(*video_fd, METEORCAPTUR, &c);

    c = SIGUSR1;
    ioctl(*video_fd, METEORSSIGNAL, &c);

    return 0;
}
203
204static void bktr_getframe(u_int64_t per_frame)
205{
206 u_int64_t curtime;
207
208 curtime = av_gettime();
209 if (!last_frame_time
210 || ((last_frame_time + per_frame) > curtime)) {
211 if (!usleep(last_frame_time + per_frame + per_frame / 8 - curtime)) {
212 if (!nsignals)
213 av_log(NULL, AV_LOG_INFO,
214 "SLEPT NO signals - %d microseconds late\n",
215 (int)(av_gettime() - last_frame_time - per_frame));
216 }
217 }
218 nsignals = 0;
219 last_frame_time = curtime;
220}
221
222
223/* note: we support only one picture read at a time */
224static int grab_read_packet(AVFormatContext *s1, AVPacket *pkt)
225{
226 VideoData *s = s1->priv_data;
227
228 if (av_new_packet(pkt, video_buf_size) < 0)
229 return AVERROR(EIO);
230
231 bktr_getframe(s->per_frame);
232
233 pkt->pts = av_gettime();
234 memcpy(pkt->data, video_buf, video_buf_size);
235
236 return video_buf_size;
237}
238
/*
 * Demuxer read_header: validate the caller-supplied parameters, create
 * the single raw-video stream and initialize the bktr device named by
 * s1->filename. Requires ap->width, ap->height and ap->time_base.
 * Returns 0 on success, -1 on bad parameters, AVERROR on failure.
 */
static int grab_read_header(AVFormatContext *s1, AVFormatParameters *ap)
{
    VideoData *s = s1->priv_data;
    AVStream *st;
    int width, height;
    int frame_rate;
    int frame_rate_base;
    int format = -1;

    if (ap->width <= 0 || ap->height <= 0 || ap->time_base.den <= 0)
        return -1;

    width = ap->width;
    height = ap->height;
    frame_rate = ap->time_base.den;
    frame_rate_base = ap->time_base.num;

    st = av_new_stream(s1, 0);
    if (!st)
        return AVERROR(ENOMEM);
    av_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in use */

    s->width = width;
    s->height = height;
    s->frame_rate = frame_rate;
    s->frame_rate_base = frame_rate_base;
    /* frame period in microseconds, used by bktr_getframe() for pacing */
    s->per_frame = ((u_int64_t)1000000 * s->frame_rate_base) / s->frame_rate;

    st->codec->codec_type = CODEC_TYPE_VIDEO;
    st->codec->pix_fmt = PIX_FMT_YUV420P;
    st->codec->codec_id = CODEC_ID_RAWVIDEO;
    st->codec->width = width;
    st->codec->height = height;
    st->codec->time_base.den = frame_rate;
    st->codec->time_base.num = frame_rate_base;

    /* map the optional standard name to the bktr selector; any other
     * value stays -1 and lets bktr_init() choose its default */
    if (ap->standard) {
        if (!strcasecmp(ap->standard, "pal"))
            format = PAL;
        else if (!strcasecmp(ap->standard, "secam"))
            format = SECAM;
        else if (!strcasecmp(ap->standard, "ntsc"))
            format = NTSC;
    }

    if (bktr_init(s1->filename, width, height, format,
            &(s->video_fd), &(s->tuner_fd), -1, 0.0) < 0)
        return AVERROR(EIO);

    /* reset the pacing state shared with bktr_getframe() */
    nsignals = 0;
    last_frame_time = 0;

    return 0;
}
293
294static int grab_read_close(AVFormatContext *s1)
295{
296 VideoData *s = s1->priv_data;
297 int c;
298
299 c = METEOR_CAP_STOP_CONT;
300 ioctl(s->video_fd, METEORCAPTUR, &c);
301 close(s->video_fd);
302
303 c = AUDIO_MUTE;
304 ioctl(s->tuner_fd, BT848_SAUDIO, &c);
305 close(s->tuner_fd);
306
307 munmap((caddr_t)video_buf, video_buf_size);
308
309 return 0;
310}
311
/* Demuxer definition for the *BSD "bktr" video grab device. AVFMT_NOFILE:
 * the capture node is opened by grab_read_header itself. */
AVInputFormat bktr_demuxer = {
    "bktr",
    NULL_IF_CONFIG_SMALL("video grab"),
    sizeof(VideoData),
    NULL,               /* read_probe: device input, nothing to probe */
    grab_read_header,
    grab_read_packet,
    grab_read_close,
    .flags = AVFMT_NOFILE,
};
diff --git a/src/plugins/ffmpeg/libavdevice/dv1394.c b/src/plugins/ffmpeg/libavdevice/dv1394.c
deleted file mode 100644
index 56e6122..0000000
--- a/src/plugins/ffmpeg/libavdevice/dv1394.c
+++ /dev/null
@@ -1,237 +0,0 @@
1/*
2 * Linux DV1394 interface
3 * Copyright (c) 2003 Max Krasnyansky <maxk@qualcomm.com>
4 *
5 * This file is part of FFmpeg.
6 *
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22#include "config.h"
23#include <unistd.h>
24#include <fcntl.h>
25#include <errno.h>
26#include <sys/ioctl.h>
27#include <sys/mman.h>
28#include <sys/poll.h>
29#include <sys/time.h>
30#include <time.h>
31
32#include "libavformat/avformat.h"
33
34#undef DV1394_DEBUG
35
36#include "libavformat/dv.h"
37#include "dv1394.h"
38
/* Demuxer private state for the Linux dv1394 capture device. */
struct dv1394_data {
    int fd;        /* fd of the opened dv1394 device node */
    int channel;   /* isochronous channel to receive on */
    int format;    /* requested standard: DV1394_PAL or DV1394_NTSC */

    uint8_t *ring; /* Ring buffer (mmap of the driver's frame ring) */
    int index;     /* Current frame index */
    int avail;     /* Number of frames available for reading */
    int done;      /* Number of completed frames (to hand back to driver) */

    DVDemuxContext* dv_demux; /* Generic DV muxing/demuxing context */
};
51
/*
 * The trick here is to kludge around a well-known problem with the kernel
 * Oopsing when you try to capture PAL on a device node configured for NTSC.
 * That's why we have to configure the device node for PAL, and then read only
 * an NTSC amount of data.
 */
58static int dv1394_reset(struct dv1394_data *dv)
59{
60 struct dv1394_init init;
61
62 init.channel = dv->channel;
63 init.api_version = DV1394_API_VERSION;
64 init.n_frames = DV1394_RING_FRAMES;
65 init.format = DV1394_PAL;
66
67 if (ioctl(dv->fd, DV1394_INIT, &init) < 0)
68 return -1;
69
70 dv->avail = dv->done = 0;
71 return 0;
72}
73
74static int dv1394_start(struct dv1394_data *dv)
75{
76 /* Tell DV1394 driver to enable receiver */
77 if (ioctl(dv->fd, DV1394_START_RECEIVE, 0) < 0) {
78 av_log(NULL, AV_LOG_ERROR, "Failed to start receiver: %s\n", strerror(errno));
79 return -1;
80 }
81 return 0;
82}
83
84static int dv1394_read_header(AVFormatContext * context, AVFormatParameters * ap)
85{
86 struct dv1394_data *dv = context->priv_data;
87
88 dv->dv_demux = dv_init_demux(context);
89 if (!dv->dv_demux)
90 goto failed;
91
92 if (ap->standard && !strcasecmp(ap->standard, "pal"))
93 dv->format = DV1394_PAL;
94 else
95 dv->format = DV1394_NTSC;
96
97 if (ap->channel)
98 dv->channel = ap->channel;
99 else
100 dv->channel = DV1394_DEFAULT_CHANNEL;
101
102 /* Open and initialize DV1394 device */
103 dv->fd = open(context->filename, O_RDONLY);
104 if (dv->fd < 0) {
105 av_log(context, AV_LOG_ERROR, "Failed to open DV interface: %s\n", strerror(errno));
106 goto failed;
107 }
108
109 if (dv1394_reset(dv) < 0) {
110 av_log(context, AV_LOG_ERROR, "Failed to initialize DV interface: %s\n", strerror(errno));
111 goto failed;
112 }
113
114 dv->ring = mmap(NULL, DV1394_PAL_FRAME_SIZE * DV1394_RING_FRAMES,
115 PROT_READ, MAP_PRIVATE, dv->fd, 0);
116 if (dv->ring == MAP_FAILED) {
117 av_log(context, AV_LOG_ERROR, "Failed to mmap DV ring buffer: %s\n", strerror(errno));
118 goto failed;
119 }
120
121 if (dv1394_start(dv) < 0)
122 goto failed;
123
124 return 0;
125
126failed:
127 close(dv->fd);
128 return AVERROR(EIO);
129}
130
/*
 * Demuxer read_packet: hand out any packet the DV demuxer has buffered;
 * otherwise recycle finished ring frames back to the driver, poll until
 * new frames are complete, then feed the next raw frame to
 * dv_produce_packet(). The device is reset and restarted on ring
 * overflow or reported frame drops.
 * Returns the packet size (>0), or a negative AVERROR on failure.
 */
static int dv1394_read_packet(AVFormatContext *context, AVPacket *pkt)
{
    struct dv1394_data *dv = context->priv_data;
    int size;

    /* a DV frame can yield several packets; drain them first */
    size = dv_get_packet(dv->dv_demux, pkt);
    if (size > 0)
        return size;

    if (!dv->avail) {
        struct dv1394_status s;
        struct pollfd p;

        if (dv->done) {
            /* Request more frames */
            if (ioctl(dv->fd, DV1394_RECEIVE_FRAMES, dv->done) < 0) {
                /* This usually means that ring buffer overflowed.
                 * We have to reset :(.
                 */

                av_log(context, AV_LOG_ERROR, "DV1394: Ring buffer overflow. Reseting ..\n");

                dv1394_reset(dv);
                dv1394_start(dv);
            }
            dv->done = 0;
        }

        /* Wait until more frames are available */
restart_poll:
        p.fd = dv->fd;
        p.events = POLLIN | POLLERR | POLLHUP;
        if (poll(&p, 1, -1) < 0) {
            if (errno == EAGAIN || errno == EINTR)
                goto restart_poll;
            av_log(context, AV_LOG_ERROR, "Poll failed: %s\n", strerror(errno));
            return AVERROR(EIO);
        }

        if (ioctl(dv->fd, DV1394_GET_STATUS, &s) < 0) {
            av_log(context, AV_LOG_ERROR, "Failed to get status: %s\n", strerror(errno));
            return AVERROR(EIO);
        }
#ifdef DV1394_DEBUG
        av_log(context, AV_LOG_DEBUG, "DV1394: status\n"
                "\tactive_frame\t%d\n"
                "\tfirst_clear_frame\t%d\n"
                "\tn_clear_frames\t%d\n"
                "\tdropped_frames\t%d\n",
                s.active_frame, s.first_clear_frame,
                s.n_clear_frames, s.dropped_frames);
#endif

        dv->avail = s.n_clear_frames;
        dv->index = s.first_clear_frame;
        dv->done  = 0;

        if (s.dropped_frames) {
            av_log(context, AV_LOG_ERROR, "DV1394: Frame drop detected (%d). Reseting ..\n",
                    s.dropped_frames);

            dv1394_reset(dv);
            dv1394_start(dv);
        }
    }

#ifdef DV1394_DEBUG
    av_log(context, AV_LOG_DEBUG, "index %d, avail %d, done %d\n", dv->index, dv->avail,
            dv->done);
#endif

    /* frames are PAL-sized in the ring regardless of the standard —
     * intentional, see the comment above dv1394_reset() */
    size = dv_produce_packet(dv->dv_demux, pkt,
                             dv->ring + (dv->index * DV1394_PAL_FRAME_SIZE),
                             DV1394_PAL_FRAME_SIZE);
    dv->index = (dv->index + 1) % DV1394_RING_FRAMES;
    dv->done++; dv->avail--;

    return size;
}
210
211static int dv1394_close(AVFormatContext * context)
212{
213 struct dv1394_data *dv = context->priv_data;
214
215 /* Shutdown DV1394 receiver */
216 if (ioctl(dv->fd, DV1394_SHUTDOWN, 0) < 0)
217 av_log(context, AV_LOG_ERROR, "Failed to shutdown DV1394: %s\n", strerror(errno));
218
219 /* Unmap ring buffer */
220 if (munmap(dv->ring, DV1394_NTSC_FRAME_SIZE * DV1394_RING_FRAMES) < 0)
221 av_log(context, AV_LOG_ERROR, "Failed to munmap DV1394 ring buffer: %s\n", strerror(errno));
222
223 close(dv->fd);
224 av_free(dv->dv_demux);
225
226 return 0;
227}
228
/* Demuxer definition for the Linux "dv1394" grab device. AVFMT_NOFILE:
 * the device node is opened by dv1394_read_header itself. */
AVInputFormat dv1394_demuxer = {
    .name           = "dv1394",
    .long_name      = NULL_IF_CONFIG_SMALL("DV1394 A/V grab"),
    .priv_data_size = sizeof(struct dv1394_data),
    .read_header    = dv1394_read_header,
    .read_packet    = dv1394_read_packet,
    .read_close     = dv1394_close,
    .flags          = AVFMT_NOFILE
};
diff --git a/src/plugins/ffmpeg/libavdevice/dv1394.h b/src/plugins/ffmpeg/libavdevice/dv1394.h
deleted file mode 100644
index 7f3521d..0000000
--- a/src/plugins/ffmpeg/libavdevice/dv1394.h
+++ /dev/null
@@ -1,356 +0,0 @@
1/*
2 * dv1394.h - DV input/output over IEEE 1394 on OHCI chips
3 * Copyright (C)2001 Daniel Maas <dmaas@dcine.com>
4 * receive, proc_fs by Dan Dennedy <dan@dennedy.org>
5 *
6 * based on:
7 * video1394.h - driver for OHCI 1394 boards
8 * Copyright (C)1999,2000 Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>
9 * Peter Schlaile <udbz@rz.uni-karlsruhe.de>
10 *
11 * This file is part of FFmpeg.
12 *
13 * FFmpeg is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU Lesser General Public
15 * License as published by the Free Software Foundation; either
16 * version 2.1 of the License, or (at your option) any later version.
17 *
18 * FFmpeg is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 * Lesser General Public License for more details.
22 *
23 * You should have received a copy of the GNU Lesser General Public
24 * License along with FFmpeg; if not, write to the Free Software
25 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26 */
27
28#ifndef FFMPEG_DV1394_H
29#define FFMPEG_DV1394_H
30
31#define DV1394_DEFAULT_CHANNEL 63
32#define DV1394_DEFAULT_CARD 0
33#define DV1394_RING_FRAMES 20
34
35#define DV1394_WIDTH 720
36#define DV1394_NTSC_HEIGHT 480
37#define DV1394_PAL_HEIGHT 576
38
39/* This is the public user-space interface. Try not to break it. */
40
41#define DV1394_API_VERSION 0x20011127
42
43/* ********************
44 ** **
45 ** DV1394 API **
46 ** **
47 ********************
48
49 There are two methods of operating the DV1394 DV output device.
50
51 1)
52
53 The simplest is an interface based on write(): simply write
54 full DV frames of data to the device, and they will be transmitted
55 as quickly as possible. The FD may be set for non-blocking I/O,
56 in which case you can use select() or poll() to wait for output
57 buffer space.
58
59 To set the DV output parameters (e.g. whether you want NTSC or PAL
60 video), use the DV1394_INIT ioctl, passing in the parameters you
61 want in a struct dv1394_init.
62
63 Example 1:
64 To play a raw .DV file: cat foo.DV > /dev/dv1394
65 (cat will use write() internally)
66
67 Example 2:
68 static struct dv1394_init init = {
69 0x63, (broadcast channel)
70 4, (four-frame ringbuffer)
71 DV1394_NTSC, (send NTSC video)
72 0, 0 (default empty packet rate)
73 }
74
75 ioctl(fd, DV1394_INIT, &init);
76
77 while(1) {
78 read( <a raw DV file>, buf, DV1394_NTSC_FRAME_SIZE );
79 write( <the dv1394 FD>, buf, DV1394_NTSC_FRAME_SIZE );
80 }
81
82 2)
83
84 For more control over buffering, and to avoid unnecessary copies
85 of the DV data, you can use the more sophisticated the mmap() interface.
86 First, call the DV1394_INIT ioctl to specify your parameters,
87 including the number of frames in the ringbuffer. Then, calling mmap()
88 on the dv1394 device will give you direct access to the ringbuffer
89 from which the DV card reads your frame data.
90
91 The ringbuffer is simply one large, contiguous region of memory
92 containing two or more frames of packed DV data. Each frame of DV data
93 is 120000 bytes (NTSC) or 144000 bytes (PAL).
94
95 Fill one or more frames in the ringbuffer, then use the DV1394_SUBMIT_FRAMES
96 ioctl to begin I/O. You can use either the DV1394_WAIT_FRAMES ioctl
97 or select()/poll() to wait until the frames are transmitted. Next, you'll
98 need to call the DV1394_GET_STATUS ioctl to determine which ringbuffer
99 frames are clear (ready to be filled with new DV data). Finally, use
100 DV1394_SUBMIT_FRAMES again to send the new data to the DV output.
101
102
103 Example: here is what a four-frame ringbuffer might look like
104 during DV transmission:
105
106
107 frame 0 frame 1 frame 2 frame 3
108
109 *--------------------------------------*
110 | CLEAR | DV data | DV data | CLEAR |
111 *--------------------------------------*
112 <ACTIVE>
113
114 transmission goes in this direction --->>>
115
116
117 The DV hardware is currently transmitting the data in frame 1.
118 Once frame 1 is finished, it will automatically transmit frame 2.
119 (if frame 2 finishes before frame 3 is submitted, the device
120 will continue to transmit frame 2, and will increase the dropped_frames
121 counter each time it repeats the transmission).
122
123
124 If you called DV1394_GET_STATUS at this instant, you would
125 receive the following values:
126
127 n_frames = 4
128 active_frame = 1
129 first_clear_frame = 3
130 n_clear_frames = 2
131
132 At this point, you should write new DV data into frame 3 and optionally
133 frame 0. Then call DV1394_SUBMIT_FRAMES to inform the device that
134 it may transmit the new frames.
135
136 ERROR HANDLING
137
138 An error (buffer underflow/overflow or a break in the DV stream due
139 to a 1394 bus reset) can be detected by checking the dropped_frames
140 field of struct dv1394_status (obtained through the
141 DV1394_GET_STATUS ioctl).
142
143 The best way to recover from such an error is to re-initialize
144 dv1394, either by using the DV1394_INIT ioctl call, or closing the
145 file descriptor and opening it again. (note that you must unmap all
146 ringbuffer mappings when closing the file descriptor, or else
147 dv1394 will still be considered 'in use').
148
149 MAIN LOOP
150
151 For maximum efficiency and robustness against bus errors, you are
152 advised to model the main loop of your application after the
153 following pseudo-code example:
154
155 (checks of system call return values omitted for brevity; always
156 check return values in your code!)
157
158 while( frames left ) {
159
160 struct pollfd *pfd = ...;
161
162 pfd->fd = dv1394_fd;
163 pfd->revents = 0;
164 pfd->events = POLLOUT | POLLIN; (OUT for transmit, IN for receive)
165
166 (add other sources of I/O here)
167
168 poll(pfd, 1, -1); (or select(); add a timeout if you want)
169
170 if(pfd->revents) {
171 struct dv1394_status status;
172
173 ioctl(dv1394_fd, DV1394_GET_STATUS, &status);
174
175 if(status.dropped_frames > 0) {
176 reset_dv1394();
177 } else {
178 for(int i = 0; i < status.n_clear_frames; i++) {
179 copy_DV_frame();
180 }
181 }
182 }
183 }
184
185 where copy_DV_frame() reads or writes on the dv1394 file descriptor
186 (read/write mode) or copies data to/from the mmap ringbuffer and
187 then calls ioctl(DV1394_SUBMIT_FRAMES) to notify dv1394 that new
     frames are available (mmap mode).
189
190 reset_dv1394() is called in the event of a buffer
191 underflow/overflow or a halt in the DV stream (e.g. due to a 1394
192 bus reset). To guarantee recovery from the error, this function
193 should close the dv1394 file descriptor (and munmap() all
194 ringbuffer mappings, if you are using them), then re-open the
195 dv1394 device (and re-map the ringbuffer).
196
197*/
198
199
200/* maximum number of frames in the ringbuffer */
201#define DV1394_MAX_FRAMES 32
202
203/* number of *full* isochronous packets per DV frame */
204#define DV1394_NTSC_PACKETS_PER_FRAME 250
205#define DV1394_PAL_PACKETS_PER_FRAME 300
206
207/* size of one frame's worth of DV data, in bytes */
208#define DV1394_NTSC_FRAME_SIZE (480 * DV1394_NTSC_PACKETS_PER_FRAME)
209#define DV1394_PAL_FRAME_SIZE (480 * DV1394_PAL_PACKETS_PER_FRAME)
210
211
/* ioctl() commands (the request numbers are this enum's values) */

enum {
    /* I don't like using 0 as a valid ioctl() */
    DV1394_INVALID = 0,


    /* get the driver ready to transmit video.
       pass a struct dv1394_init* as the parameter (see below),
       or NULL to get default parameters */
    DV1394_INIT,


    /* stop transmitting video and free the ringbuffer */
    DV1394_SHUTDOWN,


    /* submit N new frames to be transmitted, where
       the index of the first new frame is first_clear_buffer,
       and the index of the last new frame is
       (first_clear_buffer + N) % n_frames */
    DV1394_SUBMIT_FRAMES,


    /* block until N buffers are clear (pass N as the parameter)
       Because we re-transmit the last frame on underrun, there
       will at most be n_frames - 1 clear frames at any time */
    DV1394_WAIT_FRAMES,

    /* capture new frames that have been received, where
       the index of the first new frame is first_clear_buffer,
       and the index of the last new frame is
       (first_clear_buffer + N) % n_frames */
    DV1394_RECEIVE_FRAMES,

    /* start receiving frames (dv1394.c invokes it with 0 as the
       ioctl argument) */
    DV1394_START_RECEIVE,


    /* pass a struct dv1394_status* as the parameter (see below) */
    DV1394_GET_STATUS,
};
254
255
256
/* video standard selector, used by struct dv1394_init.format */
enum pal_or_ntsc {
    DV1394_NTSC = 0,
    DV1394_PAL
};
261
262
263
264
/* this is the argument to DV1394_INIT (all fields not explicitly set
   should be zeroed by the caller to get driver defaults) */
struct dv1394_init {
    /* DV1394_API_VERSION */
    unsigned int api_version;

    /* isochronous transmission channel to use */
    unsigned int channel;

    /* number of frames in the ringbuffer. Must be at least 2
       and at most DV1394_MAX_FRAMES. */
    unsigned int n_frames;

    /* send/receive PAL or NTSC video format */
    enum pal_or_ntsc format;

    /* the following are used only for transmission */

    /* set these to zero unless you want a
       non-default empty packet rate (see below) */
    unsigned long cip_n;
    unsigned long cip_d;

    /* set this to zero unless you want a
       non-default SYT cycle offset (default = 3 cycles) */
    unsigned int syt_offset;
};
291
292/* NOTE: you may only allocate the DV frame ringbuffer once each time
293 you open the dv1394 device. DV1394_INIT will fail if you call it a
294 second time with different 'n_frames' or 'format' arguments (which
295 would imply a different size for the ringbuffer). If you need a
296 different buffer size, simply close and re-open the device, then
297 initialize it with your new settings. */
298
299/* Q: What are cip_n and cip_d? */
300
301/*
302 A: DV video streams do not utilize 100% of the potential bandwidth offered
303 by IEEE 1394 (FireWire). To achieve the correct rate of data transmission,
304 DV devices must periodically insert empty packets into the 1394 data stream.
305 Typically there is one empty packet per 14-16 data-carrying packets.
306
307 Some DV devices will accept a wide range of empty packet rates, while others
308 require a precise rate. If the dv1394 driver produces empty packets at
309 a rate that your device does not accept, you may see ugly patterns on the
310 DV output, or even no output at all.
311
312 The default empty packet insertion rate seems to work for many people; if
313 your DV output is stable, you can simply ignore this discussion. However,
314 we have exposed the empty packet rate as a parameter to support devices that
315 do not work with the default rate.
316
317 The decision to insert an empty packet is made with a numerator/denominator
318 algorithm. Empty packets are produced at an average rate of CIP_N / CIP_D.
319 You can alter the empty packet rate by passing non-zero values for cip_n
320 and cip_d to the INIT ioctl.
321
322 */
323
324
325
/* result of the DV1394_GET_STATUS ioctl */
struct dv1394_status {
    /* this embedded init struct returns the current dv1394
       parameters in use */
    struct dv1394_init init;

    /* the ringbuffer frame that is currently being
       displayed. (-1 if the device is not transmitting anything) */
    int active_frame;

    /* index of the first buffer (ahead of active_frame) that
       is ready to be filled with data */
    unsigned int first_clear_frame;

    /* how many buffers, including first_clear_frame, are
       ready to be filled with data */
    unsigned int n_clear_frames;

    /* how many times the DV stream has underflowed, overflowed,
       or otherwise encountered an error, since the previous call
       to DV1394_GET_STATUS */
    unsigned int dropped_frames;

    /* N.B. The dropped_frames counter is only a lower bound on the actual
       number of dropped frames, with the special case that if dropped_frames
       is zero, then it is guaranteed that NO frames have been dropped
       since the last call to DV1394_GET_STATUS.
    */
};
354
355
356#endif /* FFMPEG_DV1394_H */
diff --git a/src/plugins/ffmpeg/libavdevice/libdc1394.c b/src/plugins/ffmpeg/libavdevice/libdc1394.c
deleted file mode 100644
index 1457d8c..0000000
--- a/src/plugins/ffmpeg/libavdevice/libdc1394.c
+++ /dev/null
@@ -1,372 +0,0 @@
1/*
2 * IIDC1394 grab interface (uses libdc1394 and libraw1394)
3 * Copyright (c) 2004 Roman Shaposhnik
4 * Copyright (c) 2008 Alessandro Sappia
5 *
6 * This file is part of FFmpeg.
7 *
8 * FFmpeg is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2.1 of the License, or (at your option) any later version.
12 *
13 * FFmpeg is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with FFmpeg; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 */
22
23#include "config.h"
24#include "libavformat/avformat.h"
25
26#if ENABLE_LIBDC1394_2
27#include <dc1394/dc1394.h>
28#elif ENABLE_LIBDC1394_1
29#include <libraw1394/raw1394.h>
30#include <libdc1394/dc1394_control.h>
31
32#define DC1394_VIDEO_MODE_320x240_YUV422 MODE_320x240_YUV422
33#define DC1394_VIDEO_MODE_640x480_YUV411 MODE_640x480_YUV411
34#define DC1394_VIDEO_MODE_640x480_YUV422 MODE_640x480_YUV422
35#define DC1394_FRAMERATE_1_875 FRAMERATE_1_875
36#define DC1394_FRAMERATE_3_75 FRAMERATE_3_75
37#define DC1394_FRAMERATE_7_5 FRAMERATE_7_5
38#define DC1394_FRAMERATE_15 FRAMERATE_15
39#define DC1394_FRAMERATE_30 FRAMERATE_30
40#define DC1394_FRAMERATE_60 FRAMERATE_60
41#define DC1394_FRAMERATE_120 FRAMERATE_120
42#define DC1394_FRAMERATE_240 FRAMERATE_240
43#endif
44
45#undef free
46
/* Private demuxer state, shared by the libdc1394 v.1 and v.2 code paths. */
typedef struct dc1394_data {
#if ENABLE_LIBDC1394_1
    raw1394handle_t handle;        /* raw1394 bus handle */
    dc1394_cameracapture camera;   /* v.1 DMA capture state */
#elif ENABLE_LIBDC1394_2
    dc1394_t *d;                   /* libdc1394 library context */
    dc1394camera_t *camera;        /* the selected (first enumerated) camera */
    dc1394video_frame_t *frame;    /* last dequeued frame, re-enqueued on next read */
#endif
    int current_frame;             /* frames delivered so far (used for pts) */
    int fps;                       /* frame rate in 1/1000 fps units (30000 == 30 fps) */

    AVPacket packet;               /* reusable packet; only data/pts change per frame */
} dc1394_data;
61
/* Supported capture modes: maps an FFmpeg pixel format plus frame size
   onto the corresponding IIDC video-mode id. Matched in dc1394_read_common. */
struct dc1394_frame_format {
    int width;
    int height;
    enum PixelFormat pix_fmt;
    int frame_size_id;             /* DC1394_VIDEO_MODE_* */
} dc1394_frame_formats[] = {
    { 320, 240, PIX_FMT_UYVY422, DC1394_VIDEO_MODE_320x240_YUV422 },
    { 640, 480, PIX_FMT_UYYVYY411, DC1394_VIDEO_MODE_640x480_YUV411 },
    { 640, 480, PIX_FMT_UYVY422, DC1394_VIDEO_MODE_640x480_YUV422 },
    { 0, 0, 0, 0 } /* gotta be the last one */
};
73
/* Supported frame rates in 1/1000 fps units (30000 == 30 fps), mapped
   onto the corresponding IIDC framerate id. Matched in dc1394_read_common. */
struct dc1394_frame_rate {
    int frame_rate;                /* fps * 1000 */
    int frame_rate_id;             /* DC1394_FRAMERATE_* */
} dc1394_frame_rates[] = {
    { 1875, DC1394_FRAMERATE_1_875 },
    { 3750, DC1394_FRAMERATE_3_75 },
    { 7500, DC1394_FRAMERATE_7_5 },
    { 15000, DC1394_FRAMERATE_15 },
    { 30000, DC1394_FRAMERATE_30 },
    { 60000, DC1394_FRAMERATE_60 },
    {120000, DC1394_FRAMERATE_120 },
    {240000, DC1394_FRAMERATE_240 },
    { 0, 0 } /* gotta be the last one */
};
88
89static inline int dc1394_read_common(AVFormatContext *c, AVFormatParameters *ap,
90 struct dc1394_frame_format **select_fmt, struct dc1394_frame_rate **select_fps)
91{
92 dc1394_data* dc1394 = c->priv_data;
93 AVStream* vst;
94 struct dc1394_frame_format *fmt;
95 struct dc1394_frame_rate *fps;
96 enum PixelFormat pix_fmt = ap->pix_fmt == PIX_FMT_NONE ? PIX_FMT_UYVY422 : ap->pix_fmt; /* defaults */
97 int width = !ap->width ? 320 : ap->width;
98 int height = !ap->height ? 240 : ap->height;
99 int frame_rate = !ap->time_base.num ? 30000 : av_rescale(1000, ap->time_base.den, ap->time_base.num);
100
101 for (fmt = dc1394_frame_formats; fmt->width; fmt++)
102 if (fmt->pix_fmt == pix_fmt && fmt->width == width && fmt->height == height)
103 break;
104
105 for (fps = dc1394_frame_rates; fps->frame_rate; fps++)
106 if (fps->frame_rate == frame_rate)
107 break;
108
109 if (!fps->frame_rate || !fmt->width) {
110 av_log(c, AV_LOG_ERROR, "Can't find matching camera format for %s, %dx%d@%d:1000fps\n", avcodec_get_pix_fmt_name(pix_fmt),
111 width, height, frame_rate);
112 goto out;
113 }
114
115 /* create a video stream */
116 vst = av_new_stream(c, 0);
117 if (!vst)
118 goto out;
119 av_set_pts_info(vst, 64, 1, 1000);
120 vst->codec->codec_type = CODEC_TYPE_VIDEO;
121 vst->codec->codec_id = CODEC_ID_RAWVIDEO;
122 vst->codec->time_base.den = fps->frame_rate;
123 vst->codec->time_base.num = 1000;
124 vst->codec->width = fmt->width;
125 vst->codec->height = fmt->height;
126 vst->codec->pix_fmt = fmt->pix_fmt;
127
128 /* packet init */
129 av_init_packet(&dc1394->packet);
130 dc1394->packet.size = avpicture_get_size(fmt->pix_fmt, fmt->width, fmt->height);
131 dc1394->packet.stream_index = vst->index;
132 dc1394->packet.flags |= PKT_FLAG_KEY;
133
134 dc1394->current_frame = 0;
135 dc1394->fps = fps->frame_rate;
136
137 vst->codec->bit_rate = av_rescale(dc1394->packet.size * 8, fps->frame_rate, 1000);
138 *select_fps = fps;
139 *select_fmt = fmt;
140 return 0;
141out:
142 return -1;
143}
144
145#if ENABLE_LIBDC1394_1
/* Open and configure a camera via libdc1394 v.1 (raw1394 DMA API).
   On success an isochronous DMA capture is running; on any failure the
   resources acquired so far are released through the goto chain below. */
static int dc1394_v1_read_header(AVFormatContext *c, AVFormatParameters * ap)
{
    dc1394_data* dc1394 = c->priv_data;
    AVStream* vst;
    nodeid_t* camera_nodes;
    int res;
    struct dc1394_frame_format *fmt = NULL;
    struct dc1394_frame_rate *fps = NULL;

    /* resolve format/rate and create the video stream */
    if (dc1394_read_common(c,ap,&fmt,&fps) != 0)
        return -1;

    /* Now lets prep the hardware */
    dc1394->handle = dc1394_create_handle(0); /* FIXME: gotta have ap->port */
    if (!dc1394->handle) {
        av_log(c, AV_LOG_ERROR, "Can't acquire dc1394 handle on port %d\n", 0 /* ap->port */);
        goto out;
    }
    camera_nodes = dc1394_get_camera_nodes(dc1394->handle, &res, 1);
    if (!camera_nodes || camera_nodes[ap->channel] == DC1394_NO_CAMERA) {
        av_log(c, AV_LOG_ERROR, "There's no IIDC camera on the channel %d\n", ap->channel);
        goto out_handle;
    }
    /* NOTE(review): the 8 and 1 arguments are presumably the DMA ring size
       and drop_frames flag — confirm against the libdc1394 v.1 API docs */
    res = dc1394_dma_setup_capture(dc1394->handle, camera_nodes[ap->channel],
                                   0,
                                   FORMAT_VGA_NONCOMPRESSED,
                                   fmt->frame_size_id,
                                   SPEED_400,
                                   fps->frame_rate_id, 8, 1,
                                   c->filename,
                                   &dc1394->camera);
    dc1394_free_camera_nodes(camera_nodes);
    if (res != DC1394_SUCCESS) {
        av_log(c, AV_LOG_ERROR, "Can't prepare camera for the DMA capture\n");
        goto out_handle;
    }

    res = dc1394_start_iso_transmission(dc1394->handle, dc1394->camera.node);
    if (res != DC1394_SUCCESS) {
        av_log(c, AV_LOG_ERROR, "Can't start isochronous transmission\n");
        goto out_handle_dma;
    }

    return 0;

/* error unwind: release in reverse order of acquisition */
out_handle_dma:
    dc1394_dma_unlisten(dc1394->handle, &dc1394->camera);
    dc1394_dma_release_camera(dc1394->handle, &dc1394->camera);
out_handle:
    dc1394_destroy_handle(dc1394->handle);
out:
    return -1;
}
199
200static int dc1394_v1_read_packet(AVFormatContext *c, AVPacket *pkt)
201{
202 struct dc1394_data *dc1394 = c->priv_data;
203 int res;
204
205 /* discard stale frame */
206 if (dc1394->current_frame++) {
207 if (dc1394_dma_done_with_buffer(&dc1394->camera) != DC1394_SUCCESS)
208 av_log(c, AV_LOG_ERROR, "failed to release %d frame\n", dc1394->current_frame);
209 }
210
211 res = dc1394_dma_single_capture(&dc1394->camera);
212
213 if (res == DC1394_SUCCESS) {
214 dc1394->packet.data = (uint8_t *)(dc1394->camera.capture_buffer);
215 dc1394->packet.pts = (dc1394->current_frame * 1000000) / dc1394->fps;
216 res = dc1394->packet.size;
217 } else {
218 av_log(c, AV_LOG_ERROR, "DMA capture failed\n");
219 dc1394->packet.data = NULL;
220 res = -1;
221 }
222
223 *pkt = dc1394->packet;
224 return res;
225}
226
227static int dc1394_v1_close(AVFormatContext * context)
228{
229 struct dc1394_data *dc1394 = context->priv_data;
230
231 dc1394_stop_iso_transmission(dc1394->handle, dc1394->camera.node);
232 dc1394_dma_unlisten(dc1394->handle, &dc1394->camera);
233 dc1394_dma_release_camera(dc1394->handle, &dc1394->camera);
234 dc1394_destroy_handle(dc1394->handle);
235
236 return 0;
237}
238
239#elif ENABLE_LIBDC1394_2
240static int dc1394_v2_read_header(AVFormatContext *c, AVFormatParameters * ap)
241{
242 dc1394_data* dc1394 = c->priv_data;
243 dc1394camera_list_t *list;
244 int res, i;
245 struct dc1394_frame_format *fmt = NULL;
246 struct dc1394_frame_rate *fps = NULL;
247
248 if (dc1394_read_common(c,ap,&fmt,&fps) != 0)
249 return -1;
250
251 /* Now lets prep the hardware */
252 dc1394->d = dc1394_new();
253 dc1394_camera_enumerate (dc1394->d, &list);
254 if ( !list || list->num == 0) {
255 av_log(c, AV_LOG_ERROR, "Unable to look for an IIDC camera\n\n");
256 goto out;
257 }
258
259 /* FIXME: To select a specific camera I need to search in list its guid */
260 dc1394->camera = dc1394_camera_new (dc1394->d, list->ids[0].guid);
261 if (list->num > 1) {
262 av_log(c, AV_LOG_INFO, "Working with the first camera found\n");
263 }
264
265 /* Freeing list of cameras */
266 dc1394_camera_free_list (list);
267
268 /* Select MAX Speed possible from the cam */
269 if (dc1394->camera->bmode_capable>0) {
270 dc1394_video_set_operation_mode(dc1394->camera, DC1394_OPERATION_MODE_1394B);
271 i = DC1394_ISO_SPEED_800;
272 } else {
273 i = DC1394_ISO_SPEED_400;
274 }
275
276 for (res = DC1394_FAILURE; i >= DC1394_ISO_SPEED_MIN && res != DC1394_SUCCESS; i--) {
277 res=dc1394_video_set_iso_speed(dc1394->camera, i);
278 }
279 if (res != DC1394_SUCCESS) {
280 av_log(c, AV_LOG_ERROR, "Couldn't set ISO Speed\n");
281 goto out_camera;
282 }
283
284 if (dc1394_video_set_mode(dc1394->camera, fmt->frame_size_id) != DC1394_SUCCESS) {
285 av_log(c, AV_LOG_ERROR, "Couldn't set video format\n");
286 goto out_camera;
287 }
288
289 if (dc1394_video_set_framerate(dc1394->camera,fps->frame_rate_id) != DC1394_SUCCESS) {
290 av_log(c, AV_LOG_ERROR, "Couldn't set framerate %d \n",fps->frame_rate);
291 goto out_camera;
292 }
293 if (dc1394_capture_setup(dc1394->camera, 10, DC1394_CAPTURE_FLAGS_DEFAULT)!=DC1394_SUCCESS) {
294 av_log(c, AV_LOG_ERROR, "Cannot setup camera \n");
295 goto out_camera;
296 }
297
298 if (dc1394_video_set_transmission(dc1394->camera, DC1394_ON) !=DC1394_SUCCESS) {
299 av_log(c, AV_LOG_ERROR, "Cannot start capture\n");
300 goto out_camera;
301 }
302 return 0;
303
304out_camera:
305 dc1394_capture_stop(dc1394->camera);
306 dc1394_video_set_transmission(dc1394->camera, DC1394_OFF);
307 dc1394_camera_free (dc1394->camera);
308out:
309 dc1394_free(dc1394->d);
310 return -1;
311}
312
313static int dc1394_v2_read_packet(AVFormatContext *c, AVPacket *pkt)
314{
315 struct dc1394_data *dc1394 = c->priv_data;
316 int res;
317
318 /* discard stale frame */
319 if (dc1394->current_frame++) {
320 if (dc1394_capture_enqueue(dc1394->camera, dc1394->frame) != DC1394_SUCCESS)
321 av_log(c, AV_LOG_ERROR, "failed to release %d frame\n", dc1394->current_frame);
322 }
323
324 res = dc1394_capture_dequeue(dc1394->camera, DC1394_CAPTURE_POLICY_WAIT, &dc1394->frame);
325 if (res == DC1394_SUCCESS) {
326 dc1394->packet.data = (uint8_t *)(dc1394->frame->image);
327 dc1394->packet.pts = (dc1394->current_frame * 1000000) / (dc1394->fps);
328 res = dc1394->frame->image_bytes;
329 } else {
330 av_log(c, AV_LOG_ERROR, "DMA capture failed\n");
331 dc1394->packet.data = NULL;
332 res = -1;
333 }
334
335 *pkt = dc1394->packet;
336 return res;
337}
338
339static int dc1394_v2_close(AVFormatContext * context)
340{
341 struct dc1394_data *dc1394 = context->priv_data;
342
343 dc1394_video_set_transmission(dc1394->camera, DC1394_OFF);
344 dc1394_capture_stop(dc1394->camera);
345 dc1394_camera_free(dc1394->camera);
346 dc1394_free(dc1394->d);
347
348 return 0;
349}
350
/* Demuxer registration for the libdc1394 v.2 code path. */
AVInputFormat libdc1394_demuxer = {
    .name           = "libdc1394",
    .long_name      = NULL_IF_CONFIG_SMALL("dc1394 v.2 A/V grab"),
    .priv_data_size = sizeof(struct dc1394_data),
    .read_header    = dc1394_v2_read_header,
    .read_packet    = dc1394_v2_read_packet,
    .read_close     = dc1394_v2_close,
    .flags          = AVFMT_NOFILE
};

#endif
#if ENABLE_LIBDC1394_1
/* Demuxer registration for the libdc1394 v.1 code path (same public
   name, so at most one of the two variants is compiled in). */
AVInputFormat libdc1394_demuxer = {
    .name           = "libdc1394",
    .long_name      = NULL_IF_CONFIG_SMALL("dc1394 v.1 A/V grab"),
    .priv_data_size = sizeof(struct dc1394_data),
    .read_header    = dc1394_v1_read_header,
    .read_packet    = dc1394_v1_read_packet,
    .read_close     = dc1394_v1_close,
    .flags          = AVFMT_NOFILE
};
#endif
diff --git a/src/plugins/ffmpeg/libavdevice/v4l.c b/src/plugins/ffmpeg/libavdevice/v4l.c
deleted file mode 100644
index 8fed08c..0000000
--- a/src/plugins/ffmpeg/libavdevice/v4l.c
+++ /dev/null
@@ -1,355 +0,0 @@
1/*
2 * Linux video grab interface
3 * Copyright (c) 2000,2001 Fabrice Bellard.
4 *
5 * This file is part of FFmpeg.
6 *
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22#include "config.h"
23#include "libavformat/avformat.h"
24#include "libavcodec/dsputil.h"
25#include <unistd.h>
26#include <fcntl.h>
27#include <sys/ioctl.h>
28#include <sys/mman.h>
29#include <sys/time.h>
30#define _LINUX_TIME_H 1
31#include <linux/videodev.h>
32#include <time.h>
33
/* Private state for the V4L (v1) grab demuxer. */
typedef struct {
    int fd;                          /* open descriptor of the video device */
    int frame_format; /* see VIDEO_PALETTE_xxx */
    int use_mmap;                    /* 1: mmap'ed ring buffer, 0: read() based */
    int width, height;
    int frame_rate;                  /* == requested time_base.den */
    int frame_rate_base;             /* == requested time_base.num */
    int64_t time_frame;              /* due time of next frame, in us scaled by rate/base */
    int frame_size;                  /* bytes per frame for the chosen palette */
    struct video_capability video_cap;
    struct video_audio audio_saved;  /* audio state saved at open, re-muted on close */
    uint8_t *video_buf;              /* mmap'ed capture buffers */
    struct video_mbuf gb_buffers;
    struct video_mmap gb_buf;
    int gb_frame;                    /* ring index of the buffer to sync on next */
} VideoData;
50
/* V4L palettes this demuxer can deliver, with their bit depth and the
   matching FFmpeg pixel format; searched in order of preference. */
static const struct {
    int palette;
    int depth;
    enum PixelFormat pix_fmt;
} video_formats [] = {
    {.palette = VIDEO_PALETTE_YUV420P, .depth = 12, .pix_fmt = PIX_FMT_YUV420P },
    {.palette = VIDEO_PALETTE_YUV422, .depth = 16, .pix_fmt = PIX_FMT_YUYV422 },
    {.palette = VIDEO_PALETTE_UYVY, .depth = 16, .pix_fmt = PIX_FMT_UYVY422 },
    {.palette = VIDEO_PALETTE_YUYV, .depth = 16, .pix_fmt = PIX_FMT_YUYV422 },
    /* NOTE: v4l uses BGR24, not RGB24 */
    {.palette = VIDEO_PALETTE_RGB24, .depth = 24, .pix_fmt = PIX_FMT_BGR24 },
    {.palette = VIDEO_PALETTE_RGB565, .depth = 16, .pix_fmt = PIX_FMT_BGR565 },
    {.palette = VIDEO_PALETTE_GREY, .depth = 8, .pix_fmt = PIX_FMT_GRAY8 },
};
65
66
/* Open the V4L device named by s1->filename, negotiate a palette and
   capture geometry, and set up either mmap-based or read()-based
   grabbing. Creates the single raw-video stream on s1.
   Returns 0 on success, a negative AVERROR otherwise. */
static int grab_read_header(AVFormatContext *s1, AVFormatParameters *ap)
{
    VideoData *s = s1->priv_data;
    AVStream *st;
    int width, height;
    int video_fd, frame_size;
    int ret, frame_rate, frame_rate_base;
    int desired_palette, desired_depth;
    struct video_tuner tuner;
    struct video_audio audio;
    struct video_picture pict;
    int j;
    int vformat_num = sizeof(video_formats) / sizeof(video_formats[0]);

    /* size and rate must be given explicitly; V4L1 cannot probe them */
    if (ap->width <= 0 || ap->height <= 0 || ap->time_base.den <= 0) {
        av_log(s1, AV_LOG_ERROR, "Bad capture size (%dx%d) or wrong time base (%d)\n",
            ap->width, ap->height, ap->time_base.den);

        return -1;
    }

    width = ap->width;
    height = ap->height;
    frame_rate      = ap->time_base.den;
    frame_rate_base = ap->time_base.num;

    if((unsigned)width > 32767 || (unsigned)height > 32767) {
        av_log(s1, AV_LOG_ERROR, "Capture size is out of range: %dx%d\n",
            width, height);

        return -1;
    }

    st = av_new_stream(s1, 0);
    if (!st)
        return AVERROR(ENOMEM);
    av_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */

    s->width = width;
    s->height = height;
    s->frame_rate      = frame_rate;
    s->frame_rate_base = frame_rate_base;

    video_fd = open(s1->filename, O_RDWR);
    if (video_fd < 0) {
        av_log(s1, AV_LOG_ERROR, "%s: %s\n", s1->filename, strerror(errno));
        goto fail;
    }

    if (ioctl(video_fd,VIDIOCGCAP, &s->video_cap) < 0) {
        av_log(s1, AV_LOG_ERROR, "VIDIOCGCAP: %s\n", strerror(errno));
        goto fail;
    }

    if (!(s->video_cap.type & VID_TYPE_CAPTURE)) {
        av_log(s1, AV_LOG_ERROR, "Fatal: grab device does not handle capture\n");
        goto fail;
    }

    /* map the requested pixel format onto a preferred V4L palette */
    desired_palette = -1;
    desired_depth = -1;
    for (j = 0; j < vformat_num; j++) {
        if (ap->pix_fmt == video_formats[j].pix_fmt) {
            desired_palette = video_formats[j].palette;
            desired_depth = video_formats[j].depth;
            break;
        }
    }

    /* set tv standard */
    if (ap->standard && !ioctl(video_fd, VIDIOCGTUNER, &tuner)) {
        if (!strcasecmp(ap->standard, "pal"))
            tuner.mode = VIDEO_MODE_PAL;
        else if (!strcasecmp(ap->standard, "secam"))
            tuner.mode = VIDEO_MODE_SECAM;
        else
            tuner.mode = VIDEO_MODE_NTSC;
        ioctl(video_fd, VIDIOCSTUNER, &tuner);
    }

    /* unmute audio; the pre-unmute state is saved and restored on close */
    audio.audio = 0;
    ioctl(video_fd, VIDIOCGAUDIO, &audio);
    memcpy(&s->audio_saved, &audio, sizeof(audio));
    audio.flags &= ~VIDEO_AUDIO_MUTE;
    ioctl(video_fd, VIDIOCSAUDIO, &audio);

    ioctl(video_fd, VIDIOCGPICT, &pict);
#if 0
    printf("v4l: colour=%d hue=%d brightness=%d constrast=%d whiteness=%d\n",
           pict.colour,
           pict.hue,
           pict.brightness,
           pict.contrast,
           pict.whiteness);
#endif
    /* try to choose a suitable video format: the preferred palette first,
       then every table entry in order until the driver accepts one */
    pict.palette = desired_palette;
    pict.depth= desired_depth;
    if (desired_palette == -1 || (ret = ioctl(video_fd, VIDIOCSPICT, &pict)) < 0) {
        for (j = 0; j < vformat_num; j++) {
            pict.palette = video_formats[j].palette;
            pict.depth = video_formats[j].depth;
            if (-1 != ioctl(video_fd, VIDIOCSPICT, &pict))
                break;
        }
        if (j >= vformat_num)
            goto fail1; /* no palette accepted; jumps into the branch below */
    }

    ret = ioctl(video_fd,VIDIOCGMBUF,&s->gb_buffers);
    if (ret < 0) {
        /* try to use read based access */
        struct video_window win;
        int val;

        win.x = 0;
        win.y = 0;
        win.width = width;
        win.height = height;
        win.chromakey = -1;
        win.flags = 0;

        ioctl(video_fd, VIDIOCSWIN, &win);

        s->frame_format = pict.palette;

        val = 1;
        ioctl(video_fd, VIDIOCCAPTURE, &val);

        s->time_frame = av_gettime() * s->frame_rate / s->frame_rate_base;
        s->use_mmap = 0;
    } else {
        /* mmap-based access: map the driver's ring of capture buffers */
        s->video_buf = mmap(0,s->gb_buffers.size,PROT_READ|PROT_WRITE,MAP_SHARED,video_fd,0);
        if ((unsigned char*)-1 == s->video_buf) {
            s->video_buf = mmap(0,s->gb_buffers.size,PROT_READ|PROT_WRITE,MAP_PRIVATE,video_fd,0);
            if ((unsigned char*)-1 == s->video_buf) {
                av_log(s1, AV_LOG_ERROR, "mmap: %s\n", strerror(errno));
                goto fail;
            }
        }
        s->gb_frame = 0;
        s->time_frame = av_gettime() * s->frame_rate / s->frame_rate_base;

        /* start to grab the first frame */
        s->gb_buf.frame = s->gb_frame % s->gb_buffers.frames;
        s->gb_buf.height = height;
        s->gb_buf.width = width;
        s->gb_buf.format = pict.palette;

        ret = ioctl(video_fd, VIDIOCMCAPTURE, &s->gb_buf);
        if (ret < 0) {
            if (errno != EAGAIN) {
            /* fail1 is also reached by the goto in the palette loop above */
            fail1:
                av_log(s1, AV_LOG_ERROR, "Fatal: grab device does not support suitable format\n");
            } else {
                av_log(s1, AV_LOG_ERROR,"Fatal: grab device does not receive any video signal\n");
            }
            goto fail;
        }
        /* queue the remaining ring buffers so capture stays ahead of us */
        for (j = 1; j < s->gb_buffers.frames; j++) {
            s->gb_buf.frame = j;
            ioctl(video_fd, VIDIOCMCAPTURE, &s->gb_buf);
        }
        s->frame_format = s->gb_buf.format;
        s->use_mmap = 1;
    }

    /* derive frame size and FFmpeg pixel format from the final palette */
    for (j = 0; j < vformat_num; j++) {
        if (s->frame_format == video_formats[j].palette) {
            frame_size = width * height * video_formats[j].depth / 8;
            st->codec->pix_fmt = video_formats[j].pix_fmt;
            break;
        }
    }

    if (j >= vformat_num)
        goto fail;

    s->fd = video_fd;
    s->frame_size = frame_size;

    st->codec->codec_type = CODEC_TYPE_VIDEO;
    st->codec->codec_id = CODEC_ID_RAWVIDEO;
    st->codec->width = width;
    st->codec->height = height;
    st->codec->time_base.den      = frame_rate;
    st->codec->time_base.num = frame_rate_base;
    st->codec->bit_rate = frame_size * 1/av_q2d(st->codec->time_base) * 8;

    return 0;
 fail:
    if (video_fd >= 0)
        close(video_fd);
    return AVERROR(EIO);
}
263
264static int v4l_mm_read_picture(VideoData *s, uint8_t *buf)
265{
266 uint8_t *ptr;
267
268 while (ioctl(s->fd, VIDIOCSYNC, &s->gb_frame) < 0 &&
269 (errno == EAGAIN || errno == EINTR));
270
271 ptr = s->video_buf + s->gb_buffers.offsets[s->gb_frame];
272 memcpy(buf, ptr, s->frame_size);
273
274 /* Setup to capture the next frame */
275 s->gb_buf.frame = s->gb_frame;
276 if (ioctl(s->fd, VIDIOCMCAPTURE, &s->gb_buf) < 0) {
277 if (errno == EAGAIN)
278 av_log(NULL, AV_LOG_ERROR, "Cannot Sync\n");
279 else
280 av_log(NULL, AV_LOG_ERROR, "VIDIOCMCAPTURE: %s\n", strerror(errno));
281 return AVERROR(EIO);
282 }
283
284 /* This is now the grabbing frame */
285 s->gb_frame = (s->gb_frame + 1) % s->gb_buffers.frames;
286
287 return s->frame_size;
288}
289
/* Deliver one video frame, sleeping as needed so frames are produced at
   the configured rate. time_frame is kept pre-scaled by
   frame_rate/frame_rate_base, so one frame period is 1000000 units. */
static int grab_read_packet(AVFormatContext *s1, AVPacket *pkt)
{
    VideoData *s = s1->priv_data;
    int64_t curtime, delay;
    struct timespec ts;

    /* Calculate the time of the next frame */
    s->time_frame += INT64_C(1000000);

    /* wait based on the frame rate */
    for(;;) {
        curtime = av_gettime();
        delay = s->time_frame * s->frame_rate_base / s->frame_rate - curtime;
        if (delay <= 0) {
            /* more than one frame period late: skip ahead to shed backlog */
            if (delay < INT64_C(-1000000) * s->frame_rate_base / s->frame_rate) {
                /* printf("grabbing is %d frames late (dropping)\n", (int) -(delay / 16666)); */
                s->time_frame += INT64_C(1000000);
            }
            break;
        }
        ts.tv_sec = delay / 1000000;
        ts.tv_nsec = (delay % 1000000) * 1000;
        nanosleep(&ts, NULL);
    }

    if (av_new_packet(pkt, s->frame_size) < 0)
        return AVERROR(EIO);

    /* pts is wall-clock microseconds; stream time base is 1/1000000 */
    pkt->pts = curtime;

    /* read one frame */
    if (s->use_mmap) {
        return v4l_mm_read_picture(s, pkt->data);
    } else {
        if (read(s->fd, pkt->data, pkt->size) != pkt->size)
            return AVERROR(EIO);
        return s->frame_size;
    }
}
329
330static int grab_read_close(AVFormatContext *s1)
331{
332 VideoData *s = s1->priv_data;
333
334 if (s->use_mmap)
335 munmap(s->video_buf, s->gb_buffers.size);
336
337 /* mute audio. we must force it because the BTTV driver does not
338 return its state correctly */
339 s->audio_saved.flags |= VIDEO_AUDIO_MUTE;
340 ioctl(s->fd, VIDIOCSAUDIO, &s->audio_saved);
341
342 close(s->fd);
343 return 0;
344}
345
346AVInputFormat v4l_demuxer = {
347 "video4linux",
348 NULL_IF_CONFIG_SMALL("video grab"),
349 sizeof(VideoData),
350 NULL,
351 grab_read_header,
352 grab_read_packet,
353 grab_read_close,
354 .flags = AVFMT_NOFILE,
355};
diff --git a/src/plugins/ffmpeg/libavdevice/v4l2.c b/src/plugins/ffmpeg/libavdevice/v4l2.c
deleted file mode 100644
index 460fdb0..0000000
--- a/src/plugins/ffmpeg/libavdevice/v4l2.c
+++ /dev/null
@@ -1,641 +0,0 @@
1/*
2 * Video4Linux2 grab interface
3 * Copyright (c) 2000,2001 Fabrice Bellard.
4 * Copyright (c) 2006 Luca Abeni.
5 *
6 * Part of this file is based on the V4L2 video capture example
7 * (http://v4l2spec.bytesex.org/v4l2spec/capture.c)
8 *
9 * Thanks to Michael Niedermayer for providing the mapping between
10 * V4L2_PIX_FMT_* and PIX_FMT_*
11 *
12 *
13 * This file is part of FFmpeg.
14 *
15 * FFmpeg is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU Lesser General Public
17 * License as published by the Free Software Foundation; either
18 * version 2.1 of the License, or (at your option) any later version.
19 *
20 * FFmpeg is distributed in the hope that it will be useful,
21 * but WITHOUT ANY WARRANTY; without even the implied warranty of
22 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23 * Lesser General Public License for more details.
24 *
25 * You should have received a copy of the GNU Lesser General Public
26 * License along with FFmpeg; if not, write to the Free Software
27 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
28 */
29
30#include "config.h"
31#include "libavformat/avformat.h"
32#include <unistd.h>
33#include <fcntl.h>
34#include <sys/ioctl.h>
35#include <sys/mman.h>
36#include <sys/time.h>
37#include <asm/types.h>
38#include <linux/videodev2.h>
39#include <time.h>
40
/* Number of mmap buffers requested from the driver; it may grant fewer
   (mmap_init only insists on at least 2). */
static const int desired_video_buffers = 256;

/* Frame I/O strategies; only io_mmap is implemented in this file
   (read_init/read_frame below are stubs that always fail). */
enum io_method {
    io_read,
    io_mmap,
    io_userptr
};
48
/* Private state for the V4L2 grab demuxer. */
struct video_data {
    int fd;                 /* open descriptor of the video device */
    int frame_format; /* V4L2_PIX_FMT_* */
    enum io_method io_method;
    int width, height;
    int frame_rate;
    int frame_rate_base;
    int frame_size;         /* expected bytes per frame; DQBUF results are checked against it */
    int top_field_first;

    int buffers;            /* number of mmap buffers actually granted */
    void **buf_start;       /* per-buffer mapped start addresses */
    unsigned int *buf_len;  /* per-buffer mapped lengths */
};

/* Ties a dequeued buffer index to its device fd so the packet
   destructor (mmap_release_buffer) can re-queue the buffer later. */
struct buff_data {
    int index;
    int fd;
};
68
/* One FFmpeg pixel format <-> V4L2 fourcc pair. */
struct fmt_map {
    enum PixelFormat ff_fmt;
    int32_t v4l2_fmt;
};

/* Bidirectional lookup table used by fmt_ff2v4l() and fmt_v4l2ff(). */
static struct fmt_map fmt_conversion_table[] = {
    {
        .ff_fmt = PIX_FMT_YUV420P,
        .v4l2_fmt = V4L2_PIX_FMT_YUV420,
    },
    {
        .ff_fmt = PIX_FMT_YUV422P,
        .v4l2_fmt = V4L2_PIX_FMT_YUV422P,
    },
    {
        .ff_fmt = PIX_FMT_YUYV422,
        .v4l2_fmt = V4L2_PIX_FMT_YUYV,
    },
    {
        .ff_fmt = PIX_FMT_UYVY422,
        .v4l2_fmt = V4L2_PIX_FMT_UYVY,
    },
    {
        .ff_fmt = PIX_FMT_YUV411P,
        .v4l2_fmt = V4L2_PIX_FMT_YUV411P,
    },
    {
        .ff_fmt = PIX_FMT_YUV410P,
        .v4l2_fmt = V4L2_PIX_FMT_YUV410,
    },
    {
        .ff_fmt = PIX_FMT_BGR24,
        .v4l2_fmt = V4L2_PIX_FMT_BGR24,
    },
    {
        .ff_fmt = PIX_FMT_RGB24,
        .v4l2_fmt = V4L2_PIX_FMT_RGB24,
    },
    /*
    {
        .ff_fmt = PIX_FMT_RGB32,
        .v4l2_fmt = V4L2_PIX_FMT_BGR32,
    },
    */
    {
        .ff_fmt = PIX_FMT_GRAY8,
        .v4l2_fmt = V4L2_PIX_FMT_GREY,
    },
};
118
119static int device_open(AVFormatContext *ctx, uint32_t *capabilities)
120{
121 struct v4l2_capability cap;
122 int fd;
123 int res;
124 int flags = O_RDWR;
125
126 if (ctx->flags & AVFMT_FLAG_NONBLOCK) {
127 flags |= O_NONBLOCK;
128 }
129 fd = open(ctx->filename, flags, 0);
130 if (fd < 0) {
131 av_log(ctx, AV_LOG_ERROR, "Cannot open video device %s : %s\n",
132 ctx->filename, strerror(errno));
133
134 return -1;
135 }
136
137 res = ioctl(fd, VIDIOC_QUERYCAP, &cap);
138 // ENOIOCTLCMD definition only availble on __KERNEL__
139 if (res < 0 && errno == 515)
140 {
141 av_log(ctx, AV_LOG_ERROR, "QUERYCAP not implemented, probably V4L device but not supporting V4L2\n");
142 close(fd);
143
144 return -1;
145 }
146 if (res < 0) {
147 av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_QUERYCAP): %s\n",
148 strerror(errno));
149 close(fd);
150
151 return -1;
152 }
153 if ((cap.capabilities & V4L2_CAP_VIDEO_CAPTURE) == 0) {
154 av_log(ctx, AV_LOG_ERROR, "Not a video capture device\n");
155 close(fd);
156
157 return -1;
158 }
159 *capabilities = cap.capabilities;
160
161 return fd;
162}
163
164static int device_init(AVFormatContext *ctx, int *width, int *height, int pix_fmt)
165{
166 struct video_data *s = ctx->priv_data;
167 int fd = s->fd;
168 struct v4l2_format fmt;
169 int res;
170
171 memset(&fmt, 0, sizeof(struct v4l2_format));
172 fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
173 fmt.fmt.pix.width = *width;
174 fmt.fmt.pix.height = *height;
175 fmt.fmt.pix.pixelformat = pix_fmt;
176 fmt.fmt.pix.field = V4L2_FIELD_INTERLACED;
177 res = ioctl(fd, VIDIOC_S_FMT, &fmt);
178 if ((*width != fmt.fmt.pix.width) || (*height != fmt.fmt.pix.height)) {
179 av_log(ctx, AV_LOG_INFO, "The V4L2 driver changed the video from %dx%d to %dx%d\n", *width, *height, fmt.fmt.pix.width, fmt.fmt.pix.height);
180 *width = fmt.fmt.pix.width;
181 *height = fmt.fmt.pix.height;
182 }
183
184 return res;
185}
186
/* Query the device's TV standard: returns 0 for NTSC or when the
   standard cannot be read, 1 otherwise. NOTE(review): the result is
   presumably stored as top_field_first by the caller — confirm there. */
static int first_field(int fd)
{
    v4l2_std_id standard;

    if (ioctl(fd, VIDIOC_G_STD, &standard) < 0)
        return 0;

    return (standard & V4L2_STD_NTSC) ? 0 : 1;
}
202
203static uint32_t fmt_ff2v4l(enum PixelFormat pix_fmt)
204{
205 int i;
206
207 for (i = 0; i < sizeof(fmt_conversion_table) / sizeof(struct fmt_map); i++) {
208 if (fmt_conversion_table[i].ff_fmt == pix_fmt) {
209 return fmt_conversion_table[i].v4l2_fmt;
210 }
211 }
212
213 return 0;
214}
215
216static enum PixelFormat fmt_v4l2ff(uint32_t pix_fmt)
217{
218 int i;
219
220 for (i = 0; i < sizeof(fmt_conversion_table) / sizeof(struct fmt_map); i++) {
221 if (fmt_conversion_table[i].v4l2_fmt == pix_fmt) {
222 return fmt_conversion_table[i].ff_fmt;
223 }
224 }
225
226 return PIX_FMT_NONE;
227}
228
229static int mmap_init(AVFormatContext *ctx)
230{
231 struct video_data *s = ctx->priv_data;
232 struct v4l2_requestbuffers req;
233 int i, res;
234
235 memset(&req, 0, sizeof(struct v4l2_requestbuffers));
236 req.count = desired_video_buffers;
237 req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
238 req.memory = V4L2_MEMORY_MMAP;
239 res = ioctl (s->fd, VIDIOC_REQBUFS, &req);
240 if (res < 0) {
241 if (errno == EINVAL) {
242 av_log(ctx, AV_LOG_ERROR, "Device does not support mmap\n");
243 } else {
244 av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_REQBUFS)\n");
245 }
246
247 return -1;
248 }
249
250 if (req.count < 2) {
251 av_log(ctx, AV_LOG_ERROR, "Insufficient buffer memory\n");
252
253 return -1;
254 }
255 s->buffers = req.count;
256 s->buf_start = av_malloc(sizeof(void *) * s->buffers);
257 if (s->buf_start == NULL) {
258 av_log(ctx, AV_LOG_ERROR, "Cannot allocate buffer pointers\n");
259
260 return -1;
261 }
262 s->buf_len = av_malloc(sizeof(unsigned int) * s->buffers);
263 if (s->buf_len == NULL) {
264 av_log(ctx, AV_LOG_ERROR, "Cannot allocate buffer sizes\n");
265 av_free(s->buf_start);
266
267 return -1;
268 }
269
270 for (i = 0; i < req.count; i++) {
271 struct v4l2_buffer buf;
272
273 memset(&buf, 0, sizeof(struct v4l2_buffer));
274 buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
275 buf.memory = V4L2_MEMORY_MMAP;
276 buf.index = i;
277 res = ioctl (s->fd, VIDIOC_QUERYBUF, &buf);
278 if (res < 0) {
279 av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_QUERYBUF)\n");
280
281 return -1;
282 }
283
284 s->buf_len[i] = buf.length;
285 if (s->buf_len[i] < s->frame_size) {
286 av_log(ctx, AV_LOG_ERROR, "Buffer len [%d] = %d != %d\n", i, s->buf_len[i], s->frame_size);
287
288 return -1;
289 }
290 s->buf_start[i] = mmap (NULL, buf.length,
291 PROT_READ | PROT_WRITE, MAP_SHARED, s->fd, buf.m.offset);
292 if (s->buf_start[i] == MAP_FAILED) {
293 av_log(ctx, AV_LOG_ERROR, "mmap: %s\n", strerror(errno));
294
295 return -1;
296 }
297 }
298
299 return 0;
300}
301
/* Stub for the io_read strategy: read()-based capture is not
   implemented, so initialization always fails. */
static int read_init(AVFormatContext *ctx)
{
    return -1;
}
306
/* AVPacket destructor for zero-copy mmap frames: re-queues the
   underlying driver buffer (identified by the buff_data descriptor in
   pkt->priv) and frees the descriptor. */
static void mmap_release_buffer(AVPacket *pkt)
{
    struct v4l2_buffer buf;
    int res, fd;
    struct buff_data *buf_descriptor = pkt->priv;

    memset(&buf, 0, sizeof(struct v4l2_buffer));
    buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    buf.memory = V4L2_MEMORY_MMAP;
    buf.index = buf_descriptor->index;
    fd = buf_descriptor->fd;
    av_free(buf_descriptor);

    /* give the buffer back to the driver so it can be refilled */
    res = ioctl (fd, VIDIOC_QBUF, &buf);
    if (res < 0) {
        av_log(NULL, AV_LOG_ERROR, "ioctl(VIDIOC_QBUF)\n");
    }
    pkt->data = NULL;
    pkt->size = 0;
}
327
/* Dequeue one filled buffer from the driver and wrap it into pkt
   WITHOUT copying: the packet borrows the mmap'ed memory until its
   destructor (mmap_release_buffer) re-queues the buffer. */
static int mmap_read_frame(AVFormatContext *ctx, AVPacket *pkt)
{
    struct video_data *s = ctx->priv_data;
    struct v4l2_buffer buf;
    struct buff_data *buf_descriptor;
    int res;

    memset(&buf, 0, sizeof(struct v4l2_buffer));
    buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    buf.memory = V4L2_MEMORY_MMAP;

    /* FIXME: Some special treatment might be needed in case of loss of signal... */
    while ((res = ioctl(s->fd, VIDIOC_DQBUF, &buf)) < 0 && (errno == EINTR));
    if (res < 0) {
        if (errno == EAGAIN) {
            /* non-blocking mode and no frame ready yet */
            pkt->size = 0;

            return AVERROR(EAGAIN);
        }
        av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_DQBUF): %s\n", strerror(errno));

        return -1;
    }
    assert (buf.index < s->buffers);
    if (buf.bytesused != s->frame_size) {
        av_log(ctx, AV_LOG_ERROR, "The v4l2 frame is %d bytes, but %d bytes are expected\n", buf.bytesused, s->frame_size);

        return -1;
    }

    /* Image is at s->buff_start[buf.index] */
    pkt->data= s->buf_start[buf.index];
    pkt->size = buf.bytesused;
    /* pts = driver capture timestamp in microseconds */
    pkt->pts = buf.timestamp.tv_sec * INT64_C(1000000) + buf.timestamp.tv_usec;
    pkt->destruct = mmap_release_buffer;
    buf_descriptor = av_malloc(sizeof(struct buff_data));
    if (buf_descriptor == NULL) {
        /* Something went wrong... Since av_malloc() failed, we cannot even
         * allocate a buffer for memcopying into it
         */
        av_log(ctx, AV_LOG_ERROR, "Failed to allocate a buffer descriptor\n");
        /* re-queue immediately; the frame is lost */
        res = ioctl (s->fd, VIDIOC_QBUF, &buf);

        return -1;
    }
    buf_descriptor->fd = s->fd;
    buf_descriptor->index = buf.index;
    pkt->priv = buf_descriptor;

    return s->buf_len[buf.index];
}
379
/* Placeholder for the read() I/O path: not implemented, always fails.
 * Paired with read_init() above. */
static int read_frame(AVFormatContext *ctx, AVPacket *pkt)
{
    return -1;
}
384
/* Queues every mmap-ed buffer to the driver (VIDIOC_QBUF) and starts
 * capture streaming (VIDIOC_STREAMON).  Returns 0 on success, -1 on
 * any ioctl failure. */
static int mmap_start(AVFormatContext *ctx)
{
    struct video_data *s = ctx->priv_data;
    enum v4l2_buf_type type;
    int i, res;

    for (i = 0; i < s->buffers; i++) {
        struct v4l2_buffer buf;

        memset(&buf, 0, sizeof(struct v4l2_buffer));
        buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        buf.memory = V4L2_MEMORY_MMAP;
        buf.index = i;

        res = ioctl (s->fd, VIDIOC_QBUF, &buf);
        if (res < 0) {
            av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_QBUF): %s\n", strerror(errno));

            return -1;
        }
    }

    type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    res = ioctl (s->fd, VIDIOC_STREAMON, &type);
    if (res < 0) {
        av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_STREAMON): %s\n", strerror(errno));

        return -1;
    }

    return 0;
}
417
418static void mmap_close(struct video_data *s)
419{
420 enum v4l2_buf_type type;
421 int i;
422
423 type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
424 /* We do not check for the result, because we could
425 * not do anything about it anyway...
426 */
427 ioctl(s->fd, VIDIOC_STREAMOFF, &type);
428 for (i = 0; i < s->buffers; i++) {
429 munmap(s->buf_start[i], s->buf_len[i]);
430 }
431 av_free(s->buf_start);
432 av_free(s->buf_len);
433}
434
/* Applies user-requested tuner parameters: selects the TV input channel
 * (ap->channel, if non-negative) and the TV standard (ap->standard, if
 * set) on the open device.  The standard name is matched by enumerating
 * VIDIOC_ENUMSTD until a case-insensitive hit.  Returns 0 on success,
 * AVERROR(EIO) on any ioctl failure or unknown standard. */
static int v4l2_set_parameters( AVFormatContext *s1, AVFormatParameters *ap )
{
    struct video_data *s = s1->priv_data;
    struct v4l2_input input;
    struct v4l2_standard standard;
    int i;

    if(ap->channel>=0) {
        /* set tv video input */
        memset (&input, 0, sizeof (input));
        input.index = ap->channel;
        /* Enumerate first to validate the index and get a printable name. */
        if(ioctl (s->fd, VIDIOC_ENUMINPUT, &input) < 0) {
            av_log(s1, AV_LOG_ERROR, "The V4L2 driver ioctl enum input failed:\n");
            return AVERROR(EIO);
        }

        av_log(s1, AV_LOG_DEBUG, "The V4L2 driver set input_id: %d, input: %s\n",
               ap->channel, input.name);
        if(ioctl (s->fd, VIDIOC_S_INPUT, &input.index) < 0 ) {
            av_log(s1, AV_LOG_ERROR, "The V4L2 driver ioctl set input(%d) failed\n",
                   ap->channel);
            return AVERROR(EIO);
        }
    }

    if(ap->standard) {
        av_log(s1, AV_LOG_DEBUG, "The V4L2 driver set standard: %s\n",
               ap->standard );
        /* set tv standard */
        memset (&standard, 0, sizeof (standard));
        /* Walk the driver's standard list until the requested name matches;
         * ENUMSTD failing means we ran past the end without a match. */
        for(i=0;;i++) {
            standard.index = i;
            if (ioctl(s->fd, VIDIOC_ENUMSTD, &standard) < 0) {
                av_log(s1, AV_LOG_ERROR, "The V4L2 driver ioctl set standard(%s) failed\n",
                       ap->standard);
                return AVERROR(EIO);
            }

            if(!strcasecmp(standard.name, ap->standard)) {
                break;
            }
        }

        av_log(s1, AV_LOG_DEBUG, "The V4L2 driver set standard: %s, id: %"PRIu64"\n",
               ap->standard, standard.id);
        if (ioctl(s->fd, VIDIOC_S_STD, &standard.id) < 0) {
            av_log(s1, AV_LOG_ERROR, "The V4L2 driver ioctl set standard(%s) failed\n",
                   ap->standard);
            return AVERROR(EIO);
        }
    }

    return 0;
}
489
/* Demuxer open: validates the user-supplied size/rate, opens the device,
 * negotiates a pixel format (requested one first, then every entry of
 * fmt_conversion_table as fallback), applies tuner parameters, picks the
 * I/O method (mmap if the driver advertises V4L2_CAP_STREAMING, read()
 * otherwise) and fills in the single raw-video stream. */
static int v4l2_read_header(AVFormatContext *s1, AVFormatParameters *ap)
{
    struct video_data *s = s1->priv_data;
    AVStream *st;
    int width, height;
    int res, frame_rate, frame_rate_base;
    uint32_t desired_format, capabilities;

    if (ap->width <= 0 || ap->height <= 0 || ap->time_base.den <= 0) {
        av_log(s1, AV_LOG_ERROR, "Missing/Wrong width, height or framerate\n");

        return -1;
    }

    width = ap->width;
    height = ap->height;
    frame_rate = ap->time_base.den;
    frame_rate_base = ap->time_base.num;

    /* Guard against sizes that would overflow 16-bit driver fields. */
    if((unsigned)width > 32767 || (unsigned)height > 32767) {
        av_log(s1, AV_LOG_ERROR, "Wrong size %dx%d\n", width, height);

        return -1;
    }

    st = av_new_stream(s1, 0);
    if (!st) {
        return AVERROR(ENOMEM);
    }
    av_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */

    s->width = width;
    s->height = height;
    s->frame_rate = frame_rate;
    s->frame_rate_base = frame_rate_base;

    capabilities = 0;
    s->fd = device_open(s1, &capabilities);
    if (s->fd < 0) {
        return AVERROR(EIO);
    }
    av_log(s1, AV_LOG_INFO, "[%d]Capabilities: %x\n", s->fd, capabilities);

    /* Try the format the caller asked for; if the driver rejects it,
     * probe every known format until one sticks. */
    desired_format = fmt_ff2v4l(ap->pix_fmt);
    if (desired_format == 0 || (device_init(s1, &width, &height, desired_format) < 0)) {
        int i, done;

        done = 0; i = 0;
        while (!done) {
            desired_format = fmt_conversion_table[i].v4l2_fmt;
            if (device_init(s1, &width, &height, desired_format) < 0) {
                desired_format = 0;
                i++;
            } else {
                done = 1;
            }
            /* Table exhausted: give up with desired_format == 0. */
            if (i == sizeof(fmt_conversion_table) / sizeof(struct fmt_map)) {
                done = 1;
            }
        }
    }
    if (desired_format == 0) {
        av_log(s1, AV_LOG_ERROR, "Cannot find a proper format.\n");
        close(s->fd);

        return AVERROR(EIO);
    }
    s->frame_format = desired_format;

    if( v4l2_set_parameters( s1, ap ) < 0 )
        return AVERROR(EIO);

    st->codec->pix_fmt = fmt_v4l2ff(desired_format);
    s->frame_size = avpicture_get_size(st->codec->pix_fmt, width, height);
    if (capabilities & V4L2_CAP_STREAMING) {
        s->io_method = io_mmap;
        res = mmap_init(s1);
        if (res == 0) {
            res = mmap_start(s1);
        }
    } else {
        /* read_init() always fails, so non-streaming devices are rejected. */
        s->io_method = io_read;
        res = read_init(s1);
    }
    if (res < 0) {
        close(s->fd);

        return AVERROR(EIO);
    }
    s->top_field_first = first_field(s->fd);

    st->codec->codec_type = CODEC_TYPE_VIDEO;
    st->codec->codec_id = CODEC_ID_RAWVIDEO;
    st->codec->width = width;
    st->codec->height = height;
    st->codec->time_base.den = frame_rate;
    st->codec->time_base.num = frame_rate_base;
    /* Raw video: bytes per frame times frames per second, in bits. */
    st->codec->bit_rate = s->frame_size * 1/av_q2d(st->codec->time_base) * 8;

    return 0;
}
591
/* Reads one video frame via the I/O method chosen at open time and tags
 * the decoder's coded_frame with the interlacing info probed from the
 * device.  Returns the (fixed) frame size on success or a negative
 * error from the underlying reader. */
static int v4l2_read_packet(AVFormatContext *s1, AVPacket *pkt)
{
    struct video_data *s = s1->priv_data;
    int res;

    if (s->io_method == io_mmap) {
        /* mmap path fills pkt with a zero-copy pointer; no allocation. */
        av_init_packet(pkt);
        res = mmap_read_frame(s1, pkt);
    } else if (s->io_method == io_read) {
        if (av_new_packet(pkt, s->frame_size) < 0)
            return AVERROR(EIO);

        res = read_frame(s1, pkt);
    } else {
        return AVERROR(EIO);
    }
    if (res < 0) {
        return res;
    }

    if (s1->streams[0]->codec->coded_frame) {
        s1->streams[0]->codec->coded_frame->interlaced_frame = 1;
        s1->streams[0]->codec->coded_frame->top_field_first = s->top_field_first;
    }

    return s->frame_size;
}
619
620static int v4l2_read_close(AVFormatContext *s1)
621{
622 struct video_data *s = s1->priv_data;
623
624 if (s->io_method == io_mmap) {
625 mmap_close(s);
626 }
627
628 close(s->fd);
629 return 0;
630}
631
/* Demuxer registration for Video4Linux2 capture devices. */
AVInputFormat v4l2_demuxer = {
    "video4linux2",                     /* name */
    NULL_IF_CONFIG_SMALL("video grab"), /* long_name */
    sizeof(struct video_data),          /* priv_data_size */
    NULL,                               /* read_probe: device, nothing to probe */
    v4l2_read_header,
    v4l2_read_packet,
    v4l2_read_close,
    .flags = AVFMT_NOFILE,              /* opens the device itself, no AVIO */
};
diff --git a/src/plugins/ffmpeg/libavdevice/vfwcap.c b/src/plugins/ffmpeg/libavdevice/vfwcap.c
deleted file mode 100644
index be6003a..0000000
--- a/src/plugins/ffmpeg/libavdevice/vfwcap.c
+++ /dev/null
@@ -1,466 +0,0 @@
1/*
2 * VFW capture interface
3 * Copyright (c) 2006-2008 Ramiro Polla.
4 *
5 * This file is part of FFmpeg.
6 *
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22#include "libavformat/avformat.h"
23#include <vfw.h>
24#include <windows.h>
25
26//#define DEBUG_VFW
27
28/* Defines for VFW missing from MinGW.
29 * Remove this when MinGW incorporates them. */
30#define WM_CAP_START (0x0400)
31#define WM_CAP_SET_CALLBACK_VIDEOSTREAM (WM_CAP_START + 6)
32#define WM_CAP_DRIVER_CONNECT (WM_CAP_START + 10)
33#define WM_CAP_DRIVER_DISCONNECT (WM_CAP_START + 11)
34#define WM_CAP_GET_VIDEOFORMAT (WM_CAP_START + 44)
35#define WM_CAP_SET_VIDEOFORMAT (WM_CAP_START + 45)
36#define WM_CAP_SET_PREVIEW (WM_CAP_START + 50)
37#define WM_CAP_SET_OVERLAY (WM_CAP_START + 51)
38#define WM_CAP_SEQUENCE_NOFILE (WM_CAP_START + 63)
39#define WM_CAP_SET_SEQUENCE_SETUP (WM_CAP_START + 64)
40#define WM_CAP_GET_SEQUENCE_SETUP (WM_CAP_START + 65)
41
42#define HWND_MESSAGE ((HWND)-3)
43
44#define BI_RGB 0
45
/* Mirror of the Win32 VIDEOHDR capture-frame header (normally from
 * vfw.h); declared here because MinGW's headers lacked it. */
typedef struct videohdr_tag {
    LPBYTE      lpData;         /* pointer to the captured frame data */
    DWORD       dwBufferLength; /* size of the data buffer */
    DWORD       dwBytesUsed;    /* bytes actually used in the buffer */
    DWORD       dwTimeCaptured; /* capture time, ms since stream start */
    DWORD       dwUser;         /* user-defined data */
    DWORD       dwFlags;        /* VHDR_* flags */
    DWORD_PTR   dwReserved[4];  /* reserved by the system */
} VIDEOHDR, NEAR *PVIDEOHDR, FAR * LPVIDEOHDR;
55
/* Mirror of the Win32 CAPTUREPARMS structure (normally from vfw.h),
 * declared here because MinGW's headers lacked it.  Only the members
 * touched in vfw_read_header() matter to this demuxer:
 * dwRequestMicroSecPerFrame (frame interval), fYield (background
 * capture thread), fCaptureAudio and the abort controls. */
typedef struct {
    DWORD dwRequestMicroSecPerFrame;
    BOOL  fMakeUserHitOKToCapture;
    UINT  wPercentDropForError;
    BOOL  fYield;
    DWORD dwIndexSize;
    UINT  wChunkGranularity;
    BOOL  fUsingDOSMemory;
    UINT  wNumVideoRequested;
    BOOL  fCaptureAudio;
    UINT  wNumAudioRequested;
    UINT  vKeyAbort;
    BOOL  fAbortLeftMouse;
    BOOL  fAbortRightMouse;
    BOOL  fLimitEnabled;
    UINT  wTimeLimit;
    BOOL  fMCIControl;
    BOOL  fStepMCIDevice;
    DWORD dwMCIStartTime;
    DWORD dwMCIStopTime;
    BOOL  fStepCaptureAt2x;
    UINT  wStepCaptureAverageFrames;
    DWORD dwAudioBufferSize;
    BOOL  fDisableWriteCache;
    UINT  AVStreamMaster;
} CAPTUREPARMS;
82/* End of missing MinGW defines */
83
/* Private demuxer state shared between the capture callback thread and
 * the reader. */
struct vfw_ctx {
    HWND hwnd;                /* capture window receiving driver messages */
    HANDLE mutex;             /* guards pktl and curbufsize */
    HANDLE event;             /* signalled when a new packet is queued */
    AVPacketList *pktl;       /* FIFO of captured frames, oldest first */
    AVFormatContext *s;       /* back-pointer for logging from the callback */
    unsigned int curbufsize;  /* bytes currently queued in pktl */
    unsigned int frame_num;   /* frames seen, drives the drop heuristic */
};
93
94static enum PixelFormat vfw_pixfmt(DWORD biCompression, WORD biBitCount)
95{
96 switch(biCompression) {
97 case MKTAG('Y', 'U', 'Y', '2'):
98 return PIX_FMT_YUYV422;
99 case MKTAG('I', '4', '2', '0'):
100 return PIX_FMT_YUV420P;
101 case BI_RGB:
102 switch(biBitCount) { /* 1-8 are untested */
103 case 1:
104 return PIX_FMT_MONOWHITE;
105 case 4:
106 return PIX_FMT_RGB4;
107 case 8:
108 return PIX_FMT_RGB8;
109 case 16:
110 return PIX_FMT_RGB555;
111 case 24:
112 return PIX_FMT_BGR24;
113 case 32:
114 return PIX_FMT_RGB32;
115 }
116 }
117 return -1;
118}
119
/* Dump one struct member at DEBUG level; "type" is the printf
 * length/conversion suffix for the member (e.g. "lu" for a DWORD). */
#define dstruct(pctx, sname, var, type) \
    av_log(pctx, AV_LOG_DEBUG, #var":\t%"type"\n", sname->var)
122
/* Logs every CAPTUREPARMS member at DEBUG level, one line each. */
static void dump_captureparms(AVFormatContext *s, CAPTUREPARMS *cparms)
{
    av_log(s, AV_LOG_DEBUG, "CAPTUREPARMS\n");
    dstruct(s, cparms, dwRequestMicroSecPerFrame, "lu");
    dstruct(s, cparms, fMakeUserHitOKToCapture, "d");
    dstruct(s, cparms, wPercentDropForError, "u");
    dstruct(s, cparms, fYield, "d");
    dstruct(s, cparms, dwIndexSize, "lu");
    dstruct(s, cparms, wChunkGranularity, "u");
    dstruct(s, cparms, fUsingDOSMemory, "d");
    dstruct(s, cparms, wNumVideoRequested, "u");
    dstruct(s, cparms, fCaptureAudio, "d");
    dstruct(s, cparms, wNumAudioRequested, "u");
    dstruct(s, cparms, vKeyAbort, "u");
    dstruct(s, cparms, fAbortLeftMouse, "d");
    dstruct(s, cparms, fAbortRightMouse, "d");
    dstruct(s, cparms, fLimitEnabled, "d");
    dstruct(s, cparms, wTimeLimit, "u");
    dstruct(s, cparms, fMCIControl, "d");
    dstruct(s, cparms, fStepMCIDevice, "d");
    dstruct(s, cparms, dwMCIStartTime, "lu");
    dstruct(s, cparms, dwMCIStopTime, "lu");
    dstruct(s, cparms, fStepCaptureAt2x, "d");
    dstruct(s, cparms, wStepCaptureAverageFrames, "u");
    dstruct(s, cparms, dwAudioBufferSize, "lu");
    dstruct(s, cparms, fDisableWriteCache, "d");
    dstruct(s, cparms, AVStreamMaster, "u");
}
151
/* Logs a VIDEOHDR at DEBUG level; compiled out entirely unless
 * DEBUG_VFW is defined at the top of the file. */
static void dump_videohdr(AVFormatContext *s, VIDEOHDR *vhdr)
{
#ifdef DEBUG_VFW
    av_log(s, AV_LOG_DEBUG, "VIDEOHDR\n");
    dstruct(s, vhdr, lpData, "p");
    dstruct(s, vhdr, dwBufferLength, "lu");
    dstruct(s, vhdr, dwBytesUsed, "lu");
    dstruct(s, vhdr, dwTimeCaptured, "lu");
    dstruct(s, vhdr, dwUser, "lu");
    dstruct(s, vhdr, dwFlags, "lu");
    dstruct(s, vhdr, dwReserved[0], "lu");
    dstruct(s, vhdr, dwReserved[1], "lu");
    dstruct(s, vhdr, dwReserved[2], "lu");
    dstruct(s, vhdr, dwReserved[3], "lu");
#endif
}
168
/* Logs a BITMAPINFOHEADER at DEBUG level, including the compression
 * FourCC rendered as text. */
static void dump_bih(AVFormatContext *s, BITMAPINFOHEADER *bih)
{
    av_log(s, AV_LOG_DEBUG, "BITMAPINFOHEADER\n");
    dstruct(s, bih, biSize, "lu");
    dstruct(s, bih, biWidth, "ld");
    dstruct(s, bih, biHeight, "ld");
    dstruct(s, bih, biPlanes, "d");
    dstruct(s, bih, biBitCount, "d");
    dstruct(s, bih, biCompression, "lu");
    /* Re-print the FourCC as four ASCII characters for readability. */
    av_log(s, AV_LOG_DEBUG, "    biCompression:\t\"%.4s\"\n",
                   (char*) &bih->biCompression);
    dstruct(s, bih, biSizeImage, "lu");
    dstruct(s, bih, biXPelsPerMeter, "lu");
    dstruct(s, bih, biYPelsPerMeter, "lu");
    dstruct(s, bih, biClrUsed, "lu");
    dstruct(s, bih, biClrImportant, "lu");
}
186
187static int shall_we_drop(struct vfw_ctx *ctx)
188{
189 AVFormatContext *s = ctx->s;
190 const uint8_t dropscore[] = {62, 75, 87, 100};
191 const int ndropscores = sizeof(dropscore)/sizeof(dropscore[0]);
192 unsigned int buffer_fullness = (ctx->curbufsize*100)/s->max_picture_buffer;
193
194 if(dropscore[++ctx->frame_num%ndropscores] <= buffer_fullness) {
195 av_log(ctx->s, AV_LOG_ERROR,
196 "real-time buffer %d%% full! frame dropped!\n", buffer_fullness);
197 return 1;
198 }
199
200 return 0;
201}
202
/* Driver callback invoked on the capture thread for every grabbed
 * frame.  Copies the frame into a new AVPacket, appends it to the
 * mutex-protected ctx->pktl FIFO and signals the reader via ctx->event.
 * Returns TRUE when the frame was queued, FALSE when dropped or on
 * allocation failure. */
static LRESULT CALLBACK videostream_cb(HWND hwnd, LPVIDEOHDR vdhdr)
{
    struct vfw_ctx *ctx;
    AVPacketList **ppktl, *pktl_next;

    /* Context pointer was stashed on the window in vfw_read_header(). */
    ctx = (struct vfw_ctx *) GetWindowLongPtr(hwnd, GWLP_USERDATA);

    dump_videohdr(ctx->s, vdhdr);

    if(shall_we_drop(ctx))
        return FALSE;

    WaitForSingleObject(ctx->mutex, INFINITE);

    pktl_next = av_mallocz(sizeof(AVPacketList));
    if(!pktl_next)
        goto fail;

    if(av_new_packet(&pktl_next->pkt, vdhdr->dwBytesUsed) < 0) {
        av_free(pktl_next);
        goto fail;
    }

    pktl_next->pkt.pts = vdhdr->dwTimeCaptured;
    memcpy(pktl_next->pkt.data, vdhdr->lpData, vdhdr->dwBytesUsed);

    /* Walk to the tail so packets stay in capture order. */
    for(ppktl = &ctx->pktl ; *ppktl ; ppktl = &(*ppktl)->next);
    *ppktl = pktl_next;

    ctx->curbufsize += vdhdr->dwBytesUsed;

    SetEvent(ctx->event);
    ReleaseMutex(ctx->mutex);

    return TRUE;
fail:
    ReleaseMutex(ctx->mutex);
    return FALSE;
}
242
243static int vfw_read_close(AVFormatContext *s);
244
/* Demuxer open: creates a message-only capture window, connects it to
 * the requested driver (s->filename parsed as a device number),
 * negotiates the video format and capture parameters, registers
 * videostream_cb, and starts a no-file capture sequence on a background
 * thread.  A time base (frame rate) must be supplied by the caller. */
static int vfw_read_header(AVFormatContext *s, AVFormatParameters *ap)
{
    struct vfw_ctx *ctx = s->priv_data;
    AVCodecContext *codec;
    AVStream *st;
    int devnum;
    int bisize;
    BITMAPINFO *bi;
    CAPTUREPARMS cparms;
    DWORD biCompression;
    WORD biBitCount;
    int width;
    int height;
    int ret;

    if(!ap->time_base.den) {
        av_log(s, AV_LOG_ERROR, "A time base must be specified.\n");
        return AVERROR_IO;
    }

    ctx->s = s;

    ctx->hwnd = capCreateCaptureWindow(NULL, 0, 0, 0, 0, 0, HWND_MESSAGE, 0);
    if(!ctx->hwnd) {
        av_log(s, AV_LOG_ERROR, "Could not create capture window.\n");
        return AVERROR_IO;
    }

    /* If atoi fails, devnum==0 and the default device is used */
    devnum = atoi(s->filename);

    ret = SendMessage(ctx->hwnd, WM_CAP_DRIVER_CONNECT, devnum, 0);
    if(!ret) {
        av_log(s, AV_LOG_ERROR, "Could not connect to device.\n");
        DestroyWindow(ctx->hwnd);
        return AVERROR(ENODEV);
    }

    /* Disable on-screen overlay and preview; we only want the stream. */
    SendMessage(ctx->hwnd, WM_CAP_SET_OVERLAY, 0, 0);
    SendMessage(ctx->hwnd, WM_CAP_SET_PREVIEW, 0, 0);

    ret = SendMessage(ctx->hwnd, WM_CAP_SET_CALLBACK_VIDEOSTREAM, 0,
                      (LPARAM) videostream_cb);
    if(!ret) {
        av_log(s, AV_LOG_ERROR, "Could not set video stream callback.\n");
        goto fail_io;
    }

    /* Stash our context on the window so the callback can find it. */
    SetWindowLongPtr(ctx->hwnd, GWLP_USERDATA, (LONG_PTR) ctx);

    st = av_new_stream(s, 0);
    if(!st) {
        vfw_read_close(s);
        return AVERROR_NOMEM;
    }

    /* Set video format */
    bisize = SendMessage(ctx->hwnd, WM_CAP_GET_VIDEOFORMAT, 0, 0);
    if(!bisize)
        goto fail_io;
    bi = av_malloc(bisize);
    if(!bi) {
        vfw_read_close(s);
        return AVERROR_NOMEM;
    }
    ret = SendMessage(ctx->hwnd, WM_CAP_GET_VIDEOFORMAT, bisize, (LPARAM) bi);
    if(!ret)
        goto fail_bi;

    dump_bih(s, &bi->bmiHeader);

    /* Caller-supplied dimensions take precedence over the driver's. */
    width  = ap->width  ? ap->width  : bi->bmiHeader.biWidth ;
    height = ap->height ? ap->height : bi->bmiHeader.biHeight;
    bi->bmiHeader.biWidth  = width ;
    bi->bmiHeader.biHeight = height;

#if 0
    /* For testing yet unsupported compressions
     * Copy these values from user-supplied verbose information */
    bi->bmiHeader.biWidth       = 320;
    bi->bmiHeader.biHeight      = 240;
    bi->bmiHeader.biPlanes      = 1;
    bi->bmiHeader.biBitCount    = 12;
    bi->bmiHeader.biCompression = MKTAG('I','4','2','0');
    bi->bmiHeader.biSizeImage   = 115200;
    dump_bih(s, &bi->bmiHeader);
#endif

    ret = SendMessage(ctx->hwnd, WM_CAP_SET_VIDEOFORMAT, bisize, (LPARAM) bi);
    if(!ret) {
        av_log(s, AV_LOG_ERROR, "Could not set Video Format.\n");
        goto fail_bi;
    }

    biCompression = bi->bmiHeader.biCompression;
    biBitCount = bi->bmiHeader.biBitCount;

    av_free(bi);

    /* Set sequence setup */
    ret = SendMessage(ctx->hwnd, WM_CAP_GET_SEQUENCE_SETUP, sizeof(cparms),
                      (LPARAM) &cparms);
    if(!ret)
        goto fail_io;

    dump_captureparms(s, &cparms);

    cparms.fYield = 1; // Spawn a background thread
    cparms.dwRequestMicroSecPerFrame =
                               (ap->time_base.num*1000000) / ap->time_base.den;
    cparms.fAbortLeftMouse = 0;
    cparms.fAbortRightMouse = 0;
    cparms.fCaptureAudio = 0;
    cparms.vKeyAbort = 0;

    ret = SendMessage(ctx->hwnd, WM_CAP_SET_SEQUENCE_SETUP, sizeof(cparms),
                      (LPARAM) &cparms);
    if(!ret)
        goto fail_io;

    codec = st->codec;
    codec->time_base = ap->time_base;
    codec->codec_type = CODEC_TYPE_VIDEO;
    codec->width = width;
    codec->height = height;
    codec->codec_id = CODEC_ID_RAWVIDEO;
    codec->pix_fmt = vfw_pixfmt(biCompression, biBitCount);
    if(biCompression == BI_RGB)
        codec->bits_per_sample = biBitCount;

    /* 32-bit pts in milliseconds: matches dwTimeCaptured units. */
    av_set_pts_info(st, 32, 1, 1000);

    if(codec->pix_fmt == -1) {
        av_log(s, AV_LOG_ERROR, "Unknown compression type."
                         "Please report verbose (-v 99) debug information.\n");
        vfw_read_close(s);
        return AVERROR_PATCHWELCOME;
    }

    ctx->mutex = CreateMutex(NULL, 0, NULL);
    if(!ctx->mutex) {
        av_log(s, AV_LOG_ERROR, "Could not create Mutex.\n" );
        goto fail_io;
    }
    ctx->event = CreateEvent(NULL, 1, 0, NULL);
    if(!ctx->event) {
        av_log(s, AV_LOG_ERROR, "Could not create Event.\n" );
        goto fail_io;
    }

    ret = SendMessage(ctx->hwnd, WM_CAP_SEQUENCE_NOFILE, 0, 0);
    if(!ret) {
        av_log(s, AV_LOG_ERROR, "Could not start capture sequence.\n" );
        goto fail_io;
    }

    return 0;

fail_bi:
    av_free(bi);

fail_io:
    vfw_read_close(s);
    return AVERROR_IO;
}
410
/* Pops the oldest packet off the callback-fed FIFO.  Without
 * AVFMT_FLAG_NONBLOCK it blocks on ctx->event until the capture thread
 * queues a frame; with it, returns AVERROR(EAGAIN) when the queue is
 * empty. */
static int vfw_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    struct vfw_ctx *ctx = s->priv_data;
    AVPacketList *pktl = NULL;

    while(!pktl) {
        WaitForSingleObject(ctx->mutex, INFINITE);
        pktl = ctx->pktl;
        if(ctx->pktl) {
            /* Ownership of the packet data moves to the caller. */
            *pkt = ctx->pktl->pkt;
            ctx->pktl = ctx->pktl->next;
            av_free(pktl);
        }
        ResetEvent(ctx->event);
        ReleaseMutex(ctx->mutex);
        if(!pktl) {
            if(s->flags & AVFMT_FLAG_NONBLOCK) {
                return AVERROR(EAGAIN);
            } else {
                WaitForSingleObject(ctx->event, INFINITE);
            }
        }
    }

    ctx->curbufsize -= pkt->size;

    return pkt->size;
}
439
440static int vfw_read_close(AVFormatContext *s)
441{
442 struct vfw_ctx *ctx = s->priv_data;
443
444 if(ctx->hwnd) {
445 SendMessage(ctx->hwnd, WM_CAP_SET_CALLBACK_VIDEOSTREAM, 0, 0);
446 SendMessage(ctx->hwnd, WM_CAP_DRIVER_DISCONNECT, 0, 0);
447 DestroyWindow(ctx->hwnd);
448 }
449 if(ctx->mutex)
450 CloseHandle(ctx->mutex);
451 if(ctx->event)
452 CloseHandle(ctx->event);
453
454 return 0;
455}
456
/* Demuxer registration for Video for Windows capture devices. */
AVInputFormat vfwcap_demuxer = {
    "vfwcap",                                /* name */
    NULL_IF_CONFIG_SMALL("VFW video capture"), /* long_name */
    sizeof(struct vfw_ctx),                  /* priv_data_size */
    NULL,                                    /* read_probe: device, nothing to probe */
    vfw_read_header,
    vfw_read_packet,
    vfw_read_close,
    .flags = AVFMT_NOFILE,                   /* opens the device itself, no AVIO */
};
diff --git a/src/plugins/ffmpeg/libavdevice/x11grab.c b/src/plugins/ffmpeg/libavdevice/x11grab.c
deleted file mode 100644
index 4e1a499..0000000
--- a/src/plugins/ffmpeg/libavdevice/x11grab.c
+++ /dev/null
@@ -1,529 +0,0 @@
1/*
2 * X11 video grab interface
3 *
4 * This file is part of FFmpeg.
5 *
6 * FFmpeg integration:
7 * Copyright (C) 2006 Clemens Fruhwirth <clemens@endorphin.org>
8 * Edouard Gomez <ed.gomez@free.fr>
9 *
10 * This file contains code from grab.c:
11 * Copyright (c) 2000-2001 Fabrice Bellard
12 *
13 * This file contains code from the xvidcap project:
14 * Copyright (C) 1997-1998 Rasca, Berlin
15 * 2003-2004 Karl H. Beckers, Frankfurt
16 *
17 * FFmpeg is free software; you can redistribute it and/or modify
18 * it under the terms of the GNU General Public License as published by
19 * the Free Software Foundation; either version 2 of the License, or
20 * (at your option) any later version.
21 *
22 * FFmpeg is distributed in the hope that it will be useful,
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
25 * GNU General Public License for more details.
26 *
27 * You should have received a copy of the GNU General Public License
28 * along with FFmpeg; if not, write to the Free Software
29 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
30 */
31
32/**
33 * @file x11grab.c
34 * X11 frame device demuxer by Clemens Fruhwirth <clemens@endorphin.org>
35 * and Edouard Gomez <ed.gomez@free.fr>.
36 */
37
38#include "config.h"
39#include "libavformat/avformat.h"
40#include <unistd.h>
41#include <fcntl.h>
42#include <sys/ioctl.h>
43#include <sys/time.h>
44#define _LINUX_TIME_H 1
45#include <time.h>
46#include <X11/X.h>
47#include <X11/Xlib.h>
48#include <X11/Xlibint.h>
49#include <X11/Xproto.h>
50#include <X11/Xutil.h>
51#include <sys/ipc.h>
52#include <sys/shm.h>
53#include <X11/extensions/XShm.h>
54
/**
 * X11 Device Demuxer context
 */
typedef struct x11_grab_s
{
    int frame_size;          /**< Size in bytes of a grabbed frame */
    AVRational time_base;    /**< Time base */
    int64_t time_frame;      /**< Current time */

    int height;              /**< Height of the grab frame */
    int width;               /**< Width of the grab frame */
    int x_off;               /**< Horizontal top-left corner coordinate */
    int y_off;               /**< Vertical top-left corner coordinate */

    Display *dpy;            /**< X11 display from which x11grab grabs frames */
    XImage *image;           /**< X11 image holding the grab */
    int  use_shm;            /**< !0 when using XShm extension */
    XShmSegmentInfo shminfo; /**< When using XShm, keeps track of XShm infos */
    int mouse_warning_shown; /**< !0 once the missing-pointer warning was logged */
} x11_grab_t;
75
76/**
77 * Initializes the x11 grab device demuxer (public device demuxer API).
78 *
79 * @param s1 Context from avformat core
80 * @param ap Parameters from avformat core
81 * @return <ul>
82 * <li>AVERROR(ENOMEM) no memory left</li>
83 * <li>AVERROR(EIO) other failure case</li>
84 * <li>0 success</li>
85 * </ul>
86 */
87static int
88x11grab_read_header(AVFormatContext *s1, AVFormatParameters *ap)
89{
90 x11_grab_t *x11grab = s1->priv_data;
91 Display *dpy;
92 AVStream *st = NULL;
93 int input_pixfmt;
94 XImage *image;
95 int x_off = 0;
96 int y_off = 0;
97 int use_shm;
98 char *param, *offset;
99
100 param = av_strdup(s1->filename);
101 offset = strchr(param, '+');
102 if (offset) {
103 sscanf(offset, "%d,%d", &x_off, &y_off);
104 *offset= 0;
105 }
106
107 av_log(s1, AV_LOG_INFO, "device: %s -> display: %s x: %d y: %d width: %d height: %d\n", s1->filename, param, x_off, y_off, ap->width, ap->height);
108
109 dpy = XOpenDisplay(param);
110 if(!dpy) {
111 av_log(s1, AV_LOG_ERROR, "Could not open X display.\n");
112 return AVERROR(EIO);
113 }
114
115 if (!ap || ap->width <= 0 || ap->height <= 0 || ap->time_base.den <= 0) {
116 av_log(s1, AV_LOG_ERROR, "AVParameters don't have video size and/or rate. Use -s and -r.\n");
117 return AVERROR(EIO);
118 }
119
120 st = av_new_stream(s1, 0);
121 if (!st) {
122 return AVERROR(ENOMEM);
123 }
124 av_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
125
126 use_shm = XShmQueryExtension(dpy);
127 av_log(s1, AV_LOG_INFO, "shared memory extension %s found\n", use_shm ? "" : "not");
128
129 if(use_shm) {
130 int scr = XDefaultScreen(dpy);
131 image = XShmCreateImage(dpy,
132 DefaultVisual(dpy, scr),
133 DefaultDepth(dpy, scr),
134 ZPixmap,
135 NULL,
136 &x11grab->shminfo,
137 ap->width, ap->height);
138 x11grab->shminfo.shmid = shmget(IPC_PRIVATE,
139 image->bytes_per_line * image->height,
140 IPC_CREAT|0777);
141 if (x11grab->shminfo.shmid == -1) {
142 av_log(s1, AV_LOG_ERROR, "Fatal: Can't get shared memory!\n");
143 return AVERROR(ENOMEM);
144 }
145 x11grab->shminfo.shmaddr = image->data = shmat(x11grab->shminfo.shmid, 0, 0);
146 x11grab->shminfo.readOnly = False;
147
148 if (!XShmAttach(dpy, &x11grab->shminfo)) {
149 av_log(s1, AV_LOG_ERROR, "Fatal: Failed to attach shared memory!\n");
150 /* needs some better error subroutine :) */
151 return AVERROR(EIO);
152 }
153 } else {
154 image = XGetImage(dpy, RootWindow(dpy, DefaultScreen(dpy)),
155 x_off,y_off,
156 ap->width,ap->height,
157 AllPlanes, ZPixmap);
158 }
159
160 switch (image->bits_per_pixel) {
161 case 8:
162 av_log (s1, AV_LOG_DEBUG, "8 bit palette\n");
163 input_pixfmt = PIX_FMT_PAL8;
164 break;
165 case 16:
166 if ( image->red_mask == 0xf800 &&
167 image->green_mask == 0x07e0 &&
168 image->blue_mask == 0x001f ) {
169 av_log (s1, AV_LOG_DEBUG, "16 bit RGB565\n");
170 input_pixfmt = PIX_FMT_RGB565;
171 } else if (image->red_mask == 0x7c00 &&
172 image->green_mask == 0x03e0 &&
173 image->blue_mask == 0x001f ) {
174 av_log(s1, AV_LOG_DEBUG, "16 bit RGB555\n");
175 input_pixfmt = PIX_FMT_RGB555;
176 } else {
177 av_log(s1, AV_LOG_ERROR, "RGB ordering at image depth %i not supported ... aborting\n", image->bits_per_pixel);
178 av_log(s1, AV_LOG_ERROR, "color masks: r 0x%.6lx g 0x%.6lx b 0x%.6lx\n", image->red_mask, image->green_mask, image->blue_mask);
179 return AVERROR(EIO);
180 }
181 break;
182 case 24:
183 if ( image->red_mask == 0xff0000 &&
184 image->green_mask == 0x00ff00 &&
185 image->blue_mask == 0x0000ff ) {
186 input_pixfmt = PIX_FMT_BGR24;
187 } else if ( image->red_mask == 0x0000ff &&
188 image->green_mask == 0x00ff00 &&
189 image->blue_mask == 0xff0000 ) {
190 input_pixfmt = PIX_FMT_RGB24;
191 } else {
192 av_log(s1, AV_LOG_ERROR,"rgb ordering at image depth %i not supported ... aborting\n", image->bits_per_pixel);
193 av_log(s1, AV_LOG_ERROR, "color masks: r 0x%.6lx g 0x%.6lx b 0x%.6lx\n", image->red_mask, image->green_mask, image->blue_mask);
194 return AVERROR(EIO);
195 }
196 break;
197 case 32:
198#if 0
199 GetColorInfo (image, &c_info);
200 if ( c_info.alpha_mask == 0xff000000 && image->green_mask == 0x0000ff00) {
201 /* byte order is relevant here, not endianness
202 * endianness is handled by avcodec, but atm no such thing
203 * as having ABGR, instead of ARGB in a word. Since we
204 * need this for Solaris/SPARC, but need to do the conversion
205 * for every frame we do it outside of this loop, cf. below
206 * this matches both ARGB32 and ABGR32 */
207 input_pixfmt = PIX_FMT_ARGB32;
208 } else {
209 av_log(s1, AV_LOG_ERROR,"image depth %i not supported ... aborting\n", image->bits_per_pixel);
210 return AVERROR(EIO);
211 }
212#endif
213 input_pixfmt = PIX_FMT_RGB32;
214 break;
215 default:
216 av_log(s1, AV_LOG_ERROR, "image depth %i not supported ... aborting\n", image->bits_per_pixel);
217 return -1;
218 }
219
220 x11grab->frame_size = ap->width * ap->height * image->bits_per_pixel/8;
221 x11grab->dpy = dpy;
222 x11grab->width = ap->width;
223 x11grab->height = ap->height;
224 x11grab->time_base = ap->time_base;
225 x11grab->time_frame = av_gettime() / av_q2d(ap->time_base);
226 x11grab->x_off = x_off;
227 x11grab->y_off = y_off;
228 x11grab->image = image;
229 x11grab->use_shm = use_shm;
230 x11grab->mouse_warning_shown = 0;
231
232 st->codec->codec_type = CODEC_TYPE_VIDEO;
233 st->codec->codec_id = CODEC_ID_RAWVIDEO;
234 st->codec->width = ap->width;
235 st->codec->height = ap->height;
236 st->codec->pix_fmt = input_pixfmt;
237 st->codec->time_base = ap->time_base;
238 st->codec->bit_rate = x11grab->frame_size * 1/av_q2d(ap->time_base) * 8;
239
240 return 0;
241}
242
243/**
244 * Get pointer coordinates from X11.
245 *
246 * @param x Integer where horizontal coordinate will be returned
247 * @param y Integer where vertical coordinate will be returned
248 * @param dpy X11 display from where pointer coordinates are retrieved
249 * @param s1 Context used for logging errors if necessary
250 */
251static void
252get_pointer_coordinates(int *x, int *y, Display *dpy, AVFormatContext *s1)
253{
254 Window mrootwindow, childwindow;
255 int dummy;
256
257 mrootwindow = DefaultRootWindow(dpy);
258
259 if (XQueryPointer(dpy, mrootwindow, &mrootwindow, &childwindow,
260 x, y, &dummy, &dummy, (unsigned int*)&dummy)) {
261 } else {
262 x11_grab_t *s = s1->priv_data;
263 if (!s->mouse_warning_shown) {
264 av_log(s1, AV_LOG_INFO, "couldn't find mouse pointer\n");
265 s->mouse_warning_shown = 1;
266 }
267 *x = -1;
268 *y = -1;
269 }
270}
271
/**
 * Mouse painting helper function that applies an 'and' and 'or' mask pair to
 * '*dst' pixel. It actually draws a mouse pointer pixel to grabbed frame.
 *
 * Pixels of unsupported depths are left untouched.
 *
 * @param dst Destination pixel
 * @param and Part of the mask that must be applied using a bitwise 'and'
 *            operator
 * @param or Part of the mask that must be applied using a bitwise 'or'
 *           operator
 * @param bits_per_pixel Bits per pixel used in the grabbed image
 */
static inline void
apply_masks(uint8_t *dst, int and, int or, int bits_per_pixel)
{
    if (bits_per_pixel == 32) {
        uint32_t *p32 = (uint32_t *)dst;
        *p32 = (*p32 & and) | or;
    } else if (bits_per_pixel == 16) {
        uint16_t *p16 = (uint16_t *)dst;
        *p16 = (*p16 & and) | or;
    } else if (bits_per_pixel == 8) {
        /* Palette mode: any non-zero 'or' paints the pixel with index 1. */
        *dst = !!or;
    }
}
298
299/**
300 * Paints a mouse pointer in an X11 image.
301 *
302 * @param image image to paint the mouse pointer to
303 * @param s context used to retrieve original grabbing rectangle
304 * coordinates
305 * @param x Mouse pointer coordinate
306 * @param y Mouse pointer coordinate
307 */
308static void
309paint_mouse_pointer(XImage *image, x11_grab_t *s, int x, int y)
310{
311 /* 16x20x1bpp bitmap for the black channel of the mouse pointer */
312 static const uint16_t const mousePointerBlack[] =
313 {
314 0x0000, 0x0003, 0x0005, 0x0009, 0x0011,
315 0x0021, 0x0041, 0x0081, 0x0101, 0x0201,
316 0x03c1, 0x0049, 0x0095, 0x0093, 0x0120,
317 0x0120, 0x0240, 0x0240, 0x0380, 0x0000
318 };
319
320 /* 16x20x1bpp bitmap for the white channel of the mouse pointer */
321 static const uint16_t const mousePointerWhite[] =
322 {
323 0x0000, 0x0000, 0x0002, 0x0006, 0x000e,
324 0x001e, 0x003e, 0x007e, 0x00fe, 0x01fe,
325 0x003e, 0x0036, 0x0062, 0x0060, 0x00c0,
326 0x00c0, 0x0180, 0x0180, 0x0000, 0x0000
327 };
328
329 int x_off = s->x_off;
330 int y_off = s->y_off;
331 int width = s->width;
332 int height = s->height;
333
334 if ( x - x_off >= 0 && x < width + x_off
335 && y - y_off >= 0 && y < height + y_off) {
336 uint8_t *im_data = (uint8_t*)image->data;
337 int bytes_per_pixel;
338 int line;
339 int masks;
340
341 /* Select correct masks and pixel size */
342 if (image->bits_per_pixel == 8) {
343 masks = 1;
344 } else {
345 masks = (image->red_mask|image->green_mask|image->blue_mask);
346 }
347 bytes_per_pixel = image->bits_per_pixel>>3;
348
349 /* Shift to right line */
350 im_data += image->bytes_per_line * (y - y_off);
351 /* Shift to right pixel in the line */
352 im_data += bytes_per_pixel * (x - x_off);
353
354 /* Draw the cursor - proper loop */
355 for (line = 0; line < FFMIN(20, (y_off + height) - y); line++) {
356 uint8_t *cursor = im_data;
357 int column;
358 uint16_t bm_b;
359 uint16_t bm_w;
360
361 bm_b = mousePointerBlack[line];
362 bm_w = mousePointerWhite[line];
363
364 for (column = 0; column < FFMIN(16, (x_off + width) - x); column++) {
365 apply_masks(cursor, ~(masks*(bm_b&1)), masks*(bm_w&1),
366 image->bits_per_pixel);
367 cursor += bytes_per_pixel;
368 bm_b >>= 1;
369 bm_w >>= 1;
370 }
371 im_data += image->bytes_per_line;
372 }
373 }
374}
375
376
377/**
378 * Reads new data in the image structure.
379 *
380 * @param dpy X11 display to grab from
381 * @param d
382 * @param image Image where the grab will be put
383 * @param x Top-Left grabbing rectangle horizontal coordinate
384 * @param y Top-Left grabbing rectangle vertical coordinate
385 * @return 0 if error, !0 if successful
386 */
387static int
388xget_zpixmap(Display *dpy, Drawable d, XImage *image, int x, int y)
389{
390 xGetImageReply rep;
391 xGetImageReq *req;
392 long nbytes;
393
394 if (!image) {
395 return 0;
396 }
397
398 LockDisplay(dpy);
399 GetReq(GetImage, req);
400
401 /* First set up the standard stuff in the request */
402 req->drawable = d;
403 req->x = x;
404 req->y = y;
405 req->width = image->width;
406 req->height = image->height;
407 req->planeMask = (unsigned int)AllPlanes;
408 req->format = ZPixmap;
409
410 if (!_XReply(dpy, (xReply *)&rep, 0, xFalse) || !rep.length) {
411 UnlockDisplay(dpy);
412 SyncHandle();
413 return 0;
414 }
415
416 nbytes = (long)rep.length << 2;
417 _XReadPad(dpy, image->data, nbytes);
418
419 UnlockDisplay(dpy);
420 SyncHandle();
421 return 1;
422}
423
424/**
425 * Grabs a frame from x11 (public device demuxer API).
426 *
427 * @param s1 Context from avformat core
428 * @param pkt Packet holding the brabbed frame
429 * @return frame size in bytes
430 */
431static int
432x11grab_read_packet(AVFormatContext *s1, AVPacket *pkt)
433{
434 x11_grab_t *s = s1->priv_data;
435 Display *dpy = s->dpy;
436 XImage *image = s->image;
437 int x_off = s->x_off;
438 int y_off = s->y_off;
439
440 int64_t curtime, delay;
441 struct timespec ts;
442
443 /* Calculate the time of the next frame */
444 s->time_frame += INT64_C(1000000);
445
446 /* wait based on the frame rate */
447 for(;;) {
448 curtime = av_gettime();
449 delay = s->time_frame * av_q2d(s->time_base) - curtime;
450 if (delay <= 0) {
451 if (delay < INT64_C(-1000000) * av_q2d(s->time_base)) {
452 s->time_frame += INT64_C(1000000);
453 }
454 break;
455 }
456 ts.tv_sec = delay / 1000000;
457 ts.tv_nsec = (delay % 1000000) * 1000;
458 nanosleep(&ts, NULL);
459 }
460
461 if (av_new_packet(pkt, s->frame_size) < 0) {
462 return AVERROR(EIO);
463 }
464
465 pkt->pts = curtime;
466
467 if(s->use_shm) {
468 if (!XShmGetImage(dpy, RootWindow(dpy, DefaultScreen(dpy)), image, x_off, y_off, AllPlanes)) {
469 av_log (s1, AV_LOG_INFO, "XShmGetImage() failed\n");
470 }
471 } else {
472 if (!xget_zpixmap(dpy, RootWindow(dpy, DefaultScreen(dpy)), image, x_off, y_off)) {
473 av_log (s1, AV_LOG_INFO, "XGetZPixmap() failed\n");
474 }
475 }
476
477 {
478 int pointer_x, pointer_y;
479 get_pointer_coordinates(&pointer_x, &pointer_y, dpy, s1);
480 paint_mouse_pointer(image, s, pointer_x, pointer_y);
481 }
482
483
484 /* XXX: avoid memcpy */
485 memcpy(pkt->data, image->data, s->frame_size);
486 return s->frame_size;
487}
488
489/**
490 * Closes x11 frame grabber (public device demuxer API).
491 *
492 * @param s1 Context from avformat core
493 * @return 0 success, !0 failure
494 */
495static int
496x11grab_read_close(AVFormatContext *s1)
497{
498 x11_grab_t *x11grab = s1->priv_data;
499
500 /* Detach cleanly from shared mem */
501 if (x11grab->use_shm) {
502 XShmDetach(x11grab->dpy, &x11grab->shminfo);
503 shmdt(x11grab->shminfo.shmaddr);
504 shmctl(x11grab->shminfo.shmid, IPC_RMID, NULL);
505 }
506
507 /* Destroy X11 image */
508 if (x11grab->image) {
509 XDestroyImage(x11grab->image);
510 x11grab->image = NULL;
511 }
512
513 /* Free X11 display */
514 XCloseDisplay(x11grab->dpy);
515 return 0;
516}
517
518/** x11 grabber device demuxer declaration */
519AVInputFormat x11_grab_device_demuxer =
520{
521 "x11grab",
522 NULL_IF_CONFIG_SMALL("X11grab"),
523 sizeof(x11_grab_t),
524 NULL,
525 x11grab_read_header,
526 x11grab_read_packet,
527 x11grab_read_close,
528 .flags = AVFMT_NOFILE,
529};