author     rizzo <rizzo@f38db490-d61c-443f-a65b-d21fe96a405b>  2007-12-28 22:37:40 +0000
committer  rizzo <rizzo@f38db490-d61c-443f-a65b-d21fe96a405b>  2007-12-28 22:37:40 +0000
commit     956da6a580185e91414a135cdf8e19c26174bbc7 (patch)
tree       9aaa6f9615984eb88d0c51eff4a173946d1a0431
parent     55fa664279eb0cf0e6ac792e89353581d04de546 (diff)
various rearrangements and renaming of console_video stuff
git-svn-id: http://svn.digium.com/svn/asterisk/trunk@95262 f38db490-d61c-443f-a65b-d21fe96a405b
-rw-r--r--  channels/console_gui.c    52
-rw-r--r--  channels/console_video.c  247
-rw-r--r--  channels/console_video.h   19
-rw-r--r--  channels/vcodecs.c        124
4 files changed, 230 insertions, 212 deletions
diff --git a/channels/console_gui.c b/channels/console_gui.c
index 80ad58df3..a8c095b9a 100644
--- a/channels/console_gui.c
+++ b/channels/console_gui.c
@@ -96,6 +96,8 @@ static struct gui_info *cleanup_sdl(struct gui_info *gui)
if ( TTF_WasInit() )
TTF_Quit();
#endif
+ if (gui->outfd > -1)
+ close(gui->outfd);
if (gui->keypad)
SDL_FreeSurface(gui->keypad);
gui->keypad = NULL;
@@ -134,19 +136,22 @@ static void show_frame(struct video_desc *env, int out)
return;
if (out == WIN_LOCAL) { /* webcam/x11 to sdl */
- b_in = &env->out.enc_in;
- b_out = &env->out.loc_dpy;
+ b_in = &env->enc_in;
+ b_out = &env->loc_dpy;
p_in = NULL;
} else {
/* copy input format from the decoding context */
- AVCodecContext *c = env->in.dec_ctx;
- b_in = &env->in.dec_out;
+ AVCodecContext *c;
+ if (env->in == NULL) /* XXX should not happen - decoder not ready */
+ return;
+ c = env->in->dec_ctx;
+ b_in = &env->in->dec_out;
b_in->pix_fmt = c->pix_fmt;
b_in->w = c->width;
b_in->h = c->height;
- b_out = &env->in.rem_dpy;
- p_in = (AVPicture *)env->in.d_frame;
+ b_out = &env->rem_dpy;
+ p_in = (AVPicture *)env->in->d_frame;
}
bmp = gui->win[out].bmp;
SDL_LockYUVOverlay(bmp);
@@ -391,16 +396,16 @@ static void handle_button_event(struct video_desc *env, SDL_MouseButtonEvent but
gui->text_mode = 0;
/* define keypad boundary */
- if (button.x < env->in.rem_dpy.w)
+ if (button.x < env->rem_dpy.w)
index = KEY_REM_DPY; /* click on remote video */
- else if (button.x > env->in.rem_dpy.w + gui->keypad->w)
+ else if (button.x > env->rem_dpy.w + gui->keypad->w)
index = KEY_LOC_DPY; /* click on local video */
else if (button.y > gui->keypad->h)
index = KEY_OUT_OF_KEYPAD; /* click outside the keypad */
else if (gui->kp) {
int i;
for (i = 0; i < gui->kp_used; i++) {
- if (kp_match_area(&gui->kp[i], button.x - env->in.rem_dpy.w, button.y)) {
+ if (kp_match_area(&gui->kp[i], button.x - env->rem_dpy.w, button.y)) {
index = gui->kp[i].c;
break;
}
@@ -452,7 +457,7 @@ static void handle_button_event(struct video_desc *env, SDL_MouseButtonEvent but
break;
} else {
char buf[128];
- struct fbuf_t *fb = index == KEY_LOC_DPY ? &env->out.loc_dpy : &env->in.rem_dpy;
+ struct fbuf_t *fb = index == KEY_LOC_DPY ? &env->loc_dpy : &env->rem_dpy;
sprintf(buf, "%c%dx%d", button.button == SDL_BUTTON_RIGHT ? '>' : '<',
fb->w, fb->h);
video_geom(fb, buf);
@@ -619,9 +624,11 @@ static SDL_Surface *get_keypad(const char *file)
return temp;
}
+static void keypad_setup(struct gui_info *gui, const char *kp_file);
+
/* TODO: consistency checks, check for bpp, width and height */
/* Init the mask image used to grab the action. */
-static struct gui_info *gui_init(void)
+static struct gui_info *gui_init(const char *keypad_file)
{
struct gui_info *gui = ast_calloc(1, sizeof(*gui));
@@ -630,11 +637,15 @@ static struct gui_info *gui_init(void)
/* initialize keypad status */
gui->text_mode = 0;
gui->drag_mode = 0;
+ gui->outfd = -1;
/* initialize keyboard buffer */
append_char(gui->inbuf, &gui->inbuf_pos, '\0');
append_char(gui->msgbuf, &gui->msgbuf_pos, '\0');
+ keypad_setup(gui, keypad_file);
+ if (gui->keypad == NULL) /* no keypad, we are done */
+ return gui;
#ifdef HAVE_SDL_TTF
/* Initialize SDL_ttf library and load font */
if (TTF_Init() == -1) {
@@ -771,22 +782,17 @@ static void sdl_setup(struct video_desc *env)
if (depth < 16)
depth = 16;
if (!gui)
- env->gui = gui = gui_init();
+ env->gui = gui = gui_init(env->keypad_file);
if (!gui)
goto no_sdl;
- keypad_setup(gui, env->keypad_file);
-#if 0
- ast_log(LOG_WARNING, "keypad_setup returned %p %d\n",
- gui->keypad, gui->kp_used);
-#endif
if (gui->keypad) {
kp_w = gui->keypad->w;
kp_h = gui->keypad->h;
}
#define BORDER 5 /* border around our windows */
- maxw = env->in.rem_dpy.w + env->out.loc_dpy.w + kp_w;
- maxh = MAX( MAX(env->in.rem_dpy.h, env->out.loc_dpy.h), kp_h);
+ maxw = env->rem_dpy.w + env->loc_dpy.w + kp_w;
+ maxh = MAX( MAX(env->rem_dpy.h, env->loc_dpy.h), kp_h);
maxw += 4 * BORDER;
maxh += 2 * BORDER;
gui->screen = SDL_SetVideoMode(maxw, maxh, depth, 0);
@@ -797,11 +803,11 @@ static void sdl_setup(struct video_desc *env)
SDL_WM_SetCaption("Asterisk console Video Output", NULL);
if (set_win(gui->screen, &gui->win[WIN_REMOTE], dpy_fmt,
- env->in.rem_dpy.w, env->in.rem_dpy.h, BORDER, BORDER))
+ env->rem_dpy.w, env->rem_dpy.h, BORDER, BORDER))
goto no_sdl;
if (set_win(gui->screen, &gui->win[WIN_LOCAL], dpy_fmt,
- env->out.loc_dpy.w, env->out.loc_dpy.h,
- 3*BORDER+env->in.rem_dpy.w + kp_w, BORDER))
+ env->loc_dpy.w, env->loc_dpy.h,
+ 3*BORDER+env->rem_dpy.w + kp_w, BORDER))
goto no_sdl;
/* display the skin, but do not free it as we need it later to
@@ -809,7 +815,7 @@ static void sdl_setup(struct video_desc *env)
*/
if (gui->keypad) {
struct SDL_Rect *dest = &gui->win[WIN_KEYPAD].rect;
- dest->x = 2*BORDER + env->in.rem_dpy.w;
+ dest->x = 2*BORDER + env->rem_dpy.w;
dest->y = BORDER;
dest->w = gui->keypad->w;
dest->h = gui->keypad->h;
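
The show_frame() hunks above reflect that the remote-decoder state is now reached through a pointer (env->in) which stays NULL until the first remote frame arrives, so the GUI must check for an unready decoder before touching its output buffer. The sketch below illustrates that guard with deliberately simplified, hypothetical sk_* types; the real structures in console_video.h carry more fields.

#include <stddef.h>

/* Sketch only: simplified stand-ins for fbuf_t, video_dec_desc, video_desc. */
struct sk_fbuf { int w, h, pix_fmt; };

struct sk_dec {                          /* allocated on the first remote frame */
	struct sk_fbuf dec_out;          /* geometry of the decoded picture */
};

struct sk_env {
	struct sk_fbuf enc_in, loc_dpy, rem_dpy;
	struct sk_dec *in;               /* NULL until the decoder exists */
};

/* Select source/destination geometry for one of the two SDL windows.
 * Returns 0 on success, -1 when the remote decoder is not ready yet. */
static int sk_pick_buffers(struct sk_env *env, int local,
		struct sk_fbuf **src, struct sk_fbuf **dst)
{
	if (local) {                     /* webcam/x11 to the local window */
		*src = &env->enc_in;
		*dst = &env->loc_dpy;
		return 0;
	}
	if (env->in == NULL)             /* no frames received yet: skip */
		return -1;
	*src = &env->in->dec_out;        /* geometry comes from the decoder */
	*dst = &env->rem_dpy;
	return 0;
}
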
diff --git a/channels/console_video.c b/channels/console_video.c
index 149909634..18269aeb1 100644
--- a/channels/console_video.c
+++ b/channels/console_video.c
@@ -127,24 +127,6 @@ int console_video_formats =
AST_FORMAT_MP4_VIDEO | AST_FORMAT_H264 | AST_FORMAT_H261 ;
-/*
- * In many places we use buffers to store the raw frames (but not only),
- * so here is a structure to keep all the info. data = NULL means the
- * structure is not initialized, so the other fields are invalid.
- * size = 0 means the buffer is not malloc'ed so we don't have to free it.
- */
-struct fbuf_t { /* frame buffers, dynamically allocated */
- uint8_t *data; /* memory, malloced if size > 0, just reference
- * otherwise */
- int size; /* total size in bytes */
- int used; /* space used so far */
- int ebit; /* bits to ignore at the end */
- int x; /* origin, if necessary */
- int y;
- int w; /* size */
- int h;
- int pix_fmt;
-};
static void my_scale(struct fbuf_t *in, AVPicture *p_in,
struct fbuf_t *out, AVPicture *p_out);
@@ -177,9 +159,7 @@ struct video_out_desc {
int sendvideo;
struct fbuf_t loc_src; /* local source buffer, allocated in video_open() */
- struct fbuf_t enc_in; /* encoder input buffer, allocated in video_out_init() */
struct fbuf_t enc_out; /* encoder output buffer, allocated in video_out_init() */
- struct fbuf_t loc_dpy; /* display source buffer, no buffer (managed by SDL in bmp[1]) */
struct video_codec_desc *enc; /* encoder */
void *enc_ctx; /* encoding context */
@@ -200,59 +180,35 @@ struct video_out_desc {
};
/*
- * Descriptor for the incoming stream, with a buffer for the bitstream
- * extracted by the RTP packets, RTP reassembly info, and a frame buffer
- * for the decoded frame (buf).
- * and store the result in a suitable frame buffer for later display.
- * NOTE: dec_ctx == NULL means the rest is invalid (e.g. because no
- * codec, no memory, etc.) and we must drop all incoming frames.
- *
- * Incoming payload is stored in one of the dec_in[] buffers, which are
- * emptied by the video thread. These buffers are organized in a circular
- * queue, with dec_in_cur being the buffer in use by the incoming stream,
- * and dec_in_dpy is the one being displayed. When the pointers need to
- * be changed, we synchronize the access to them with dec_in_lock.
- * When the list is full dec_in_cur = NULL (we cannot store new data),
- * when the list is empty dec_in_dpy is NULL (we cannot display frames).
- */
-struct video_in_desc {
- struct video_codec_desc *dec; /* decoder */
- AVCodecContext *dec_ctx; /* information about the codec in the stream */
- AVCodec *codec; /* reference to the codec */
- AVFrame *d_frame; /* place to store the decoded frame */
- AVCodecParserContext *parser;
- uint16_t next_seq; /* must be 16 bit */
- int discard; /* flag for discard status */
-#define N_DEC_IN 3 /* number of incoming buffers */
- struct fbuf_t *dec_in_cur; /* buffer being filled in */
- struct fbuf_t *dec_in_dpy; /* buffer to display */
- ast_mutex_t dec_in_lock;
- struct fbuf_t dec_in[N_DEC_IN]; /* incoming bitstream, allocated/extended in fbuf_append() */
- struct fbuf_t dec_out; /* decoded frame, no buffer (data is in AVFrame) */
- struct fbuf_t rem_dpy; /* display remote image, no buffer (it is in win[WIN_REMOTE].bmp) */
-};
-
-
-/*
* The overall descriptor, with room for config info, video source and
* received data descriptors, SDL info, etc.
+ * This should be globally visible to all modules (grabber, vcodecs, gui)
+ * and contain all configuration info.
*/
struct video_desc {
char codec_name[64]; /* the codec we use */
pthread_t vthread; /* video thread */
+ ast_mutex_t dec_lock; /* sync decoder and video thread */
int shutdown; /* set to shutdown vthread */
struct ast_channel *owner; /* owner channel */
- struct video_in_desc in; /* remote video descriptor */
- struct video_out_desc out; /* local video descriptor */
- struct gui_info *gui;
+ struct fbuf_t enc_in; /* encoder input buffer, allocated in video_out_init() */
char keypad_file[256]; /* image for the keypad */
char keypad_font[256]; /* font for the keypad */
char sdl_videodriver[256];
+
+ struct fbuf_t rem_dpy; /* display remote video, no buffer (it is in win[WIN_REMOTE].bmp) */
+ struct fbuf_t loc_dpy; /* display local source, no buffer (managed by SDL in bmp[1]) */
+
+
+ /* local information for grabbers, codecs, gui */
+ struct gui_info *gui;
+ struct video_dec_desc *in; /* remote video descriptor */
+ struct video_out_desc out; /* local video descriptor */
};
static AVPicture *fill_pict(struct fbuf_t *b, AVPicture *p);
@@ -604,84 +560,12 @@ static struct video_codec_desc *map_config_video_format(char *name)
return supported_codecs[i];
}
-/*! \brief uninitialize the descriptor for remote video stream */
-static int video_in_uninit(struct video_in_desc *v)
-{
- int i;
-
- if (v->parser) {
- av_parser_close(v->parser);
- v->parser = NULL;
- }
- if (v->dec_ctx) {
- avcodec_close(v->dec_ctx);
- av_free(v->dec_ctx);
- v->dec_ctx = NULL;
- }
- if (v->d_frame) {
- av_free(v->d_frame);
- v->d_frame = NULL;
- }
- v->codec = NULL; /* only a reference */
- v->dec = NULL; /* forget the decoder */
- v->discard = 1; /* start in discard mode */
- for (i = 0; i < N_DEC_IN; i++)
- fbuf_free(&v->dec_in[i]);
- fbuf_free(&v->dec_out);
- fbuf_free(&v->rem_dpy);
- return -1; /* error, in case someone cares */
-}
-
-/*
- * initialize ffmpeg resources used for decoding frames from the network.
- */
-static int video_in_init(struct video_in_desc *v, uint32_t format)
-{
- enum CodecID codec;
-
- /* XXX should check that these are already set */
- v->codec = NULL;
- v->dec_ctx = NULL;
- v->d_frame = NULL;
- v->parser = NULL;
- v->discard = 1;
-
- codec = map_video_format(format, CM_RD);
-
- v->codec = avcodec_find_decoder(codec);
- if (!v->codec) {
- ast_log(LOG_WARNING, "Unable to find the decoder for format %d\n", codec);
- return video_in_uninit(v);
- }
- /*
- * Initialize the codec context.
- */
- v->dec_ctx = avcodec_alloc_context();
- /* XXX call dec_init() ? */
- if (avcodec_open(v->dec_ctx, v->codec) < 0) {
- ast_log(LOG_WARNING, "Cannot open the codec context\n");
- av_free(v->dec_ctx);
- v->dec_ctx = NULL;
- return video_in_uninit(v);
- }
-
- v->parser = av_parser_init(codec);
- if (!v->parser) {
- ast_log(LOG_WARNING, "Cannot initialize the decoder parser\n");
- return video_in_uninit(v);
- }
-
- v->d_frame = avcodec_alloc_frame();
- if (!v->d_frame) {
- ast_log(LOG_WARNING, "Cannot allocate decoding video frame\n");
- return video_in_uninit(v);
- }
- return 0; /* ok */
-}
/*! \brief uninitialize the descriptor for local video stream */
-static int video_out_uninit(struct video_out_desc *v)
+static int video_out_uninit(struct video_desc *env)
{
+ struct video_out_desc *v = &env->out;
+
/* XXX this should be a codec callback */
if (v->enc_ctx) {
AVCodecContext *enc_ctx = (AVCodecContext *)v->enc_ctx;
@@ -696,9 +580,8 @@ static int video_out_uninit(struct video_out_desc *v)
v->codec = NULL; /* only a reference */
fbuf_free(&v->loc_src);
- fbuf_free(&v->enc_in);
+ fbuf_free(&env->enc_in);
fbuf_free(&v->enc_out);
- fbuf_free(&v->loc_dpy);
if (v->image) { /* X11 grabber */
XCloseDisplay(v->dpy);
v->dpy = NULL;
@@ -732,14 +615,14 @@ static int video_out_init(struct video_desc *env)
if (v->loc_src.data == NULL) {
ast_log(LOG_WARNING, "No local source active\n");
- return video_out_uninit(v);
+ return -1; /* error, but nothing to undo yet */
}
codec = map_video_format(v->enc->format, CM_WR);
v->codec = avcodec_find_encoder(codec);
if (!v->codec) {
ast_log(LOG_WARNING, "Cannot find the encoder for format %d\n",
codec);
- return video_out_uninit(v);
+ return -1; /* error, but nothing to undo yet */
}
v->mtu = 1400; /* set it early so the encoder can use it */
@@ -747,19 +630,19 @@ static int video_out_init(struct video_desc *env)
/* allocate the input buffer for encoding.
* ffmpeg only supports PIX_FMT_YUV420P for the encoding.
*/
- enc_in = &v->enc_in;
+ enc_in = &env->enc_in;
enc_in->pix_fmt = PIX_FMT_YUV420P;
enc_in->size = (enc_in->w * enc_in->h * 3)/2;
enc_in->data = ast_calloc(1, enc_in->size);
if (!enc_in->data) {
ast_log(LOG_WARNING, "Cannot allocate encoder input buffer\n");
- return video_out_uninit(v);
+ return video_out_uninit(env);
}
/* construct an AVFrame that points into buf_in */
v->enc_in_frame = avcodec_alloc_frame();
if (!v->enc_in_frame) {
ast_log(LOG_WARNING, "Unable to allocate the encoding video frame\n");
- return video_out_uninit(v);
+ return video_out_uninit(env);
}
/* parameters for PIX_FMT_YUV420P */
@@ -798,7 +681,7 @@ static int video_out_init(struct video_desc *env)
codec);
av_free(enc_ctx);
v->enc_ctx = NULL;
- return video_out_uninit(v);
+ return video_out_uninit(env);
}
}
/*
@@ -927,26 +810,16 @@ int console_write_video(struct ast_channel *chan, struct ast_frame *f);
int console_write_video(struct ast_channel *chan, struct ast_frame *f)
{
struct video_desc *env = get_video_desc(chan);
- struct video_in_desc *v = &env->in;
+ struct video_dec_desc *v = env->in;
if (!env->gui) /* no gui, no rendering */
return 0;
- if (v->dec == NULL) { /* try to get the codec */
- v->dec = map_video_codec(f->subclass & ~1);
- if (v->dec == NULL) {
- ast_log(LOG_WARNING, "cannot find video codec, drop input 0x%x\n", f->subclass);
- return 0;
- }
- if (video_in_init(v, v->dec->format)) {
- /* This is not fatal, but we won't have incoming video */
- ast_log(LOG_WARNING, "Cannot initialize input decoder\n");
- v->dec = NULL;
- return 0;
- }
- }
- if (v->dec_ctx == NULL) {
- ast_log(LOG_WARNING, "cannot decode, dropping frame\n");
- return 0; /* error */
+ if (v == NULL)
+ env->in = v = dec_init(f->subclass & ~1);
+ if (v == NULL) {
+ /* This is not fatal, but we won't have incoming video */
+ ast_log(LOG_WARNING, "Cannot initialize input decoder\n");
+ return 0;
}
if (v->dec_in_cur == NULL) /* no buffer for incoming frames, drop */
@@ -993,14 +866,14 @@ int console_write_video(struct ast_channel *chan, struct ast_frame *f)
ast_log(LOG_WARNING, "empty video frame, discard\n");
return 0;
}
- if (v->dec->dec_decap(v->dec_in_cur, f->data, f->datalen)) {
+ if (v->d_callbacks->dec_decap(v->dec_in_cur, f->data, f->datalen)) {
ast_log(LOG_WARNING, "error in dec_decap, enter discard\n");
v->discard = 1;
}
if (f->subclass & 0x01) { // RTP Marker
/* prepare to decode: advance the buffer so the video thread knows. */
struct fbuf_t *tmp = v->dec_in_cur; /* store current pointer */
- ast_mutex_lock(&v->dec_in_lock);
+ ast_mutex_lock(&env->dec_lock);
if (++v->dec_in_cur == &v->dec_in[N_DEC_IN]) /* advance to next, circular */
v->dec_in_cur = &v->dec_in[0];
if (v->dec_in_dpy == NULL) { /* were not displaying anything, so set it */
@@ -1008,7 +881,7 @@ int console_write_video(struct ast_channel *chan, struct ast_frame *f)
} else if (v->dec_in_dpy == v->dec_in_cur) { /* current slot is busy */
v->dec_in_cur = NULL;
}
- ast_mutex_unlock(&v->dec_in_lock);
+ ast_mutex_unlock(&env->dec_lock);
}
return 0;
}
@@ -1040,7 +913,7 @@ static struct ast_frame *get_video_frames(struct video_desc *env, struct ast_fra
/* Scale the video for the encoder, then use it for local rendering
* so we will see the same as the remote party.
*/
- my_scale(&v->loc_src, NULL, &v->enc_in, NULL);
+ my_scale(&v->loc_src, NULL, &env->enc_in, NULL);
show_frame(env, WIN_LOCAL);
if (!v->sendvideo)
return NULL;
@@ -1086,10 +959,7 @@ static void *video_thread(void *arg)
env->out.loc_src.x = 0;
env->out.loc_src.y = 0;
- /* reset the pointers to the current decoded image */
- env->in.dec_in_cur = &env->in.dec_in[0];
- env->in.dec_in_dpy = NULL; /* nothing to display */
- ast_mutex_init(&env->in.dec_in_lock); /* used to sync decoder and renderer */
+ ast_mutex_init(&env->dec_lock); /* used to sync decoder and renderer */
if (video_open(&env->out)) {
ast_log(LOG_WARNING, "cannot open local video source\n");
@@ -1105,7 +975,6 @@ static void *video_thread(void *arg)
for (;;) {
struct timeval t = { 0, 50000 }; /* XXX 20 times/sec */
struct ast_frame *p, *f;
- struct video_in_desc *v = &env->in;
struct ast_channel *chan = env->owner;
int fd = chan->alertpipe[1];
char *caption = NULL, buf[160];
@@ -1115,7 +984,7 @@ static void *video_thread(void *arg)
if (env->out.sendvideo)
sprintf(buf, "%s %s %dx%d @@ %dfps %dkbps",
env->out.videodevice, env->codec_name,
- env->out.enc_in.w, env->out.enc_in.h,
+ env->enc_in.w, env->enc_in.h,
env->out.fps, env->out.bitrate/1000);
else
sprintf(buf, "hold");
@@ -1131,6 +1000,9 @@ static void *video_thread(void *arg)
/* sleep for a while */
ast_select(0, NULL, NULL, NULL, &t);
+ if (env->in) {
+ struct video_dec_desc *v = env->in;
+
/*
* While there is something to display, call the decoder and free
* the buffer, possibly enabling the receiver to store new data.
@@ -1138,11 +1010,11 @@ static void *video_thread(void *arg)
while (v->dec_in_dpy) {
struct fbuf_t *tmp = v->dec_in_dpy; /* store current pointer */
- if (v->dec->dec_run(v, tmp))
+ if (v->d_callbacks->dec_run(v, tmp))
show_frame(env, WIN_REMOTE);
tmp->used = 0; /* mark buffer as free */
tmp->ebit = 0;
- ast_mutex_lock(&v->dec_in_lock);
+ ast_mutex_lock(&env->dec_lock);
if (++v->dec_in_dpy == &v->dec_in[N_DEC_IN]) /* advance to next, circular */
v->dec_in_dpy = &v->dec_in[0];
@@ -1150,9 +1022,9 @@ static void *video_thread(void *arg)
v->dec_in_cur = tmp; /* using the slot just freed */
else if (v->dec_in_dpy == v->dec_in_cur) /* this was the last slot */
v->dec_in_dpy = NULL; /* nothing more to display */
- ast_mutex_unlock(&v->dec_in_lock);
+ ast_mutex_unlock(&env->dec_lock);
}
-
+ }
f = get_video_frames(env, &p); /* read and display */
if (!f)
@@ -1185,12 +1057,12 @@ static void *video_thread(void *arg)
}
/* thread terminating, here could call the uninit */
/* uninitialize the local and remote video environments */
- video_in_uninit(&env->in);
- video_out_uninit(&env->out);
+ env->in = dec_uninit(env->in);
+ video_out_uninit(env);
if (env->gui)
env->gui = cleanup_sdl(env->gui);
- ast_mutex_destroy(&(env->in.dec_in_lock));
+ ast_mutex_destroy(&env->dec_lock);
env->shutdown = 0;
return NULL;
}
@@ -1210,9 +1082,9 @@ static void copy_geometry(struct fbuf_t *src, struct fbuf_t *dst)
static void init_env(struct video_desc *env)
{
struct fbuf_t *c = &(env->out.loc_src); /* local source */
- struct fbuf_t *ei = &(env->out.enc_in); /* encoder input */
- struct fbuf_t *ld = &(env->out.loc_dpy); /* local display */
- struct fbuf_t *rd = &(env->in.rem_dpy); /* remote display */
+ struct fbuf_t *ei = &(env->enc_in); /* encoder input */
+ struct fbuf_t *ld = &(env->loc_dpy); /* local display */
+ struct fbuf_t *rd = &(env->rem_dpy); /* remote display */
c->pix_fmt = PIX_FMT_YUV420P; /* default - camera format */
ei->pix_fmt = PIX_FMT_YUV420P; /* encoder input */
@@ -1245,7 +1117,7 @@ void console_video_start(struct video_desc *env, struct ast_channel *owner)
env->out.enc = map_config_video_format(env->codec_name);
ast_log(LOG_WARNING, "start video out %s %dx%d\n",
- env->codec_name, env->out.enc_in.w, env->out.enc_in.h);
+ env->codec_name, env->enc_in.w, env->enc_in.h);
/*
* Register all codecs supported by the ffmpeg library.
* We only need to do it once, but probably doesn't
@@ -1329,12 +1201,17 @@ int console_video_cli(struct video_desc *env, const char *var, int fd)
} else if (!strcasecmp(var, "sendvideo")) {
ast_cli(fd, "sendvideo is [%s]\n", env->out.sendvideo ? "on" : "off");
} else if (!strcasecmp(var, "video_size")) {
+ int in_w = 0, in_h = 0;
+ if (env->in) {
+ in_w = env->in->dec_out.w;
+ in_h = env->in->dec_out.h;
+ }
ast_cli(fd, "sizes: video %dx%d camera %dx%d local %dx%d remote %dx%d in %dx%d\n",
- env->out.enc_in.w, env->out.enc_in.h,
+ env->enc_in.w, env->enc_in.h,
env->out.loc_src.w, env->out.loc_src.h,
- env->out.loc_dpy.w, env->out.loc_dpy.h,
- env->in.rem_dpy.w, env->in.rem_dpy.h,
- env->in.dec_out.w, env->in.dec_out.h);
+ env->loc_dpy.w, env->loc_dpy.h,
+ env->rem_dpy.w, env->rem_dpy.h,
+ in_w, in_h);
} else if (!strcasecmp(var, "bitrate")) {
ast_cli(fd, "bitrate is [%d]\n", env->out.bitrate);
} else if (!strcasecmp(var, "qmin")) {
@@ -1376,10 +1253,10 @@ int console_video_config(struct video_desc **penv,
CV_START(var, val);
CV_STR("videodevice", env->out.videodevice);
CV_BOOL("sendvideo", env->out.sendvideo);
- CV_F("video_size", video_geom(&env->out.enc_in, val));
+ CV_F("video_size", video_geom(&env->enc_in, val));
CV_F("camera_size", video_geom(&env->out.loc_src, val));
- CV_F("local_size", video_geom(&env->out.loc_dpy, val));
- CV_F("remote_size", video_geom(&env->in.rem_dpy, val));
+ CV_F("local_size", video_geom(&env->loc_dpy, val));
+ CV_F("remote_size", video_geom(&env->rem_dpy, val));
CV_STR("keypad", env->keypad_file);
CV_F("region", keypad_cfg_read(env->gui, val));
CV_STR("keypad_font", env->keypad_font);
diff --git a/channels/console_video.h b/channels/console_video.h
index d370679a8..4a1ef2be1 100644
--- a/channels/console_video.h
+++ b/channels/console_video.h
@@ -47,6 +47,25 @@
#endif /* HAVE_VIDEO_CONSOLE and others */
+/*
+ * In many places we use buffers to store the raw frames (but not only),
+ * so here is a structure to keep all the info. data = NULL means the
+ * structure is not initialized, so the other fields are invalid.
+ * size = 0 means the buffer is not malloc'ed so we don't have to free it.
+ */
+struct fbuf_t { /* frame buffers, dynamically allocated */
+ uint8_t *data; /* memory, malloced if size > 0, just reference
+ * otherwise */
+ int size; /* total size in bytes */
+ int used; /* space used so far */
+ int ebit; /* bits to ignore at the end */
+ int x; /* origin, if necessary */
+ int y;
+ int w; /* size */
+ int h;
+ int pix_fmt;
+};
+
struct video_desc; /* opaque type for video support */
struct video_desc *get_video_desc(struct ast_channel *c);
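
The fbuf_t comment above fixes two ownership conventions: data == NULL marks the descriptor as uninitialized, and size == 0 means data only references memory owned elsewhere and must not be freed. A minimal release helper honoring those rules might look like the sketch below; the tree's actual fbuf_free() may differ in detail.

#include <stdint.h>
#include <stdlib.h>

struct fbuf_t {			/* mirrors the declaration added above */
	uint8_t *data;
	int size, used, ebit;
	int x, y, w, h;
	int pix_fmt;
};

/* Release a frame buffer following the documented conventions: free the
 * memory only if we malloc'ed it (size > 0), then mark the descriptor as
 * uninitialized; geometry fields are left alone so they can be reused. */
static void sk_fbuf_release(struct fbuf_t *b)
{
	if (b->data && b->size > 0)
		free(b->data);
	b->data = NULL;		/* "not initialized" */
	b->size = 0;
	b->used = b->ebit = 0;
}
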
diff --git a/channels/vcodecs.c b/channels/vcodecs.c
index 3045ece23..88439818e 100644
--- a/channels/vcodecs.c
+++ b/channels/vcodecs.c
@@ -8,7 +8,7 @@
#include "asterisk/frame.h"
struct video_out_desc;
-struct video_in_desc;
+struct video_dec_desc;
struct fbuf_t;
/*
@@ -34,7 +34,7 @@ typedef int (*decoder_init_f)(AVCodecContext *enc_ctx);
typedef int (*decoder_decap_f)(struct fbuf_t *b, uint8_t *data, int len);
/*! \brief actually call the decoder */
-typedef int (*decoder_decode_f)(struct video_in_desc *v, struct fbuf_t *b);
+typedef int (*decoder_decode_f)(struct video_dec_desc *v, struct fbuf_t *b);
struct video_codec_desc {
const char *name; /* format name */
@@ -47,6 +47,35 @@ struct video_codec_desc {
decoder_decode_f dec_run;
};
+/*
+ * Descriptor for the incoming stream, with multiple buffers for the bitstream
+ * extracted from the RTP packets, RTP reassembly info, and a frame buffer
+ * for the decoded frame (buf).
+ * The descriptor is allocated as the first frame comes in.
+ *
+ * Incoming payload is stored in one of the dec_in[] buffers, which are
+ * emptied by the video thread. These buffers are organized in a circular
+ * queue, with dec_in_cur being the buffer in use by the incoming stream,
+ * and dec_in_dpy is the one being displayed. When the pointers need to
+ * be changed, we synchronize the access to them with dec_lock.
+ * When the list is full dec_in_cur = NULL (we cannot store new data),
+ * when the list is empty dec_in_dpy = NULL (we cannot display frames).
+ */
+struct video_dec_desc {
+ struct video_codec_desc *d_callbacks; /* decoder callbacks */
+ AVCodecContext *dec_ctx; /* information about the codec in the stream */
+ AVCodec *codec; /* reference to the codec */
+ AVFrame *d_frame; /* place to store the decoded frame */
+ AVCodecParserContext *parser;
+ uint16_t next_seq; /* must be 16 bit */
+ int discard; /* flag for discard status */
+#define N_DEC_IN 3 /* number of incoming buffers */
+ struct fbuf_t *dec_in_cur; /* buffer being filled in */
+ struct fbuf_t *dec_in_dpy; /* buffer to display */
+ struct fbuf_t dec_in[N_DEC_IN]; /* incoming bitstream, allocated/extended in fbuf_append() */
+ struct fbuf_t dec_out; /* decoded frame, no buffer (data is in AVFrame) */
+};
+
#ifdef debugging_only
/* some debugging code to check the bitstream:
@@ -170,6 +199,8 @@ void dump_buf(struct fbuf_t *b)
ast_log(LOG_WARNING, "%s\n", buf);
}
#endif /* debugging_only */
+
+
/*
* Here starts the glue code for the various supported video codecs.
* For each of them, we need to provide routines for initialization,
@@ -327,7 +358,7 @@ static int ffmpeg_encode(struct video_out_desc *v)
* proper frames. After that, if we have a valid frame, we decode it
* until the entire frame is processed.
*/
-static int ffmpeg_decode(struct video_in_desc *v, struct fbuf_t *b)
+static int ffmpeg_decode(struct video_dec_desc *v, struct fbuf_t *b)
{
uint8_t *src = b->data;
int srclen = b->used;
@@ -743,7 +774,7 @@ static int mpeg4_decap(struct fbuf_t *b, uint8_t *data, int len)
return fbuf_append(b, data, len, 0, 0);
}
-static int mpeg4_decode(struct video_in_desc *v, struct fbuf_t *b)
+static int mpeg4_decode(struct video_dec_desc *v, struct fbuf_t *b)
{
int full_frame = 0, datalen = b->used;
int ret = avcodec_decode_video(v->dec_ctx, v->d_frame, &full_frame,
@@ -1015,4 +1046,89 @@ static struct video_codec_desc *map_video_codec(int fmt)
return NULL;
}
+/*! \brief uninitialize the descriptor for remote video stream */
+static struct video_dec_desc *dec_uninit(struct video_dec_desc *v)
+{
+ int i;
+
+ if (v->parser) {
+ av_parser_close(v->parser);
+ v->parser = NULL;
+ }
+ if (v->dec_ctx) {
+ avcodec_close(v->dec_ctx);
+ av_free(v->dec_ctx);
+ v->dec_ctx = NULL;
+ }
+ if (v->d_frame) {
+ av_free(v->d_frame);
+ v->d_frame = NULL;
+ }
+ v->codec = NULL; /* only a reference */
+ v->d_callbacks = NULL; /* forget the decoder */
+ v->discard = 1; /* start in discard mode */
+ for (i = 0; i < N_DEC_IN; i++)
+ fbuf_free(&v->dec_in[i]);
+ fbuf_free(&v->dec_out);
+ ast_free(v);
+ return NULL; /* error, in case someone cares */
+}
+
+/*
+ * initialize ffmpeg resources used for decoding frames from the network.
+ */
+static struct video_dec_desc *dec_init(uint32_t the_ast_format)
+{
+ enum CodecID codec;
+ struct video_dec_desc *v = ast_calloc(1, sizeof(*v));
+ if (v == NULL)
+ return NULL;
+
+ v->discard = 1;
+
+ v->d_callbacks = map_video_codec(the_ast_format);
+ if (v->d_callbacks == NULL) {
+ ast_log(LOG_WARNING, "cannot find video codec, drop input 0x%x\n", the_ast_format);
+ return dec_uninit(v);
+ }
+
+ codec = map_video_format(v->d_callbacks->format, CM_RD);
+
+ v->codec = avcodec_find_decoder(codec);
+ if (!v->codec) {
+ ast_log(LOG_WARNING, "Unable to find the decoder for format %d\n", codec);
+ return dec_uninit(v);
+ }
+ /*
+ * Initialize the codec context.
+ */
+ v->dec_ctx = avcodec_alloc_context();
+ if (!v->dec_ctx) {
+ ast_log(LOG_WARNING, "Cannot allocate the decoder context\n");
+ return dec_uninit(v);
+ }
+ /* XXX call dec_init() ? */
+ if (avcodec_open(v->dec_ctx, v->codec) < 0) {
+ ast_log(LOG_WARNING, "Cannot open the decoder context\n");
+ av_free(v->dec_ctx);
+ v->dec_ctx = NULL;
+ return dec_uninit(v);
+ }
+
+ v->parser = av_parser_init(codec);
+ if (!v->parser) {
+ ast_log(LOG_WARNING, "Cannot initialize the decoder parser\n");
+ return dec_uninit(v);
+ }
+
+ v->d_frame = avcodec_alloc_frame();
+ if (!v->d_frame) {
+ ast_log(LOG_WARNING, "Cannot allocate decoding video frame\n");
+ return dec_uninit(v);
+ }
+ v->dec_in_cur = &v->dec_in[0]; /* buffer for incoming frames */
+ v->dec_in_dpy = NULL; /* nothing to display */
+
+ return v; /* ok */
+}
/*------ end codec specific code -----*/
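
The video_dec_desc comment above describes a three-slot circular queue shared by the RTP receiver, which advances dec_in_cur as frames are assembled, and the video thread, which advances dec_in_dpy as frames are displayed; the two pointers are protected by env->dec_lock. The following standalone sketch reproduces that producer/consumer protocol with a plain pthread mutex and integer slots in place of the real Asterisk types.

#include <pthread.h>
#include <stddef.h>

#define SK_N_DEC_IN 3	/* same depth as N_DEC_IN above */

struct sk_queue {
	pthread_mutex_t lock;	/* plays the role of env->dec_lock */
	int buf[SK_N_DEC_IN];	/* stand-ins for the struct fbuf_t slots */
	int *cur;		/* slot being filled; NULL when the queue is full */
	int *dpy;		/* slot to display;  NULL when the queue is empty */
};

/* Start with cur = &buf[0] (receiver may store) and dpy = NULL (nothing
 * to display), as dec_init() sets dec_in_cur and dec_in_dpy above. */

/* Receiver side: a complete frame has just been assembled in *cur. */
static void sk_frame_complete(struct sk_queue *q)
{
	int *tmp = q->cur;	/* remember the slot we just finished */

	if (tmp == NULL)	/* queue full: caller already dropped the frame */
		return;
	pthread_mutex_lock(&q->lock);
	if (++q->cur == &q->buf[SK_N_DEC_IN])	/* advance, wrap around */
		q->cur = &q->buf[0];
	if (q->dpy == NULL)			/* display side was idle */
		q->dpy = tmp;
	else if (q->dpy == q->cur)		/* queue now full */
		q->cur = NULL;
	pthread_mutex_unlock(&q->lock);
}

/* Video-thread side: the slot pointed to by dpy has been decoded and shown. */
static void sk_frame_consumed(struct sk_queue *q)
{
	int *tmp = q->dpy;	/* the slot we just freed */

	if (tmp == NULL)	/* nothing was displayable */
		return;
	pthread_mutex_lock(&q->lock);
	if (++q->dpy == &q->buf[SK_N_DEC_IN])	/* advance, wrap around */
		q->dpy = &q->buf[0];
	if (q->cur == NULL)			/* receiver was stalled */
		q->cur = tmp;			/* hand it the freed slot */
	else if (q->dpy == q->cur)		/* queue drained */
		q->dpy = NULL;
	pthread_mutex_unlock(&q->lock);
}
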