author     dvossel <dvossel@f38db490-d61c-443f-a65b-d21fe96a405b>  2011-06-30 20:33:15 +0000
committer  dvossel <dvossel@f38db490-d61c-443f-a65b-d21fe96a405b>  2011-06-30 20:33:15 +0000
commit     8ec002763cc11640bdf5c8be0378e179b10da935 (patch)
tree       afcca011f073934d3bf0b6e94b9ca2f326b265ad
parent     39c2c3129f909810ea0eb83b84fe513f2d0a87bc (diff)

Video support for ConfBridge.

Review: https://reviewboard.asterisk.org/r/1288/

git-svn-id: http://svn.digium.com/svn/asterisk/trunk@325931 f38db490-d61c-443f-a65b-d21fe96a405b
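
To illustrate how the feature is enabled, here is a minimal sketch of a bridge profile and dialplan entry using the new video_mode option; the profile name, extension, and chosen mode are hypothetical, and the ConfBridge(conference,bridge_profile,...) argument order is assumed from the application's existing syntax:

    ; confbridge.conf
    [video_bridge]
    type=bridge
    video_mode=follow_talker    ; or first_marked / last_marked

    ; extensions.conf
    exten => 5000,1,ConfBridge(1,video_bridge)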
-rw-r--r--   CHANGES                                |   2
-rw-r--r--   apps/app_confbridge.c                  | 148
-rw-r--r--   apps/app_voicemail.c                   |  24
-rw-r--r--   apps/confbridge/conf_config_parser.c   |  24
-rw-r--r--   apps/confbridge/include/confbridge.h   |   4
-rw-r--r--   bridges/bridge_softmix.c               |  76
-rw-r--r--   configs/confbridge.conf.sample         |  22
-rw-r--r--   include/asterisk/bridging.h            |  61
-rw-r--r--   include/asterisk/dsp.h                 |   5
-rw-r--r--   main/bridging.c                        | 117
-rw-r--r--   main/dsp.c                             |  29
11 files changed, 478 insertions, 34 deletions
diff --git a/CHANGES b/CHANGES
index 53d495749..e4a936e34 100644
--- a/CHANGES
+++ b/CHANGES
@@ -82,6 +82,8 @@ ConfBridge
 * CONFBRIDGE_INFO dialplan function capable of retrieving information
about a conference such as locked status and number of parties, admins,
and marked users.
+ * Addition of video_mode option in confbridge.conf for adding video support
+ into a bridge profile.
Dialplan Variables
------------------
diff --git a/apps/app_confbridge.c b/apps/app_confbridge.c
index 2ed03e120..d046c489f 100644
--- a/apps/app_confbridge.c
+++ b/apps/app_confbridge.c
@@ -77,6 +77,11 @@ ASTERISK_FILE_VERSION(__FILE__, "$Revision$")
<description>
<para>Enters the user into a specified conference bridge. The user can exit the conference by hangup or DTMF menu option.</para>
</description>
+ <see-also>
+ <ref type="application">ConfBridge</ref>
+ <ref type="function">CONFBRIDGE</ref>
+ <ref type="function">CONFBRIDGE_INFO</ref>
+ </see-also>
</application>
<function name="CONFBRIDGE" language="en_US">
<synopsis>
@@ -233,6 +238,19 @@ ASTERISK_FILE_VERSION(__FILE__, "$Revision$")
<description>
</description>
</manager>
+ <manager name="ConfbridgeSetSingleVideoSrc" language="en_US">
+ <synopsis>
+ Set a conference user as the single video source distributed to all other participants.
+ </synopsis>
+ <syntax>
+ <xi:include xpointer="xpointer(/docs/manager[@name='Login']/syntax/parameter[@name='ActionID'])" />
+ <parameter name="Conference" required="true" />
+ <parameter name="Channel" required="true" />
+ </syntax>
+ <description>
+ </description>
+ </manager>
+
***/
/*!
@@ -614,6 +632,68 @@ static int play_prompt_to_channel(struct conference_bridge *conference_bridge, s
return res;
}
+static void handle_video_on_join(struct conference_bridge *conference_bridge, struct conference_bridge_user *conference_bridge_user)
+{
+ /* only automatically set video source for marked users */
+ if (!ast_test_flag(&conference_bridge_user->u_profile, USER_OPT_MARKEDUSER)) {
+ return;
+ }
+
+ if (ast_test_flag(&conference_bridge->b_profile, BRIDGE_OPT_VIDEO_SRC_FIRST_MARKED)) {
+ int set = 1;
+ struct conference_bridge_user *tmp_user = NULL;
+ ao2_lock(conference_bridge);
+ /* see if anyone is already the video src */
+ AST_LIST_TRAVERSE(&conference_bridge->users_list, tmp_user, list) {
+ if (tmp_user == conference_bridge_user) {
+ continue;
+ }
+ if (ast_bridge_is_video_src(conference_bridge->bridge, tmp_user->chan)) {
+ set = 0;
+ break;
+ }
+ }
+ ao2_unlock(conference_bridge);
+ if (set) {
+ ast_bridge_set_single_src_video_mode(conference_bridge->bridge, conference_bridge_user->chan);
+ }
+ } else if (ast_test_flag(&conference_bridge->b_profile, BRIDGE_OPT_VIDEO_SRC_LAST_MARKED)) {
+ /* we joined and are video capable; override anyone else that may already be the video source */
+ ast_bridge_set_single_src_video_mode(conference_bridge->bridge, conference_bridge_user->chan);
+ }
+}
+
+static void handle_video_on_exit(struct conference_bridge *conference_bridge, struct conference_bridge_user *conference_bridge_user)
+{
+ struct conference_bridge_user *tmp_user = NULL;
+
+ /* if this isn't a video source, nothing to update */
+ if (!ast_bridge_is_video_src(conference_bridge->bridge, conference_bridge_user->chan)) {
+ return;
+ }
+
+ ast_bridge_remove_video_src(conference_bridge->bridge, conference_bridge_user->chan);
+
+ /* if the video_mode isn't set to automatically pick the video source, do nothing on exit. */
+ if (!ast_test_flag(&conference_bridge->b_profile, BRIDGE_OPT_VIDEO_SRC_FIRST_MARKED) &&
+ !ast_test_flag(&conference_bridge->b_profile, BRIDGE_OPT_VIDEO_SRC_LAST_MARKED)) {
+ return;
+ }
+
+ /* Make the next available marked user the video src. */
+ ao2_lock(conference_bridge);
+ AST_LIST_TRAVERSE(&conference_bridge->users_list, tmp_user, list) {
+ if (tmp_user == conference_bridge_user) {
+ continue;
+ }
+ if (ast_test_flag(&tmp_user->u_profile, USER_OPT_MARKEDUSER)) {
+ ast_bridge_set_single_src_video_mode(conference_bridge->bridge, tmp_user->chan);
+ break;
+ }
+ }
+ ao2_unlock(conference_bridge);
+}
+
/*!
* \brief Perform post-joining marked specific actions
*
@@ -627,7 +707,8 @@ static int post_join_marked(struct conference_bridge *conference_bridge, struct
if (ast_test_flag(&conference_bridge_user->u_profile, USER_OPT_MARKEDUSER)) {
struct conference_bridge_user *other_conference_bridge_user = NULL;
- /* If we are not the first marked user to join just bail out now */
+ /* If we are not the first marked user to join, the other users are already
+ * in the conference so we do not need to update them. */
if (conference_bridge->markedusers >= 2) {
return 0;
}
@@ -664,7 +745,6 @@ static int post_join_marked(struct conference_bridge *conference_bridge, struct
other_conference_bridge_user->features.mute = 0;
}
}
-
} else {
/* If a marked user already exists in the conference bridge we can just bail out now */
if (conference_bridge->markedusers) {
@@ -859,6 +939,10 @@ static struct conference_bridge *join_conference_bridge(const char *name, struct
/* Set the internal mixing interval on the bridge from the bridge profile */
ast_bridge_set_mixing_interval(conference_bridge->bridge, conference_bridge->b_profile.mix_interval);
+ if (ast_test_flag(&conference_bridge->b_profile, BRIDGE_OPT_VIDEO_SRC_FOLLOW_TALKER)) {
+ ast_bridge_set_talker_src_video_mode(conference_bridge->bridge);
+ }
+
/* Setup lock for playback channel */
ast_mutex_init(&conference_bridge->playback_lock);
@@ -1370,6 +1454,8 @@ static int confbridge_exec(struct ast_channel *chan, const char *data)
}
}
+ handle_video_on_join(conference_bridge, &conference_bridge_user);
+
/* Join our conference bridge for real */
send_join_event(conference_bridge_user.chan, conference_bridge->name);
ast_bridge_join(conference_bridge->bridge,
@@ -1379,6 +1465,9 @@ static int confbridge_exec(struct ast_channel *chan, const char *data)
&conference_bridge_user.tech_args);
send_leave_event(conference_bridge_user.chan, conference_bridge->name);
+
+ handle_video_on_exit(conference_bridge, &conference_bridge_user);
+
/* if this user has an intro, play it when leaving */
if (!quiet && !ast_strlen_zero(conference_bridge_user.name_rec_location)) {
ast_autoservice_start(chan);
@@ -1681,6 +1770,11 @@ static int execute_menu_entry(struct conference_bridge *conference_bridge,
break;
case MENU_ACTION_NOOP:
break;
+ case MENU_ACTION_SET_SINGLE_VIDEO_SRC:
+ ao2_lock(conference_bridge);
+ ast_bridge_set_single_src_video_mode(conference_bridge->bridge, bridge_channel->chan);
+ ao2_unlock(conference_bridge);
+ break;
}
}
return res;
@@ -2436,6 +2530,55 @@ static int action_confbridgestoprecord(struct mansession *s, const struct messag
return 0;
}
+static int action_confbridgesetsinglevideosrc(struct mansession *s, const struct message *m)
+{
+ const char *conference = astman_get_header(m, "Conference");
+ const char *channel = astman_get_header(m, "Channel");
+ struct conference_bridge_user *participant = NULL;
+ struct conference_bridge *bridge = NULL;
+ struct conference_bridge tmp;
+
+ if (ast_strlen_zero(conference)) {
+ astman_send_error(s, m, "No Conference name provided.");
+ return 0;
+ }
+ if (ast_strlen_zero(channel)) {
+ astman_send_error(s, m, "No channel name provided.");
+ return 0;
+ }
+ if (!ao2_container_count(conference_bridges)) {
+ astman_send_error(s, m, "No active conferences.");
+ return 0;
+ }
+
+ ast_copy_string(tmp.name, conference, sizeof(tmp.name));
+ bridge = ao2_find(conference_bridges, &tmp, OBJ_POINTER);
+ if (!bridge) {
+ astman_send_error(s, m, "No Conference by that name found.");
+ return 0;
+ }
+
+ /* find channel and set as video src. */
+ ao2_lock(bridge);
+ AST_LIST_TRAVERSE(&bridge->users_list, participant, list) {
+ if (!strncmp(channel, participant->chan->name, strlen(channel))) {
+ ast_bridge_set_single_src_video_mode(bridge->bridge, participant->chan);
+ break;
+ }
+ }
+ ao2_unlock(bridge);
+ ao2_ref(bridge, -1);
+
+ /* do not access participant after the bridge is unlocked. We only
+ * use this pointer to check whether the channel was found. */
+ if (!participant) {
+ astman_send_error(s, m, "No channel by that name found in conference.");
+ return 0;
+ }
+ astman_send_ack(s, m, "Conference single video source set.");
+ return 0;
+}
+
static int func_confbridge_info(struct ast_channel *chan, const char *cmd, char *data, char *buf, size_t len)
{
char *parse = NULL;
@@ -2567,6 +2710,7 @@ static int load_module(void)
res |= ast_manager_register_xml("ConfbridgeLock", EVENT_FLAG_CALL, action_confbridgelock);
res |= ast_manager_register_xml("ConfbridgeStartRecord", EVENT_FLAG_CALL, action_confbridgestartrecord);
res |= ast_manager_register_xml("ConfbridgeStopRecord", EVENT_FLAG_CALL, action_confbridgestoprecord);
+ res |= ast_manager_register_xml("ConfbridgeSetSingleVideoSrc", EVENT_FLAG_CALL, action_confbridgesetsinglevideosrc);
conf_load_config(0);
return res;
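
As a usage sketch (not part of the patch), the ConfbridgeSetSingleVideoSrc manager action registered above takes the Conference and Channel headers documented earlier; the conference number, channel name, and ActionID below are hypothetical:

    Action: ConfbridgeSetSingleVideoSrc
    ActionID: 12345
    Conference: 1234
    Channel: SIP/alice-00000001

On success the action is acknowledged with "Conference single video source set."; an unknown conference or channel returns an error response instead.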
diff --git a/apps/app_voicemail.c b/apps/app_voicemail.c
index 09e20ad46..5e342bda0 100644
--- a/apps/app_voicemail.c
+++ b/apps/app_voicemail.c
@@ -903,8 +903,6 @@ static int add_email_attachment(FILE *p, struct ast_vm_user *vmu, char *format,
static int is_valid_dtmf(const char *key);
static void read_password_from_file(const char *secretfn, char *password, int passwordlen);
static int write_password_to_file(const char *secretfn, const char *password);
-static const char *substitute_escapes(const char *value);
-static void free_user(struct ast_vm_user *vmu);
struct ao2_container *inprocess_container;
@@ -996,8 +994,7 @@ static char *strip_control_and_high(const char *input, char *buf, size_t buflen)
* - the dialcontext
* - the exitcontext
* - vmmaxsecs, vmmaxmsg, maxdeletedmsg
- * - volume gain
- * - emailsubject, emailbody set to NULL
+ * - volume gain.
*/
static void populate_defaults(struct ast_vm_user *vmu)
{
@@ -1048,10 +1045,6 @@ static void apply_option(struct ast_vm_user *vmu, const char *var, const char *v
ast_copy_string(vmu->attachfmt, value, sizeof(vmu->attachfmt));
} else if (!strcasecmp(var, "serveremail")) {
ast_copy_string(vmu->serveremail, value, sizeof(vmu->serveremail));
- } else if (!strcasecmp(var, "emailbody")) {
- vmu->emailbody = ast_strdup(substitute_escapes(value));
- } else if (!strcasecmp(var, "emailsubject")) {
- vmu->emailsubject = ast_strdup(substitute_escapes(value));
} else if (!strcasecmp(var, "language")) {
ast_copy_string(vmu->language, value, sizeof(vmu->language));
} else if (!strcasecmp(var, "tz")) {
@@ -1389,7 +1382,7 @@ static struct ast_vm_user *find_user_realtime(struct ast_vm_user *ivm, const cha
ast_variables_destroy(var);
} else {
if (!ivm)
- free_user(retval);
+ ast_free(retval);
retval = NULL;
}
}
@@ -10644,9 +10637,7 @@ AST_TEST_DEFINE(test_voicemail_vmuser)
"envelope=yes|moveheard=yes|sayduration=yes|saydurationm=5|forcename=yes|"
"forcegreetings=yes|callback=somecontext|dialout=somecontext2|"
"exitcontext=somecontext3|minsecs=10|maxsecs=100|nextaftercmd=yes|"
- "backupdeleted=50|volgain=1.3|passwordlocation=spooldir|emailbody="
- "Dear ${VM_NAME}:\n\n\tYou were just left a ${VM_DUR} long message|emailsubject="
- "[PBX]: New message ${VM_MSGNUM} in mailbox ${VM_MAILBOX}";
+ "backupdeleted=50|volgain=1.3|passwordlocation=spooldir";
#ifdef IMAP_STORAGE
static const char option_string2[] = "imapuser=imapuser|imappassword=imappasswd|"
"imapfolder=INBOX|imapvmshareid=6000";
@@ -10668,7 +10659,6 @@ AST_TEST_DEFINE(test_voicemail_vmuser)
return AST_TEST_NOT_RUN;
}
ast_set_flag(vmu, VM_ALLOCED);
- populate_defaults(vmu);
apply_options(vmu, options_string);
@@ -10684,14 +10674,6 @@ AST_TEST_DEFINE(test_voicemail_vmuser)
ast_test_status_update(test, "Parse failure for serveremail option\n");
res = 1;
}
- if (!vmu->emailsubject || strcasecmp(vmu->emailsubject, "[PBX]: New message ${VM_MSGNUM} in mailbox ${VM_MAILBOX}")) {
- ast_test_status_update(test, "Parse failure for emailsubject option\n");
- res = 1;
- }
- if (!vmu->emailbody || strcasecmp(vmu->emailbody, "Dear ${VM_NAME}:\n\n\tYou were just left a ${VM_DUR} long message")) {
- ast_test_status_update(test, "Parse failure for emailbody option\n");
- res = 1;
- }
if (strcasecmp(vmu->zonetag, "central")) {
ast_test_status_update(test, "Parse failure for tz option\n");
res = 1;
diff --git a/apps/confbridge/conf_config_parser.c b/apps/confbridge/conf_config_parser.c
index 3d1f31326..8864f52bc 100644
--- a/apps/confbridge/conf_config_parser.c
+++ b/apps/confbridge/conf_config_parser.c
@@ -284,6 +284,14 @@ static int set_bridge_option(const char *name, const char *value, struct bridge_
}
} else if (!strcasecmp(name, "record_conference")) {
ast_set2_flag(b_profile, ast_true(value), BRIDGE_OPT_RECORD_CONFERENCE);
+ } else if (!strcasecmp(name, "video_mode")) {
+ if (!strcasecmp(value, "first_marked")) {
+ ast_set_flag(b_profile, BRIDGE_OPT_VIDEO_SRC_FIRST_MARKED);
+ } else if (!strcasecmp(value, "last_marked")) {
+ ast_set_flag(b_profile, BRIDGE_OPT_VIDEO_SRC_LAST_MARKED);
+ } else if (!strcasecmp(value, "follow_talker")) {
+ ast_set_flag(b_profile, BRIDGE_OPT_VIDEO_SRC_FOLLOW_TALKER);
+ }
} else if (!strcasecmp(name, "max_members")) {
if (sscanf(value, "%30u", &b_profile->max_members) != 1) {
return -1;
@@ -534,6 +542,7 @@ static int add_action_to_menu_entry(struct conf_menu_entry *menu_entry, enum con
case MENU_ACTION_ADMIN_TOGGLE_LOCK:
case MENU_ACTION_ADMIN_KICK_LAST:
case MENU_ACTION_LEAVE:
+ case MENU_ACTION_SET_SINGLE_VIDEO_SRC:
break;
case MENU_ACTION_PLAYBACK:
case MENU_ACTION_PLAYBACK_AND_CONTINUE:
@@ -649,6 +658,8 @@ static int add_menu_entry(struct conf_menu *menu, const char *dtmf, const char *
res |= add_action_to_menu_entry(menu_entry, MENU_ACTION_ADMIN_KICK_LAST, NULL);
} else if (!strcasecmp(action, "leave_conference")) {
res |= add_action_to_menu_entry(menu_entry, MENU_ACTION_LEAVE, NULL);
+ } else if (!strcasecmp(action, "set_as_single_video_src")) {
+ res |= add_action_to_menu_entry(menu_entry, MENU_ACTION_SET_SINGLE_VIDEO_SRC, NULL);
} else if (!strncasecmp(action, "dialplan_exec(", 14)) {
ast_copy_string(buf, action, sizeof(buf));
action_args = buf;
@@ -983,6 +994,16 @@ static char *handle_cli_confbridge_show_bridge_profile(struct ast_cli_entry *e,
ast_cli(a->fd,"Max Members: No Limit\n");
}
+ if (b_profile.flags & BRIDGE_OPT_VIDEO_SRC_LAST_MARKED) {
+ ast_cli(a->fd, "Video Mode: last_marked\n");
+ } else if (b_profile.flags & BRIDGE_OPT_VIDEO_SRC_FIRST_MARKED) {
+ ast_cli(a->fd, "Video Mode: first_marked\n");
+ } else if (b_profile.flags & BRIDGE_OPT_VIDEO_SRC_FOLLOW_TALKER) {
+ ast_cli(a->fd, "Video Mode: follow_talker\n");
+ } else {
+ ast_cli(a->fd, "Video Mode: no video\n");
+ }
+
ast_cli(a->fd,"sound_join: %s\n", conf_get_sound(CONF_SOUND_JOIN, b_profile.sounds));
ast_cli(a->fd,"sound_leave: %s\n", conf_get_sound(CONF_SOUND_LEAVE, b_profile.sounds));
ast_cli(a->fd,"sound_only_person: %s\n", conf_get_sound(CONF_SOUND_ONLY_PERSON, b_profile.sounds));
@@ -1142,6 +1163,9 @@ static char *handle_cli_confbridge_show_menu(struct ast_cli_entry *e, int cmd, s
case MENU_ACTION_LEAVE:
ast_cli(a->fd, "leave_conference");
break;
+ case MENU_ACTION_SET_SINGLE_VIDEO_SRC:
+ ast_cli(a->fd, "set_as_single_video_src");
+ break;
}
action_num++;
}
diff --git a/apps/confbridge/include/confbridge.h b/apps/confbridge/include/confbridge.h
index de467b5f7..7a2f6bb07 100644
--- a/apps/confbridge/include/confbridge.h
+++ b/apps/confbridge/include/confbridge.h
@@ -61,6 +61,9 @@ enum user_profile_flags {
enum bridge_profile_flags {
BRIDGE_OPT_RECORD_CONFERENCE = (1 << 0), /*!< Set if the conference should be recorded */
+ BRIDGE_OPT_VIDEO_SRC_LAST_MARKED = (1 << 1), /*!< Set if conference should feed video of last marked user to all participants. */
+ BRIDGE_OPT_VIDEO_SRC_FIRST_MARKED = (1 << 2), /*!< Set if conference should feed video of first marked user to all participants. */
+ BRIDGE_OPT_VIDEO_SRC_FOLLOW_TALKER = (1 << 3), /*!< Set if conference set the video feed to follow the loudest talker. */
};
enum conf_menu_action_id {
@@ -78,6 +81,7 @@ enum conf_menu_action_id {
MENU_ACTION_ADMIN_KICK_LAST,
MENU_ACTION_LEAVE,
MENU_ACTION_NOOP,
+ MENU_ACTION_SET_SINGLE_VIDEO_SRC,
};
/*! The conference menu action contains both
diff --git a/bridges/bridge_softmix.c b/bridges/bridge_softmix.c
index 4350905fd..8828d640a 100644
--- a/bridges/bridge_softmix.c
+++ b/bridges/bridge_softmix.c
@@ -70,6 +70,20 @@ ASTERISK_FILE_VERSION(__FILE__, "$Revision$")
#define DEFAULT_SOFTMIX_SILENCE_THRESHOLD 2500
#define DEFAULT_SOFTMIX_TALKING_THRESHOLD 160
+#define DEFAULT_ENERGY_HISTORY_LEN 150
+
+struct video_follow_talker_data {
+ /*! audio energy history */
+ int energy_history[DEFAULT_ENERGY_HISTORY_LEN];
+ /*! The current slot being used in the history buffer; this
+ * increments and wraps around */
+ int energy_history_cur_slot;
+ /*! The current energy sum used for averages. */
+ int energy_accum;
+ /*! The current energy average */
+ int energy_average;
+};
+
/*! \brief Structure which contains per-channel mixing information */
struct softmix_channel {
/*! Lock to protect this structure */
@@ -93,6 +107,8 @@ struct softmix_channel {
short final_buf[MAX_DATALEN];
/*! Buffer containing only the audio from the channel */
short our_buf[MAX_DATALEN];
+ /*! Data pertaining to talker mode for video conferencing */
+ struct video_follow_talker_data video_talker;
};
struct softmix_bridge_data {
@@ -419,12 +435,24 @@ static void softmix_pass_dtmf(struct ast_bridge *bridge, struct ast_bridge_chann
}
}
+static void softmix_pass_video(struct ast_bridge *bridge, struct ast_bridge_channel *bridge_channel, struct ast_frame *frame)
+{
+ struct ast_bridge_channel *tmp;
+ AST_LIST_TRAVERSE(&bridge->channels, tmp, entry) {
+ if (tmp->suspended) {
+ continue;
+ }
+ ast_write(tmp->chan, frame);
+ }
+}
+
/*! \brief Function called when a channel writes a frame into the bridge */
static enum ast_bridge_write_result softmix_bridge_write(struct ast_bridge *bridge, struct ast_bridge_channel *bridge_channel, struct ast_frame *frame)
{
struct softmix_channel *sc = bridge_channel->bridge_pvt;
struct softmix_bridge_data *softmix_data = bridge->bridge_pvt;
int totalsilence = 0;
+ int cur_energy = 0;
int silence_threshold = bridge_channel->tech_args.silence_threshold ?
bridge_channel->tech_args.silence_threshold :
DEFAULT_SOFTMIX_SILENCE_THRESHOLD;
@@ -434,18 +462,52 @@ static enum ast_bridge_write_result softmix_bridge_write(struct ast_bridge *brid
/* Only accept audio frames, all others are unsupported */
if (frame->frametype == AST_FRAME_DTMF_END || frame->frametype == AST_FRAME_DTMF_BEGIN) {
softmix_pass_dtmf(bridge, bridge_channel, frame);
- goto no_audio;
- } else if (frame->frametype != AST_FRAME_VOICE) {
+ goto bridge_write_cleanup;
+ } else if (frame->frametype != AST_FRAME_VOICE && frame->frametype != AST_FRAME_VIDEO) {
res = AST_BRIDGE_WRITE_UNSUPPORTED;
- goto no_audio;
+ goto bridge_write_cleanup;
} else if (frame->datalen == 0) {
- goto no_audio;
+ goto bridge_write_cleanup;
+ }
+
+ /* Determine if this video frame should be distributed or not */
+ if (frame->frametype == AST_FRAME_VIDEO) {
+ switch (bridge->video_mode.mode) {
+ case AST_BRIDGE_VIDEO_MODE_NONE:
+ break;
+ case AST_BRIDGE_VIDEO_MODE_SINGLE_SRC:
+ if (ast_bridge_is_video_src(bridge, bridge_channel->chan)) {
+ softmix_pass_video(bridge, bridge_channel, frame);
+ }
+ break;
+ case AST_BRIDGE_VIDEO_MODE_TALKER_SRC:
+ ast_mutex_lock(&sc->lock);
+ ast_bridge_update_talker_src_video_mode(bridge, bridge_channel->chan, sc->video_talker.energy_average, ast_format_get_video_mark(&frame->subclass.format));
+ ast_mutex_unlock(&sc->lock);
+ if (ast_bridge_is_video_src(bridge, bridge_channel->chan)) {
+ softmix_pass_video(bridge, bridge_channel, frame);
+ }
+ break;
+ }
+ goto bridge_write_cleanup;
}
/* If we made it here, we are going to write the frame into the conference */
ast_mutex_lock(&sc->lock);
+ ast_dsp_silence_with_energy(sc->dsp, frame, &totalsilence, &cur_energy);
+
+ if (bridge->video_mode.mode == AST_BRIDGE_VIDEO_MODE_TALKER_SRC) {
+ int cur_slot = sc->video_talker.energy_history_cur_slot;
+ sc->video_talker.energy_accum -= sc->video_talker.energy_history[cur_slot];
+ sc->video_talker.energy_accum += cur_energy;
+ sc->video_talker.energy_history[cur_slot] = cur_energy;
+ sc->video_talker.energy_average = sc->video_talker.energy_accum / DEFAULT_ENERGY_HISTORY_LEN;
+ sc->video_talker.energy_history_cur_slot++;
+ if (sc->video_talker.energy_history_cur_slot == DEFAULT_ENERGY_HISTORY_LEN) {
+ sc->video_talker.energy_history_cur_slot = 0; /* wrap around */
+ }
+ }
- ast_dsp_silence(sc->dsp, frame, &totalsilence);
if (totalsilence < silence_threshold) {
if (!sc->talking) {
update_talking = 1;
@@ -487,7 +549,7 @@ static enum ast_bridge_write_result softmix_bridge_write(struct ast_bridge *brid
return res;
-no_audio:
+bridge_write_cleanup:
/* Even though the frame is not being written into the conference because it is not audio,
* we should use this opportunity to check to see if a frame is ready to be written out from
* the conference to the channel. */
@@ -817,7 +879,7 @@ softmix_cleanup:
static struct ast_bridge_technology softmix_bridge = {
.name = "softmix",
- .capabilities = AST_BRIDGE_CAPABILITY_MULTIMIX | AST_BRIDGE_CAPABILITY_THREAD | AST_BRIDGE_CAPABILITY_MULTITHREADED | AST_BRIDGE_CAPABILITY_OPTIMIZE,
+ .capabilities = AST_BRIDGE_CAPABILITY_MULTIMIX | AST_BRIDGE_CAPABILITY_THREAD | AST_BRIDGE_CAPABILITY_MULTITHREADED | AST_BRIDGE_CAPABILITY_OPTIMIZE | AST_BRIDGE_CAPABILITY_VIDEO,
.preference = AST_BRIDGE_PREFERENCE_LOW,
.create = softmix_bridge_create,
.destroy = softmix_bridge_destroy,
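
For clarity, the talker-energy tracking added to softmix_bridge_write() above is a fixed-length running average kept in a ring buffer. A standalone sketch of the same idea follows; the type and function names are illustrative, not part of the patch:

    #define ENERGY_HISTORY_LEN 150

    struct energy_tracker {
        int history[ENERGY_HISTORY_LEN]; /* last N per-frame energy values */
        int slot;                        /* next slot to overwrite */
        int accum;                       /* running sum of the history buffer */
        int average;                     /* accum / ENERGY_HISTORY_LEN */
    };

    /* Fold one new frame's energy into the running average. */
    static void energy_tracker_update(struct energy_tracker *t, int cur_energy)
    {
        t->accum -= t->history[t->slot]; /* drop the oldest sample */
        t->accum += cur_energy;          /* add the newest one */
        t->history[t->slot] = cur_energy;
        t->average = t->accum / ENERGY_HISTORY_LEN;
        if (++t->slot == ENERGY_HISTORY_LEN) {
            t->slot = 0; /* wrap around */
        }
    }

The resulting average is what ast_bridge_update_talker_src_video_mode() compares between channels to decide whose video should be forwarded.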
diff --git a/configs/confbridge.conf.sample b/configs/confbridge.conf.sample
index 1781b88a0..408387012 100644
--- a/configs/confbridge.conf.sample
+++ b/configs/confbridge.conf.sample
@@ -168,6 +168,26 @@ type=bridge
; larger amounts of delay into the bridge. Valid values here are 10, 20, 40,
; or 80. By default 20ms is used.
+;video_mode = follow_talker ; Sets how confbridge handles video distribution to the conference participants.
+ ; Note that participants wanting to view and be the source of a video feed
+ ; _MUST_ be sharing the same video codec.
+ ; --- MODES ---
+ ; none: No video sources are set by default in the conference. It is still
+ ; possible for a user to be set as a video source via AMI or DTMF action
+ ; at any time.
+ ;
+ ; follow_talker: The video feed will follow whoever is talking and providing video.
+ ;
+ ; last_marked: The last marked user to join the conference with video capabilities
+ ; will be the single source of video distributed to all participants.
+ ; If multiple marked users are capable of video, the last one to join
+ ; is always the source; when that user leaves, the source passes to the
+ ; one who joined before them.
+ ;
+ ; first_marked: The first marked user to join the conference with video capabilities
+ ; is the single source of video distribution among all participants. If
+ ; that user leaves, the marked user to join after them becomes the source.
+
; All sounds in the conference are customizable using the bridge profile options below.
; Simply state the option followed by the filename or full path of the filename after
; the option. Example: sound_had_joined=conf-hasjoin This will play the conf-hasjoin
@@ -264,6 +284,8 @@ type=bridge
; admin_toggle_conference_lock ; This action allows an Admin to toggle locking and
; unlocking the conference. Non admins can not use
; this action even if it is in their menu.
+; set_as_single_video_src ; This action allows any user to set themselves as the
+ ; single video source distributed to all participants.
[sample_user_menu]
type=menu
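
For completeness, a short sketch of exposing the new action through a DTMF menu; the menu name and key sequence are hypothetical, and the key=action syntax follows the other menu examples in this sample file:

    [video_user_menu]
    type=menu
    *4=set_as_single_video_src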
diff --git a/include/asterisk/bridging.h b/include/asterisk/bridging.h
index 58f61d6fd..849f88741 100644
--- a/include/asterisk/bridging.h
+++ b/include/asterisk/bridging.h
@@ -167,12 +167,48 @@ struct ast_bridge_channel {
AST_LIST_ENTRY(ast_bridge_channel) entry;
};
+enum ast_bridge_video_mode_type {
+ /*! Video is not allowed in the bridge */
+ AST_BRIDGE_VIDEO_MODE_NONE = 0,
+ /*! A single user is picked as the only distributed video source across the bridge */
+ AST_BRIDGE_VIDEO_MODE_SINGLE_SRC,
+ /*! A single user's video feed is distributed to all bridge channels, but
+ * that feed is automatically picked based on who is talking the most. */
+ AST_BRIDGE_VIDEO_MODE_TALKER_SRC,
+};
+
+/*! This is used by the SINGLE_SRC video mode to set which channel
+ * should be the current single video feed */
+struct ast_bridge_video_single_src_data {
+ /*! Only accept video coming from this channel */
+ struct ast_channel *chan_vsrc;
+};
+
+/*! This is used by the TALKER_SRC video mode to set which channel
+ * should be the current single video feed */
+struct ast_bridge_video_talker_src_data {
+ /*! Only accept video coming from this channel */
+ struct ast_channel *chan_vsrc;
+ int average_talking_energy;
+};
+
+struct ast_bridge_video_mode {
+ enum ast_bridge_video_mode_type mode;
+ /* Add data for all the video modes here. */
+ union {
+ struct ast_bridge_video_single_src_data single_src_data;
+ struct ast_bridge_video_talker_src_data talker_src_data;
+ } mode_data;
+};
+
/*!
* \brief Structure that contains information about a bridge
*/
struct ast_bridge {
/*! Number of channels participating in the bridge */
int num;
+ /*! The video mode this bridge is using */
+ struct ast_bridge_video_mode video_mode;
/*! The internal sample rate this bridge is mixed at when multiple channels are being mixed.
* If this value is 0, the bridge technology may auto adjust the internal mixing rate. */
unsigned int internal_sample_rate;
@@ -475,6 +511,31 @@ void ast_bridge_set_internal_sample_rate(struct ast_bridge *bridge, unsigned int
*/
void ast_bridge_set_mixing_interval(struct ast_bridge *bridge, unsigned int mixing_interval);
+/*!
+ * \brief Set a bridge to feed a single video source to all participants.
+ */
+void ast_bridge_set_single_src_video_mode(struct ast_bridge *bridge, struct ast_channel *video_src_chan);
+
+/*!
+ * \brief Set the bridge to pick the strongest talker supporting
+ * video as the single source video feed
+ */
+void ast_bridge_set_talker_src_video_mode(struct ast_bridge *bridge);
+
+/*!
+ * \brief Update information about talker energy for talker src video mode.
+ */
+void ast_bridge_update_talker_src_video_mode(struct ast_bridge *bridge, struct ast_channel *chan, int talker_energy, int is_keyframe);
+
+/*!
+ * \brief Determine if a channel is a video src for the bridge
+ */
+int ast_bridge_is_video_src(struct ast_bridge *bridge, struct ast_channel *chan);
+
+/*!
+ * \brief Remove a channel as a source of video for the bridge.
+ */
+void ast_bridge_remove_video_src(struct ast_bridge *bridge, struct ast_channel *chan);
#if defined(__cplusplus) || defined(c_plusplus)
}
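
To illustrate the new bridging API declared above, a hedged sketch of how an application such as app_confbridge drives it; the bridge and chan variables are assumed to be a valid struct ast_bridge * and struct ast_channel *:

    /* Feed one participant's video to every other participant,
     * e.g. when a marked user joins the conference. */
    ast_bridge_set_single_src_video_mode(bridge, chan);

    /* Later, when that participant leaves, stop using it as the source. */
    if (ast_bridge_is_video_src(bridge, chan)) {
        ast_bridge_remove_video_src(bridge, chan);
    }

    /* Alternatively, let the bridge pick the source automatically
     * based on who is talking the loudest. */
    ast_bridge_set_talker_src_video_mode(bridge);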
diff --git a/include/asterisk/dsp.h b/include/asterisk/dsp.h
index 79e4da695..333415200 100644
--- a/include/asterisk/dsp.h
+++ b/include/asterisk/dsp.h
@@ -109,6 +109,11 @@ struct ast_frame *ast_dsp_process(struct ast_channel *chan, struct ast_dsp *dsp,
number of seconds of silence */
int ast_dsp_silence(struct ast_dsp *dsp, struct ast_frame *f, int *totalsilence);
+/*! \brief Return non-zero if this is silence. Updates "totalsilence" with the total
+ number of seconds of silence and stores the average energy of the samples in the
+ frame in the "frames_energy" variable. */
+int ast_dsp_silence_with_energy(struct ast_dsp *dsp, struct ast_frame *f, int *totalsilence, int *frames_energy);
+
/*!
* \brief Return non-zero if this is noise. Updates "totalnoise" with the total
* number of seconds of noise
diff --git a/main/bridging.c b/main/bridging.c
index 444eea8d5..1563e4789 100644
--- a/main/bridging.c
+++ b/main/bridging.c
@@ -50,6 +50,8 @@ static AST_RWLIST_HEAD_STATIC(bridge_technologies, ast_bridge_technology);
/* Grow rate of bridge array of channels */
#define BRIDGE_ARRAY_GROW 32
+static void cleanup_video_mode(struct ast_bridge *bridge);
+
/*! Default DTMF keys for built in features */
static char builtin_features_dtmf[AST_BRIDGE_BUILTIN_END][MAXIMUM_DTMF_FEATURE_STRING];
@@ -457,6 +459,8 @@ static void destroy_bridge(void *obj)
/* Drop the array of channels */
ast_free(bridge->array);
+ cleanup_video_mode(bridge);
+
return;
}
@@ -1470,3 +1474,116 @@ void ast_bridge_set_internal_sample_rate(struct ast_bridge *bridge, unsigned int
bridge->internal_sample_rate = sample_rate;
ao2_unlock(bridge);
}
+
+static void cleanup_video_mode(struct ast_bridge *bridge)
+{
+ switch (bridge->video_mode.mode) {
+ case AST_BRIDGE_VIDEO_MODE_NONE:
+ break;
+ case AST_BRIDGE_VIDEO_MODE_SINGLE_SRC:
+ if (bridge->video_mode.mode_data.single_src_data.chan_vsrc) {
+ ast_channel_unref(bridge->video_mode.mode_data.single_src_data.chan_vsrc);
+ }
+ break;
+ case AST_BRIDGE_VIDEO_MODE_TALKER_SRC:
+ if (bridge->video_mode.mode_data.talker_src_data.chan_vsrc) {
+ ast_channel_unref(bridge->video_mode.mode_data.talker_src_data.chan_vsrc);
+ }
+ }
+ memset(&bridge->video_mode, 0, sizeof(bridge->video_mode));
+}
+
+void ast_bridge_set_single_src_video_mode(struct ast_bridge *bridge, struct ast_channel *video_src_chan)
+{
+ ao2_lock(bridge);
+ cleanup_video_mode(bridge);
+ bridge->video_mode.mode = AST_BRIDGE_VIDEO_MODE_SINGLE_SRC;
+ bridge->video_mode.mode_data.single_src_data.chan_vsrc = ast_channel_ref(video_src_chan);
+ ast_indicate(video_src_chan, AST_CONTROL_VIDUPDATE);
+ ao2_unlock(bridge);
+}
+
+void ast_bridge_set_talker_src_video_mode(struct ast_bridge *bridge)
+{
+ ao2_lock(bridge);
+ cleanup_video_mode(bridge);
+ bridge->video_mode.mode = AST_BRIDGE_VIDEO_MODE_TALKER_SRC;
+ ao2_unlock(bridge);
+}
+
+void ast_bridge_update_talker_src_video_mode(struct ast_bridge *bridge, struct ast_channel *chan, int talker_energy, int is_keyframe)
+{
+ struct ast_bridge_video_talker_src_data *data;
+ /* If the channel doesn't support video, we don't care about it */
+ if (!ast_format_cap_has_type(chan->nativeformats, AST_FORMAT_TYPE_VIDEO)) {
+ return;
+ }
+
+ ao2_lock(bridge);
+ data = &bridge->video_mode.mode_data.talker_src_data;
+
+ if (data->chan_vsrc == chan) {
+ data->average_talking_energy = talker_energy;
+ } else if ((data->average_talking_energy < talker_energy) && is_keyframe) {
+ if (data->chan_vsrc) {
+ ast_channel_unref(data->chan_vsrc);
+ }
+ data->chan_vsrc = ast_channel_ref(chan);
+ data->average_talking_energy = talker_energy;
+ ast_indicate(chan, AST_CONTROL_VIDUPDATE);
+ } else if ((data->average_talking_energy < talker_energy) && !is_keyframe) {
+ ast_indicate(chan, AST_CONTROL_VIDUPDATE);
+ } else if (!data->chan_vsrc && is_keyframe) {
+ data->chan_vsrc = ast_channel_ref(chan);
+ data->average_talking_energy = talker_energy;
+ ast_indicate(chan, AST_CONTROL_VIDUPDATE);
+ }
+ ao2_unlock(bridge);
+}
+
+int ast_bridge_is_video_src(struct ast_bridge *bridge, struct ast_channel *chan)
+{
+ int res = 0;
+
+ ao2_lock(bridge);
+ switch (bridge->video_mode.mode) {
+ case AST_BRIDGE_VIDEO_MODE_NONE:
+ break;
+ case AST_BRIDGE_VIDEO_MODE_SINGLE_SRC:
+ if (bridge->video_mode.mode_data.single_src_data.chan_vsrc == chan) {
+ res = 1;
+ }
+ break;
+ case AST_BRIDGE_VIDEO_MODE_TALKER_SRC:
+ if (bridge->video_mode.mode_data.talker_src_data.chan_vsrc == chan) {
+ res = 1;
+ }
+ }
+ ao2_unlock(bridge);
+ return res;
+}
+
+void ast_bridge_remove_video_src(struct ast_bridge *bridge, struct ast_channel *chan)
+{
+ ao2_lock(bridge);
+ switch (bridge->video_mode.mode) {
+ case AST_BRIDGE_VIDEO_MODE_NONE:
+ break;
+ case AST_BRIDGE_VIDEO_MODE_SINGLE_SRC:
+ if (bridge->video_mode.mode_data.single_src_data.chan_vsrc == chan) {
+ if (bridge->video_mode.mode_data.single_src_data.chan_vsrc) {
+ ast_channel_unref(bridge->video_mode.mode_data.single_src_data.chan_vsrc);
+ }
+ bridge->video_mode.mode_data.single_src_data.chan_vsrc = NULL;
+ }
+ break;
+ case AST_BRIDGE_VIDEO_MODE_TALKER_SRC:
+ if (bridge->video_mode.mode_data.talker_src_data.chan_vsrc == chan) {
+ if (bridge->video_mode.mode_data.talker_src_data.chan_vsrc) {
+ ast_channel_unref(bridge->video_mode.mode_data.talker_src_data.chan_vsrc);
+ }
+ bridge->video_mode.mode_data.talker_src_data.chan_vsrc = NULL;
+ }
+ }
+ ao2_unlock(bridge);
+}
diff --git a/main/dsp.c b/main/dsp.c
index 9e3e2e724..ee1891823 100644
--- a/main/dsp.c
+++ b/main/dsp.c
@@ -1103,7 +1103,7 @@ int ast_dsp_call_progress(struct ast_dsp *dsp, struct ast_frame *inf)
return __ast_dsp_call_progress(dsp, inf->data.ptr, inf->datalen / 2);
}
-static int __ast_dsp_silence_noise(struct ast_dsp *dsp, short *s, int len, int *totalsilence, int *totalnoise)
+static int __ast_dsp_silence_noise(struct ast_dsp *dsp, short *s, int len, int *totalsilence, int *totalnoise, int *frames_energy)
{
int accum;
int x;
@@ -1163,6 +1163,9 @@ static int __ast_dsp_silence_noise(struct ast_dsp *dsp, short *s, int len, int *
if (totalnoise) {
*totalnoise = dsp->totalnoise;
}
+ if (frames_energy) {
+ *frames_energy = accum;
+ }
return res;
}
@@ -1318,7 +1321,25 @@ int ast_dsp_silence(struct ast_dsp *dsp, struct ast_frame *f, int *totalsilence)
}
s = f->data.ptr;
len = f->datalen/2;
- return __ast_dsp_silence_noise(dsp, s, len, totalsilence, NULL);
+ return __ast_dsp_silence_noise(dsp, s, len, totalsilence, NULL, NULL);
+}
+
+int ast_dsp_silence_with_energy(struct ast_dsp *dsp, struct ast_frame *f, int *totalsilence, int *frames_energy)
+{
+ short *s;
+ int len;
+
+ if (f->frametype != AST_FRAME_VOICE) {
+ ast_log(LOG_WARNING, "Can't calculate silence on a non-voice frame\n");
+ return 0;
+ }
+ if (!ast_format_is_slinear(&f->subclass.format)) {
+ ast_log(LOG_WARNING, "Can only calculate silence on signed-linear frames :(\n");
+ return 0;
+ }
+ s = f->data.ptr;
+ len = f->datalen/2;
+ return __ast_dsp_silence_noise(dsp, s, len, totalsilence, NULL, frames_energy);
}
int ast_dsp_noise(struct ast_dsp *dsp, struct ast_frame *f, int *totalnoise)
@@ -1336,7 +1357,7 @@ int ast_dsp_noise(struct ast_dsp *dsp, struct ast_frame *f, int *totalnoise)
}
s = f->data.ptr;
len = f->datalen/2;
- return __ast_dsp_silence_noise(dsp, s, len, NULL, totalnoise);
+ return __ast_dsp_silence_noise(dsp, s, len, NULL, totalnoise, NULL);
}
@@ -1393,7 +1414,7 @@ struct ast_frame *ast_dsp_process(struct ast_channel *chan, struct ast_dsp *dsp,
/* Need to run the silence detection stuff for silence suppression and busy detection */
if ((dsp->features & DSP_FEATURE_SILENCE_SUPPRESS) || (dsp->features & DSP_FEATURE_BUSY_DETECT)) {
- res = __ast_dsp_silence_noise(dsp, shortdata, len, &silence, NULL);
+ res = __ast_dsp_silence_noise(dsp, shortdata, len, &silence, NULL, NULL);
}
if ((dsp->features & DSP_FEATURE_SILENCE_SUPPRESS) && silence) {