From 3b781dbab5f29928415951db5e8f7d740a615a3d Mon Sep 17 00:00:00 2001
From: Jirka Novak
Date: Tue, 7 Jan 2020 22:16:42 +0100
Subject: rtp_player_dialog: Route audio for a stream to left/right speaker in
 RTP player

A 'Play' column was added to the player. Double-clicking a stream in
this column changes the stream's audio routing. When the sound card
supports only one channel, the options are Mute/Play. When it supports
two or more channels, the options are Mute/L/L+R/R. A muted channel is
drawn with a dotted line.

Change-Id: If120c902195da46f98a1663c589f20c6a1da0ba7
Reviewed-on: https://code.wireshark.org/review/35687
Petri-Dish: Alexis La Goutte
Tested-by: Petri Dish Buildbot
Reviewed-by: Anders Broman
---
 ui/qt/rtp_audio_stream.cpp  |  69 +++++++++++++++++----
 ui/qt/rtp_audio_stream.h    |   7 ++-
 ui/qt/rtp_player_dialog.cpp | 147 +++++++++++++++++++++++++++++++++++++++++---
 ui/qt/rtp_player_dialog.h   |  18 +++++-
 ui/qt/rtp_player_dialog.ui  |   5 ++
 5 files changed, 219 insertions(+), 27 deletions(-)

diff --git a/ui/qt/rtp_audio_stream.cpp b/ui/qt/rtp_audio_stream.cpp
index 443f18d1af..b729ab4f62 100644
--- a/ui/qt/rtp_audio_stream.cpp
+++ b/ui/qt/rtp_audio_stream.cpp
@@ -133,10 +133,13 @@ void RtpAudioStream::addRtpPacket(const struct _packet_info *pinfo, const struct
     rtp_packets_ << rtp_packet;
 }
 
-void RtpAudioStream::reset(double global_start_time)
+void RtpAudioStream::reset(double global_start_time, bool stereo, bool left, bool right)
 {
     global_start_rel_time_ = global_start_time;
     stop_rel_time_ = start_rel_time_;
+    audio_stereo_ = stereo;
+    audio_left_ = left;
+    audio_right_ = right;
     audio_out_rate_ = 0;
     max_sample_val_ = 1;
     packet_timestamps_.clear();
@@ -157,7 +160,7 @@ static const int sample_bytes_ = sizeof(SAMPLE) / sizeof(char);
 /* Fix for bug 4119/5902: don't insert too many silence frames.
  * XXX - is there a better thing to do here?
  */
-static const int max_silence_samples_ = MAX_SILENCE_FRAMES;
+static const qint64 max_silence_samples_ = MAX_SILENCE_FRAMES;
 
 void RtpAudioStream::decode()
 {
@@ -238,7 +241,11 @@ void RtpAudioStream::decode()
         format.setSampleRate(sample_rate);
         format.setSampleSize(sample_bytes_ * 8); // bits
         format.setSampleType(QAudioFormat::SignedInt);
-        format.setChannelCount(1);
+        if (audio_stereo_) {
+            format.setChannelCount(2);
+        } else {
+            format.setChannelCount(1);
+        }
         format.setCodec("audio/pcm");
 
         if (!cur_out_device.isFormatSupported(format)) {
@@ -250,7 +257,7 @@ void RtpAudioStream::decode()
 
             // Prepend silence to match our sibling streams.
             tempfile_->seek(0);
-            int prepend_samples = (start_rel_time_ - global_start_rel_time_) * audio_out_rate_;
+            qint64 prepend_samples = (start_rel_time_ - global_start_rel_time_) * audio_out_rate_;
             if (prepend_samples > 0) {
                 writeSilence(prepend_samples);
             }
@@ -278,10 +285,10 @@ void RtpAudioStream::decode()
 
             /* if there was a silence period (more than two packetization period) resync the source */
             if ((rtp_time - rtp_time_prev) > pack_period*2) {
-                qint64 silence_samples;
+                qint64 silence_samples;
                 RTP_STREAM_DEBUG("Resync...");
 
-                silence_samples = (int)((arrive_time - arrive_time_prev)*sample_rate - decoded_bytes_prev / sample_bytes_);
+                silence_samples = (qint64)((arrive_time - arrive_time_prev)*sample_rate - decoded_bytes_prev / sample_bytes_);
                 /* Fix for bug 4119/5902: don't insert too many silence frames.
                  * XXX - is there a better thing to do here?
                 */
@@ -301,7 +308,7 @@ void RtpAudioStream::decode()
             } else {
                 // rtp_player.c:664
                 /* Add silence if it is necessary */
-                int silence_samples;
+                qint64 silence_samples;
 
                 if (timing_mode_ == Uninterrupted) {
                     silence_samples = 0;
@@ -365,7 +372,29 @@ void RtpAudioStream::decode()
         }
 
         // Write the decoded, possibly-resampled audio to our temp file.
-        tempfile_->write(write_buff, write_bytes);
+        gint64 silence = 0;
+        if (audio_stereo_) {
+            // Process audio mute/left/right settings
+            for(qint64 i=0; i<write_bytes; i+=sample_bytes_) {
+                if (audio_left_) {
+                    tempfile_->write(write_buff+i, sample_bytes_);
+                } else {
+                    tempfile_->write((char *)&silence, sample_bytes_);
+                }
+                if (audio_right_) {
+                    tempfile_->write(write_buff+i, sample_bytes_);
+                } else {
+                    tempfile_->write((char *)&silence, sample_bytes_);
+                }
+            }
+        } else {
+            // Process audio mute/unmute settings
+            if (audio_left_) {
+                tempfile_->write(write_buff, write_bytes);
+            } else {
+                writeSilence(write_bytes / sample_bytes_);
+            }
+        }
 
         // Collect our visual samples.
         spx_uint32_t in_len = (spx_uint32_t)rtp_packet->info->info_payload_len;
@@ -570,7 +599,11 @@ void RtpAudioStream::startPlaying()
     format.setSampleRate(audio_out_rate_);
     format.setSampleSize(sample_bytes_ * 8); // bits
     format.setSampleType(QAudioFormat::SignedInt);
-    format.setChannelCount(1);
+    if (audio_stereo_) {
+        format.setChannelCount(2);
+    } else {
+        format.setChannelCount(1);
+    }
     format.setCodec("audio/pcm");
 
     // RTP_STREAM_DEBUG("playing %s %d samples @ %u Hz",
@@ -592,6 +625,10 @@ void RtpAudioStream::startPlaying()
     start_pos = (qint64)(start_play_time_ * sample_bytes_ * audio_out_rate_);
     // Round to sample_bytes_ boundary
     start_pos = (start_pos / sample_bytes_) * sample_bytes_;
+    if (audio_stereo_) {
+        // There is 2x more samples for stereo
+        start_pos *= 2;
+    }
     if (start_pos < tempfile_->size()) {
         tempfile_->seek(start_pos);
         audio_output_->start(tempfile_);
@@ -614,15 +651,21 @@ void RtpAudioStream::stopPlaying()
     }
 }
 
-void RtpAudioStream::writeSilence(int samples)
+void RtpAudioStream::writeSilence(qint64 samples)
 {
     if (samples < 1 || audio_out_rate_ == 0) return;
 
-    unsigned silence_bytes = samples * sample_bytes_;
+    qint64 silence_bytes = samples * sample_bytes_;
     char *silence_buff = (char *) g_malloc0(silence_bytes);
 
-    RTP_STREAM_DEBUG("Writing %u silence samples", samples);
-    tempfile_->write(silence_buff, silence_bytes);
+    RTP_STREAM_DEBUG("Writing %llu silence samples", samples);
+    if (audio_stereo_) {
+        // Silence for left and right channel
+        tempfile_->write(silence_buff, silence_bytes);
+        tempfile_->write(silence_buff, silence_bytes);
+    } else {
+        tempfile_->write(silence_buff, silence_bytes);
+    }
     g_free(silence_buff);
 
     // Silence is inserted to audio file only.
diff --git a/ui/qt/rtp_audio_stream.h b/ui/qt/rtp_audio_stream.h
index 64948065a1..f39df7dd72 100644
--- a/ui/qt/rtp_audio_stream.h
+++ b/ui/qt/rtp_audio_stream.h
@@ -45,7 +45,7 @@ public:
     bool isMatch(const struct _packet_info *pinfo, const struct _rtp_info *rtp_info) const;
     //void addRtpStream(const rtpstream_info_t *rtpstream);
     void addRtpPacket(const struct _packet_info *pinfo, const struct _rtp_info *rtp_info);
-    void reset(double global_start_time);
+    void reset(double global_start_time, bool stereo, bool left, bool right);
     void decode();
 
     double startRelTime() const { return start_rel_time_; }
@@ -153,6 +153,9 @@ private:
     double start_abs_offset_;
     double start_rel_time_;
     double stop_rel_time_;
+    bool audio_stereo_;
+    bool audio_left_;
+    bool audio_right_;
     quint32 audio_out_rate_;
     QSet<QString> payload_names_;
     struct SpeexResamplerState_ *audio_resampler_;
@@ -171,7 +174,7 @@ private:
     TimingMode timing_mode_;
     double start_play_time_;
 
-    void writeSilence(int samples);
+    void writeSilence(qint64 samples);
     const QString formatDescription(const QAudioFormat & format);
     QString currentOutputDevice();
 
diff --git a/ui/qt/rtp_player_dialog.cpp b/ui/qt/rtp_player_dialog.cpp
index 3be1d397df..18026df063 100644
--- a/ui/qt/rtp_player_dialog.cpp
+++ b/ui/qt/rtp_player_dialog.cpp
@@ -68,6 +68,7 @@
 // In some places we match by conv/call number, in others we match by first frame.
 
 enum {
+    channel_col_,
     src_addr_col_,
     src_port_col_,
     dst_addr_col_,
@@ -80,7 +81,8 @@ enum {
     payload_col_,
 
     stream_data_col_ = src_addr_col_, // RtpAudioStream
-    graph_data_col_ = src_port_col_ // QCPGraph
+    graph_data_col_ = src_port_col_, // QCPGraph
+    channel_data_col_ = channel_col_, // channel_mode_t
 };
 
 #ifdef QT_MULTIMEDIA_LIB
@@ -99,6 +101,7 @@ RtpPlayerDialog::RtpPlayerDialog(QWidget &parent, CaptureFile &cf) :
 #endif // QT_MULTIMEDIA_LIB
     , number_ticker_(new QCPAxisTicker)
     , datetime_ticker_(new QCPAxisTickerDateTime)
+    , stereo_available_(false)
 {
     ui->setupUi(this);
     setWindowTitle(wsApp->windowTitleString(tr("RTP Player")));
@@ -181,6 +184,8 @@ RtpPlayerDialog::RtpPlayerDialog(QWidget &parent, CaptureFile &cf) :
             );
     ui->audioPlot->setFocus();
 
+    stereo_available_ = isStereoAvailable();
+
     QTimer::singleShot(0, this, SLOT(retapPackets()));
 #endif // QT_MULTIMEDIA_LIB
 }
@@ -239,9 +244,35 @@ void RtpPlayerDialog::rescanPackets(bool rescale_axes)
     int row_count = ui->streamTreeWidget->topLevelItemCount();
     // Clear existing graphs and reset stream values
     for (int row = 0; row < row_count; row++) {
+        bool left, right;
+
         QTreeWidgetItem *ti = ui->streamTreeWidget->topLevelItem(row);
         RtpAudioStream *audio_stream = ti->data(stream_data_col_, Qt::UserRole).value<RtpAudioStream *>();
-        audio_stream->reset(first_stream_rel_start_time_);
+        channel_mode_t channel_mode = (channel_mode_t)ti->data(channel_data_col_, Qt::UserRole).toUInt();
+        left = right = true;
+        switch (channel_mode) {
+            case channel_none:
+                left = false;
+                right = false;
+                break;
+            case channel_mono:
+                left = true;
+                right = false;
+                break;
+            case channel_stereo_left:
+                left = true;
+                right = false;
+                break;
+            case channel_stereo_right:
+                left = false;
+                right = true;
+                break;
+            case channel_stereo_both:
+                left = true;
+                right = true;
+                break;
+        }
+        audio_stream->reset(first_stream_rel_start_time_, stereo_available_, left, right);
 
         ti->setData(graph_data_col_, Qt::UserRole, QVariant());
     }
@@ -259,6 +290,7 @@ void RtpPlayerDialog::rescanPackets(bool rescale_axes)
     for (int row = 0; row < row_count; row++) {
         QTreeWidgetItem *ti = ui->streamTreeWidget->topLevelItem(row);
         RtpAudioStream *audio_stream = ti->data(stream_data_col_, Qt::UserRole).value<RtpAudioStream *>();
+        channel_mode_t channel_mode = (channel_mode_t)ti->data(channel_data_col_, Qt::UserRole).toUInt();
         int y_offset = row_count - row - 1;
 
         audio_stream->setJitterBufferSize((int) ui->jitterSpinBox->value());
@@ -282,6 +314,10 @@ void RtpPlayerDialog::rescanPackets(bool rescale_axes)
         QCPGraph *audio_graph = ui->audioPlot->addGraph();
         QPen wf_pen(audio_stream->color());
         wf_pen.setWidthF(wf_graph_normal_width_);
+        if (channel_mode == channel_none) {
+            // Indicate that audio will not be audible
+            wf_pen.setStyle(Qt::DotLine);
+        }
         audio_graph->setPen(wf_pen);
         audio_graph->setSelectable(QCP::stNone);
         audio_graph->setData(audio_stream->visualTimestamps(relative_timestamps), audio_stream->visualSamples(y_offset));
@@ -371,6 +407,8 @@ void RtpPlayerDialog::rescanPackets(bool rescale_axes)
 
 void RtpPlayerDialog::addRtpStream(rtpstream_info_t *rtpstream)
 {
+    channel_mode_t channel_mode = channel_none;
+
     if (!rtpstream) return;
 
     // Find the RTP streams associated with this conversation.
@@ -401,6 +439,17 @@ void RtpPlayerDialog::addRtpStream(rtpstream_info_t *rtpstream)
     ti->setText(num_pkts_col_, QString::number(rtpstream->packet_count));
     ti->setData(stream_data_col_, Qt::UserRole, QVariant::fromValue(audio_stream));
 
+    if (stereo_available_) {
+        if (tli_count%2) {
+            channel_mode = channel_stereo_right;
+        } else {
+            channel_mode = channel_stereo_left;
+        }
+    } else {
+        channel_mode = channel_mono;
+    }
+    ti->setToolTip(channel_data_col_, QString(tr("Double click to change audio routing")));
+    setChannelMode(ti, channel_mode);
 
     for (int col = 0; col < ui->streamTreeWidget->columnCount(); col++) {
         QBrush fgBrush = ti->foreground(col);
@@ -750,15 +799,26 @@ void RtpPlayerDialog::on_streamTreeWidget_itemSelectionChanged()
     ui->audioPlot->setFocus();
 }
 
-const QString RtpPlayerDialog::getFormatedTime(double time)
+// Change the channel audio routing when the channel column is double-clicked
+void RtpPlayerDialog::on_streamTreeWidget_itemDoubleClicked(QTreeWidgetItem *item, const int column)
+{
+    if (column == channel_col_) {
+        channel_mode_t channel_mode = (channel_mode_t)item->data(channel_data_col_, Qt::UserRole).toUInt();
+        channel_mode = changeChannelMode(channel_mode);
+        setChannelMode(item, channel_mode);
+        rescanPackets();
+    }
+}
+
+const QString RtpPlayerDialog::getFormatedTime(double f_time)
 {
     QString time_str;
 
     if (ui->todCheckBox->isChecked()) {
-        QDateTime date_time = QDateTime::fromMSecsSinceEpoch(time * 1000.0);
+        QDateTime date_time = QDateTime::fromMSecsSinceEpoch(f_time * 1000.0);
         time_str = date_time.toString("yyyy-MM-dd hh:mm:ss.zzz");
     } else {
-        time_str = QString::number(time, 'f', 3);
+        time_str = QString::number(f_time, 'f', 3);
         time_str += " s";
     }
 
@@ -859,15 +919,15 @@ void RtpPlayerDialog::drawStartPlayMarker()
     updateHintLabel();
 }
 
-void RtpPlayerDialog::setStartPlayMarker(double time)
+void RtpPlayerDialog::setStartPlayMarker(double new_time)
 {
     if (ui->todCheckBox->isChecked()) {
-        time = qBound(first_stream_abs_start_time_, time, first_stream_abs_start_time_ + streams_length_);
+        new_time = qBound(first_stream_abs_start_time_, new_time, first_stream_abs_start_time_ + streams_length_);
         // start_play_time is relative, we must calculate it
-        start_marker_time_ = time - first_stream_abs_start_time_;
+        start_marker_time_ = new_time - first_stream_abs_start_time_;
     } else {
-        time = qBound(first_stream_rel_start_time_, time, first_stream_rel_start_time_ + streams_length_);
-        start_marker_time_ = time;
+        new_time = qBound(first_stream_rel_start_time_, new_time, first_stream_rel_start_time_ + streams_length_);
+        start_marker_time_ = new_time;
     }
 }
 
@@ -892,6 +952,73 @@ void RtpPlayerDialog::updateStartStopTime(rtpstream_info_t *rtpstream, int tli_c
     streams_length_ = first_stream_rel_stop_time_ - first_stream_rel_start_time_;
 }
 
+void RtpPlayerDialog::setChannelMode(QTreeWidgetItem *ti, channel_mode_t channel_mode)
+{
+    QString t;
+
+    ti->setData(channel_data_col_, Qt::UserRole, QVariant(channel_mode));
+    switch (channel_mode) {
+        case channel_none:
+            t=QString("Mute");
+            break;
+        case channel_mono:
+            t=QString("Play");
+            break;
+        case channel_stereo_left:
+            t=QString("L");
+            break;
+        case channel_stereo_right:
+            t=QString("R");
+            break;
+        case channel_stereo_both:
+            t=QString("L+R");
+            break;
+    }
+
+    ti->setText(channel_col_, t);
+}
+
+channel_mode_t RtpPlayerDialog::changeChannelMode(channel_mode_t channel_mode)
+{
+    if (stereo_available_) {
+        // Stereo
+        switch (channel_mode) {
+            case channel_stereo_left:
+                return channel_stereo_both;
+            case channel_stereo_both:
+                return channel_stereo_right;
+            case channel_stereo_right:
+                return channel_none;
+            case channel_none:
+                return channel_stereo_left;
+            default:
+                return channel_stereo_left;
+        }
+    } else {
+        // Mono
+        switch (channel_mode) {
+            case channel_none:
+                return channel_mono;
+            case channel_mono:
+                return channel_none;
+            default:
+                return channel_mono;
+        }
+    }
+}
+
+bool RtpPlayerDialog::isStereoAvailable()
+{
+    QAudioDeviceInfo cur_out_device = QAudioDeviceInfo::defaultOutputDevice();
+    foreach(int count, cur_out_device.supportedChannelCounts()) {
+        if (count>1) {
+            return true;
+        }
+    }
+
+    return false;
+}
+
 #if 0
 // This also serves as a title in RtpAudioFrame.
 static const QString stream_key_tmpl_ = "%1:%2 " UTF8_RIGHTWARDS_ARROW " %3:%4 0x%5";
diff --git a/ui/qt/rtp_player_dialog.h b/ui/qt/rtp_player_dialog.h
index d30066e144..b7035ebeaa 100644
--- a/ui/qt/rtp_player_dialog.h
+++ b/ui/qt/rtp_player_dialog.h
@@ -19,11 +19,20 @@
 
 #include "wireshark_dialog.h"
 
 #include
+#include
 
 namespace Ui {
 class RtpPlayerDialog;
 }
 
+typedef enum {
+    channel_none,         // Mute
+    channel_mono,         // Play
+    channel_stereo_left,  // L
+    channel_stereo_right, // R
+    channel_stereo_both   // L+R
+} channel_mode_t;
+
 class QCPItemStraightLine;
 class QDialogButtonBox;
 class QMenu;
@@ -104,6 +113,7 @@ private slots:
     void on_actionMoveRight1_triggered();
     void on_actionGoToPacket_triggered();
    void on_streamTreeWidget_itemSelectionChanged();
+    void on_streamTreeWidget_itemDoubleClicked(QTreeWidgetItem *item, const int column);
     void on_outputDeviceComboBox_currentIndexChanged(const QString &);
     void on_jitterSpinBox_valueChanged(double);
     void on_timingComboBox_currentIndexChanged(int);
@@ -123,6 +133,7 @@ private:
     QString playback_error_;
     QSharedPointer<QCPAxisTicker> number_ticker_;
     QSharedPointer<QCPAxisTickerDateTime> datetime_ticker_;
+    bool stereo_available_;
 
     // const QString streamKey(const rtpstream_info_t *rtpstream);
     // const QString streamKey(const packet_info *pinfo, const struct _rtp_info *rtpinfo);
@@ -135,14 +146,17 @@ private:
     void addPacket(packet_info *pinfo, const struct _rtp_info *rtpinfo);
     void zoomXAxis(bool in);
     void panXAxis(int x_pixels);
-    const QString getFormatedTime(double time);
+    const QString getFormatedTime(double f_time);
     const QString getFormatedHoveredTime();
     int getHoveredPacket();
     QString currentOutputDeviceName();
     double getStartPlayMarker();
     void drawStartPlayMarker();
-    void setStartPlayMarker(double time);
+    void setStartPlayMarker(double new_time);
     void updateStartStopTime(rtpstream_info_t *rtpstream, int tli_count);
+    void setChannelMode(QTreeWidgetItem *ti, channel_mode_t channel_mode);
+    channel_mode_t changeChannelMode(channel_mode_t channel_mode);
+    bool isStereoAvailable();
 
 #else // QT_MULTIMEDIA_LIB
 private:
diff --git a/ui/qt/rtp_player_dialog.ui b/ui/qt/rtp_player_dialog.ui
index a1e478ebd8..af0c7deb47 100644
--- a/ui/qt/rtp_player_dialog.ui
+++ b/ui/qt/rtp_player_dialog.ui
@@ -30,6 +30,11 @@
        <bool>false</bool>
       </property>
+      <column>
+       <property name="text">
+        <string>Play</string>
+       </property>
+      </column>
       <column>
        <property name="text">
         <string>Source Address</string>
       </property>
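A note on the routing logic for reviewers: the core of the change is the interleaving step in RtpAudioStream::decode(). When stereo output is available, each decoded mono sample is written to the temp file twice, once for the left channel slot and once for the right, and zeroes are substituted for whichever side is muted, so a two-channel QAudioOutput plays the stream only on the selected speaker. The following standalone sketch illustrates that idea under stated assumptions; it is plain C++ without Qt, and the names ChannelRouting and routeToStereo are illustrative, not part of the patch.

// Illustrative sketch only; it mirrors the per-sample routing done in
// RtpAudioStream::decode(), but is not code from the patch.
#include <cstdint>
#include <vector>

// Routing selected in the new 'Play' column: Mute / L / L+R / R.
struct ChannelRouting {
    bool left;   // play on the left speaker
    bool right;  // play on the right speaker
};

// Expand decoded mono samples into an interleaved stereo buffer (L R L R ...),
// writing silence (zero) for a muted side.
static std::vector<int16_t> routeToStereo(const std::vector<int16_t> &mono,
                                          ChannelRouting routing)
{
    std::vector<int16_t> stereo;
    stereo.reserve(mono.size() * 2);    // two output samples per input sample
    for (int16_t sample : mono) {
        stereo.push_back(routing.left  ? sample : 0);   // left channel slot
        stereo.push_back(routing.right ? sample : 0);   // right channel slot
    }
    return stereo;
}

int main()
{
    const std::vector<int16_t> mono = {100, -200, 300};
    // "L" routing: audio on the left speaker only.
    const std::vector<int16_t> stereo = routeToStereo(mono, {true, false});
    // stereo now holds {100, 0, -200, 0, 300, 0}.
    return stereo.size() == mono.size() * 2 ? 0 : 1;
}

This doubling of samples is also why startPlaying() multiplies the computed start_pos by two when audio_stereo_ is set: in stereo mode every unit of playback time covers twice as many bytes in the temp file.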