aboutsummaryrefslogtreecommitdiffstats
path: root/lib
diff options
context:
space:
mode:
authorLukas Lao Beyer <lukas@electronics.kitchen>2017-04-07 23:08:01 -0400
committerDimitri Stolnikov <horiz0n@gmx.net>2017-04-18 23:48:08 +0200
commit5ecfa255d299b9b4842ccd09a02892a853fcd5a7 (patch)
tree53c8c114cc17febf42db53c28e67e67d333959db /lib
parente9dde9afd754c6e3bcf1a1444c04064051f04ecd (diff)
Add support for FreeSRP
This patch adds support for both receiving and transmitting using the FreeSRP. More information on the FreeSRP can be found at: http://freesrp.org The gr-osmosdr blocks added make use of libfreesrp, the library required for interfacing with this device. The libfreesrp source code is freely available at https://github.com/freesrp/libfreesrp Usage example: osmocom_fft -a "freesrp"
Diffstat (limited to 'lib')
-rw-r--r--lib/CMakeLists.txt8
-rw-r--r--lib/config.h.in1
-rw-r--r--lib/device.cc8
-rw-r--r--lib/freesrp/CMakeLists.txt39
-rw-r--r--lib/freesrp/freesrp_common.cc199
-rw-r--r--lib/freesrp/freesrp_common.h29
-rw-r--r--lib/freesrp/freesrp_sink_c.cc280
-rw-r--r--lib/freesrp/freesrp_sink_c.h130
-rw-r--r--lib/freesrp/freesrp_source_c.cc341
-rw-r--r--lib/freesrp/freesrp_source_c.h131
-rw-r--r--lib/freesrp/readerwriterqueue/LICENSE.md28
-rw-r--r--lib/freesrp/readerwriterqueue/README.md114
-rw-r--r--lib/freesrp/readerwriterqueue/atomicops.h577
-rw-r--r--lib/freesrp/readerwriterqueue/readerwriterqueue.h764
-rw-r--r--lib/sink_impl.cc16
-rw-r--r--lib/source_impl.cc19
16 files changed, 2684 insertions, 0 deletions
diff --git a/lib/CMakeLists.txt b/lib/CMakeLists.txt
index a99890a..c05b8d9 100644
--- a/lib/CMakeLists.txt
+++ b/lib/CMakeLists.txt
@@ -241,6 +241,14 @@ GR_INCLUDE_SUBDIRECTORY(redpitaya)
endif(ENABLE_REDPITAYA)
########################################################################
+# Setup FreeSRP component
+########################################################################
+GR_REGISTER_COMPONENT("FreeSRP support" ENABLE_FREESRP LIBFREESRP_FOUND)
+if(ENABLE_FREESRP)
+GR_INCLUDE_SUBDIRECTORY(freesrp)
+endif(ENABLE_FREESRP)
+
+########################################################################
# Setup configuration file
########################################################################
ADD_DEFINITIONS(-DHAVE_CONFIG_H=1)
diff --git a/lib/config.h.in b/lib/config.h.in
index 1e843a8..42e72f1 100644
--- a/lib/config.h.in
+++ b/lib/config.h.in
@@ -18,6 +18,7 @@
#cmakedefine ENABLE_AIRSPY
#cmakedefine ENABLE_SOAPY
#cmakedefine ENABLE_REDPITAYA
+#cmakedefine ENABLE_FREESRP
//provide NAN define for MSVC older than VC12
#if defined(_MSC_VER) && (_MSC_VER < 1800)
diff --git a/lib/device.cc b/lib/device.cc
index d75d6ad..025a22b 100644
--- a/lib/device.cc
+++ b/lib/device.cc
@@ -86,6 +86,10 @@
#include <redpitaya_source_c.h>
#endif
+#ifdef ENABLE_FREESRP
+#include <freesrp_source_c.h>
+#endif
+
#include "arg_helpers.h"
using namespace osmosdr;
@@ -182,6 +186,10 @@ devices_t device::find(const device_t &hint)
BOOST_FOREACH( std::string dev, airspy_source_c::get_devices() )
devices.push_back( device_t(dev) );
#endif
+#ifdef ENABLE_FREESRP
+ BOOST_FOREACH( std::string dev, freesrp_source_c::get_devices() )
+ devices.push_back( device_t(dev) );
+#endif
#ifdef ENABLE_SOAPY
BOOST_FOREACH( std::string dev, soapy_source_c::get_devices() )
devices.push_back( device_t(dev) );
diff --git a/lib/freesrp/CMakeLists.txt b/lib/freesrp/CMakeLists.txt
new file mode 100644
index 0000000..46df7e4
--- /dev/null
+++ b/lib/freesrp/CMakeLists.txt
@@ -0,0 +1,39 @@
+# Copyright 2012 Free Software Foundation, Inc.
+#
+# This file is part of GNU Radio
+#
+# GNU Radio is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GNU Radio is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GNU Radio; see the file COPYING. If not, write to
+# the Free Software Foundation, Inc., 51 Franklin Street,
+# Boston, MA 02110-1301, USA.
+
+########################################################################
+# This file included, use CMake directory variables
+########################################################################
+
+include_directories(
+ ${CMAKE_CURRENT_SOURCE_DIR}
+ ${LIBFREESRP_INCLUDE_DIRS}
+)
+
+set(freesrp_srcs
+ ${CMAKE_CURRENT_SOURCE_DIR}/freesrp_common.cc
+ ${CMAKE_CURRENT_SOURCE_DIR}/freesrp_source_c.cc
+ ${CMAKE_CURRENT_SOURCE_DIR}/freesrp_sink_c.cc
+)
+
+########################################################################
+# Append gnuradio-osmosdr library sources
+########################################################################
+list(APPEND gr_osmosdr_srcs ${freesrp_srcs})
+list(APPEND gr_osmosdr_libs ${LIBFREESRP_LIBRARIES})
diff --git a/lib/freesrp/freesrp_common.cc b/lib/freesrp/freesrp_common.cc
new file mode 100644
index 0000000..77db220
--- /dev/null
+++ b/lib/freesrp/freesrp_common.cc
@@ -0,0 +1,199 @@
+#include "freesrp_common.h"
+
+#include <cstdlib>
+
+#include <boost/make_shared.hpp>
+#include <boost/assign.hpp>
+
+#include <arg_helpers.h>
+
+using namespace FreeSRP;
+using namespace std;
+using namespace boost::assign;
+
+boost::shared_ptr<::FreeSRP::FreeSRP> freesrp_common::_srp;
+
+freesrp_common::freesrp_common(const string &args)
+{
+ dict_t dict = params_to_dict(args);
+
+ if(!_srp)
+ {
+ try
+ {
+ string serial = "";
+
+ if(dict.count("freesrp"))
+ {
+ serial = dict["freesrp"];
+ }
+
+ if(dict.count("fx3"))
+ {
+ if(Util::find_fx3())
+ {
+ // Upload firmware to FX3
+ string firmware_path = string(getenv("HOME")) + "/.freesrp/fx3.img";
+ if(dict["fx3"].length() > 0)
+ {
+ firmware_path = dict["fx3"];
+ }
+ Util::find_fx3(true, firmware_path);
+ cout << "FX3 programmed with '" << firmware_path << "'" << endl;
+ // Give FX3 time to re-enumerate
+ this_thread::sleep_for(chrono::milliseconds(600));
+ }
+ else
+ {
+ cout << "No FX3 in bootloader mode found" << endl;
+ }
+ }
+
+ _srp.reset(new ::FreeSRP::FreeSRP(serial));
+
+ if(dict.count("fpga") || !_srp->fpga_loaded())
+ {
+ string bitstream_path = string(getenv("HOME")) + "/.freesrp/fpga.bin";
+ if(dict["fpga"].length() > 0)
+ {
+ bitstream_path = dict["fpga"];
+ }
+ fpga_status stat = _srp->load_fpga(bitstream_path);
+ switch(stat)
+ {
+ case FPGA_CONFIG_ERROR:
+ throw runtime_error("Could not load FPGA configuration!");
+ case FPGA_CONFIG_SKIPPED:
+ cout << "FPGA already configured. Restart the FreeSRP to load a new bitstream." << endl;
+ break;
+ case FPGA_CONFIG_DONE:
+ cout << "FPGA configured with '" << bitstream_path << "'" << endl;
+ break;
+ }
+ }
+
+ cout << "Connected to FreeSRP" << endl;
+
+ if(dict.count("loopback"))
+ {
+ response res = _srp->send_cmd({SET_LOOPBACK_EN, 1});
+ if(res.error == CMD_OK)
+ {
+ cout << "AD9364 in loopback mode" << endl;
+ }
+ else
+ {
+ throw runtime_error("Could not put AD9364 into loopback mode!");
+ }
+ }
+ else
+ {
+ response res = _srp->send_cmd({SET_LOOPBACK_EN, 0});
+ if(res.error != CMD_OK)
+ {
+ throw runtime_error("Error disabling AD9364 loopback mode!");
+ }
+ }
+
+ if(dict.count("ignore_overflow"))
+ {
+ _ignore_overflow = true;
+ }
+ else
+ {
+ _ignore_overflow = false;
+ }
+ }
+ catch(const runtime_error& e)
+ {
+ cerr << "FreeSRP Error: " << e.what() << endl;
+ throw runtime_error(e.what());
+ }
+ }
+}
+
+vector<string> freesrp_common::get_devices()
+{
+ vector<string> devices;
+
+ try
+ {
+ ::FreeSRP::FreeSRP srp;
+
+ string str;
+ str = "freesrp=0,label='FreeSRP'";
+
+ devices.push_back(str);
+ }
+ catch(const ConnectionError &err)
+ {
+ // No FreeSRP found.
+ }
+
+ return devices;
+}
+
+size_t freesrp_common::get_num_channels( void )
+{
+ return 1;
+}
+
+osmosdr::meta_range_t freesrp_common::get_sample_rates( void )
+{
+ osmosdr::meta_range_t range;
+
+ // Any sample rate between 1e6 and 61.44e6 can be requested.
+ // This list of some integer values is used instead of
+ // range += osmosdr::range_t(1e6, 61.44e6);
+ // because SoapyOsmo seems to handle the range object differently.
+ range += osmosdr::range_t(1e6);
+ range += osmosdr::range_t(8e6);
+ range += osmosdr::range_t(16e6);
+ range += osmosdr::range_t(20e6);
+ range += osmosdr::range_t(40e6);
+ range += osmosdr::range_t(50e6);
+ range += osmosdr::range_t(61.44e6);
+
+ return range;
+}
+
+osmosdr::freq_range_t freesrp_common::get_freq_range(size_t chan)
+{
+ osmosdr::meta_range_t freq_ranges;
+
+ freq_ranges.push_back(osmosdr::range_t(7e7, 6e9, 2.4));
+
+ return freq_ranges;
+}
+
+
+osmosdr::freq_range_t freesrp_common::get_bandwidth_range(size_t chan)
+{
+ osmosdr::meta_range_t range;
+
+ //range += osmosdr::range_t(2e5, 56e6);
+
+ range += osmosdr::range_t(2e5);
+ range += osmosdr::range_t(1e6);
+ range += osmosdr::range_t(8e6);
+ range += osmosdr::range_t(16e6);
+ range += osmosdr::range_t(20e6);
+ range += osmosdr::range_t(40e6);
+ range += osmosdr::range_t(50e6);
+ range += osmosdr::range_t(56e6);
+
+ return range;
+}
+
+
+double freesrp_common::set_freq_corr( double ppm, size_t chan )
+{
+ // TODO: Set DCXO tuning
+ return 0;
+}
+
+double freesrp_common::get_freq_corr( size_t chan )
+{
+ // TODO: Get DCXO tuning
+ return 0;
+}
diff --git a/lib/freesrp/freesrp_common.h b/lib/freesrp/freesrp_common.h
new file mode 100644
index 0000000..9a5687c
--- /dev/null
+++ b/lib/freesrp/freesrp_common.h
@@ -0,0 +1,29 @@
+#ifndef INCLUDED_FREESRP_COMMON_H
+#define INCLUDED_FREESRP_COMMON_H
+
+#include <vector>
+#include <string>
+
+#include "osmosdr/ranges.h"
+
+#include <freesrp.hpp>
+
+class freesrp_common
+{
+protected:
+ freesrp_common(const std::string &args);
+public:
+ static std::vector<std::string> get_devices();
+
+ size_t get_num_channels( void );
+ osmosdr::meta_range_t get_sample_rates( void );
+ osmosdr::freq_range_t get_freq_range( size_t chan = 0 );
+ osmosdr::freq_range_t get_bandwidth_range( size_t chan = 0 );
+ double set_freq_corr( double ppm, size_t chan = 0 );
+ double get_freq_corr( size_t chan = 0 );
+protected:
+ static boost::shared_ptr<::FreeSRP::FreeSRP> _srp;
+ bool _ignore_overflow = false;
+};
+
+#endif
diff --git a/lib/freesrp/freesrp_sink_c.cc b/lib/freesrp/freesrp_sink_c.cc
new file mode 100644
index 0000000..fe692f4
--- /dev/null
+++ b/lib/freesrp/freesrp_sink_c.cc
@@ -0,0 +1,280 @@
+#include "freesrp_sink_c.h"
+
+using namespace FreeSRP;
+using namespace std;
+
+freesrp_sink_c_sptr make_freesrp_sink_c (const string &args)
+{
+ return gnuradio::get_initial_sptr(new freesrp_sink_c (args));
+}
+
+/*
+ * Specify constraints on number of input and output streams.
+ * This info is used to construct the input and output signatures
+ * (2nd & 3rd args to gr_block's constructor). The input and
+ * output signatures are used by the runtime system to
+ * check that a valid number and type of inputs and outputs
+ * are connected to this block. In this case, we accept
+ * only 1 input and 0 output.
+ */
+static const int MIN_IN = 1; // minimum number of input streams
+static const int MAX_IN = 1; // maximum number of input streams
+static const int MIN_OUT = 0; // minimum number of output streams
+static const int MAX_OUT = 0; // maximum number of output streams
+
+freesrp_sink_c::freesrp_sink_c (const string & args) : gr::sync_block("freesrp_sink_c",
+ gr::io_signature::make (MIN_IN, MAX_IN, sizeof (gr_complex)),
+ gr::io_signature::make (MIN_OUT, MAX_OUT, sizeof (gr_complex))),
+ freesrp_common(args)
+{
+ if(_srp == nullptr)
+ {
+ throw runtime_error("FreeSRP not initialized!");
+ }
+}
+
+bool freesrp_sink_c::start()
+{
+ response res = _srp->send_cmd({SET_DATAPATH_EN, 1});
+ if(res.error != CMD_OK)
+ {
+ return false;
+ }
+ _srp->start_tx(std::bind(&freesrp_sink_c::freesrp_tx_callback, this, std::placeholders::_1));
+ return true;
+}
+
+bool freesrp_sink_c::stop()
+{
+ _srp->send_cmd({SET_DATAPATH_EN, 0});
+ _srp->stop_tx();
+ return true;
+}
+
+void freesrp_sink_c::freesrp_tx_callback(vector<sample>& samples)
+{
+ unique_lock<std::mutex> lk(_buf_mut);
+
+ for(sample &s : samples)
+ {
+ if(!_buf_queue.try_dequeue(s))
+ {
+ s.i = 0;
+ s.q = 0;
+ }
+ else
+ {
+ _buf_available_space++;
+ }
+ }
+
+ _buf_cond.notify_one();
+}
+
+int freesrp_sink_c::work(int noutput_items, gr_vector_const_void_star& input_items, gr_vector_void_star& output_items)
+{
+ const gr_complex *in = (const gr_complex *) input_items[0];
+
+ unique_lock<std::mutex> lk(_buf_mut);
+
+ // Wait until enough space is available
+ while(_buf_available_space < (unsigned int) noutput_items)
+ {
+ _buf_cond.wait(lk);
+ }
+
+ for(int i = 0; i < noutput_items; ++i)
+ {
+ sample s;
+ s.i = (int16_t) (real(in[i]) * 2047.0f);
+ s.q = (int16_t) (imag(in[i]) * 2047.0f);
+
+ if(!_buf_queue.try_enqueue(s))
+ {
+ throw runtime_error("Failed to add sample to buffer. This should never happen. Available space reported to be " + to_string(_buf_available_space) + " samples, noutput_items=" + to_string(noutput_items) + ", i=" + to_string(i));
+ }
+ else
+ {
+ _buf_available_space--;
+ }
+ }
+
+ return noutput_items;
+}
+
+double freesrp_sink_c::set_sample_rate( double rate )
+{
+ command cmd = _srp->make_command(SET_TX_SAMP_FREQ, rate);
+ response r = _srp->send_cmd(cmd);
+ if(r.error != CMD_OK)
+ {
+ cerr << "Could not set TX sample rate, error: " << r.error << endl;
+ return 0;
+ }
+ else
+ {
+ return static_cast<double>(r.param);
+ }
+}
+
+double freesrp_sink_c::get_sample_rate( void )
+{
+ response r = _srp->send_cmd({GET_TX_SAMP_FREQ, 0});
+ if(r.error != CMD_OK)
+ {
+ cerr << "Could not get TX sample rate, error: " << r.error << endl;
+ return 0;
+ }
+ else
+ {
+ return r.param;
+ }
+}
+
+double freesrp_sink_c::set_center_freq( double freq, size_t chan )
+{
+ command cmd = _srp->make_command(SET_TX_LO_FREQ, freq);
+ response r = _srp->send_cmd(cmd);
+ if(r.error != CMD_OK)
+ {
+ cerr << "Could not set TX LO frequency, error: " << r.error << endl;
+ return 0;
+ }
+ else
+ {
+ return static_cast<double>(r.param);
+ }
+}
+
+double freesrp_sink_c::get_center_freq( size_t chan )
+{
+ response r = _srp->send_cmd({GET_TX_LO_FREQ, 0});
+ if(r.error != CMD_OK)
+ {
+ cerr << "Could not get TX LO frequency, error: " << r.error << endl;
+ return 0;
+ }
+ else
+ {
+ return static_cast<double>(r.param);
+ }
+}
+
+vector<string> freesrp_sink_c::get_gain_names( size_t chan )
+{
+ vector<string> names;
+
+ names.push_back("TX_RF");
+
+ return names;
+}
+
+osmosdr::gain_range_t freesrp_sink_c::get_gain_range(size_t chan)
+{
+ osmosdr::meta_range_t gain_ranges;
+
+ gain_ranges.push_back(osmosdr::range_t(0, 89.75, 0.25));
+
+ return gain_ranges;
+}
+
+osmosdr::gain_range_t freesrp_sink_c::get_gain_range(const string& name, size_t chan)
+{
+ return get_gain_range(chan);
+}
+
+double freesrp_sink_c::set_gain(double gain, size_t chan)
+{
+ gain = get_gain_range().clip(gain);
+
+ double atten = 89.75 - gain;
+
+ command cmd = _srp->make_command(SET_TX_ATTENUATION, atten * 1000);
+ response r = _srp->send_cmd(cmd);
+ if(r.error != CMD_OK)
+ {
+ cerr << "Could not set TX attenuation, error: " << r.error << endl;
+ return 0;
+ }
+ else
+ {
+ return 89.75 - (((double) r.param) / 1000.0);
+ }
+}
+
+double freesrp_sink_c::set_gain(double gain, const string& name, size_t chan)
+{
+ return set_gain(gain, chan);
+}
+
+double freesrp_sink_c::get_gain(size_t chan)
+{
+ response r = _srp->send_cmd({GET_TX_ATTENUATION, 0});
+ if(r.error != CMD_OK)
+ {
+ cerr << "Could not get TX RF attenuation, error: " << r.error << endl;
+ return 0;
+ }
+ else
+ {
+ return 89.75 - (((double) r.param) / 1000.0);
+ }
+}
+
+double freesrp_sink_c::get_gain(const string& name, size_t chan)
+{
+ return get_gain(chan);
+}
+
+double freesrp_sink_c::set_bb_gain(double gain, size_t chan)
+{
+ return set_gain(gain, chan);
+}
+
+vector<string> freesrp_sink_c::get_antennas(size_t chan)
+{
+ vector<string> antennas;
+
+ antennas.push_back(get_antenna(chan));
+
+ return antennas;
+}
+
+string freesrp_sink_c::set_antenna(const string& antenna, size_t chan)
+{
+ return get_antenna(chan);
+}
+
+string freesrp_sink_c::get_antenna(size_t chan)
+{
+ return "TX";
+}
+
+double freesrp_sink_c::set_bandwidth(double bandwidth, size_t chan)
+{
+ command cmd = _srp->make_command(SET_TX_RF_BANDWIDTH, bandwidth);
+ response r = _srp->send_cmd(cmd);
+ if(r.error != CMD_OK)
+ {
+ cerr << "Could not set TX RF bandwidth, error: " << r.error << endl;
+ return 0;
+ }
+ else
+ {
+ return static_cast<double>(r.param);
+ }
+}
+
+double freesrp_sink_c::get_bandwidth(size_t chan)
+{
+ response r = _srp->send_cmd({GET_TX_RF_BANDWIDTH, 0});
+ if(r.error != CMD_OK)
+ {
+ cerr << "Could not get TX RF bandwidth, error: " << r.error << endl;
+ return 0;
+ }
+ else
+ {
+ return r.param;
+ }
+}
diff --git a/lib/freesrp/freesrp_sink_c.h b/lib/freesrp/freesrp_sink_c.h
new file mode 100644
index 0000000..ce75785
--- /dev/null
+++ b/lib/freesrp/freesrp_sink_c.h
@@ -0,0 +1,130 @@
+/* -*- c++ -*- */
+/*
+ * Copyright 2015 Lukas Lao Beyer
+ * Copyright 2013 Dimitri Stolnikov <horiz0n@gmx.net>
+ *
+ * GNU Radio is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3, or (at your option)
+ * any later version.
+ *
+ * GNU Radio is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with GNU Radio; see the file COPYING. If not, write to
+ * the Free Software Foundation, Inc., 51 Franklin Street,
+ * Boston, MA 02110-1301, USA.
+ */
+#ifndef INCLUDED_FREESRP_SINK_C_H
+#define INCLUDED_FREESRP_SINK_C_H
+
+#include <gnuradio/thread/thread.h>
+#include <gnuradio/block.h>
+#include <gnuradio/sync_block.h>
+
+#include "osmosdr/ranges.h"
+#include "sink_iface.h"
+
+#include "freesrp_common.h"
+#include "readerwriterqueue/readerwriterqueue.h"
+
+#include <mutex>
+#include <condition_variable>
+
+#include <freesrp.hpp>
+
+class freesrp_sink_c;
+
+/*
+ * We use boost::shared_ptr's instead of raw pointers for all access
+ * to gr_blocks (and many other data structures). The shared_ptr gets
+ * us transparent reference counting, which greatly simplifies storage
+ * management issues. This is especially helpful in our hybrid
+ * C++ / Python system.
+ *
+ * See http://www.boost.org/libs/smart_ptr/smart_ptr.htm
+ *
+ * As a convention, the _sptr suffix indicates a boost::shared_ptr
+ */
+typedef boost::shared_ptr<freesrp_sink_c> freesrp_sink_c_sptr;
+
+/*!
+ * \brief Return a shared_ptr to a new instance of freesrp_sink_c.
+ *
+ * To avoid accidental use of raw pointers, freesrp_sink_c's
+ * constructor is private. make_freesrp_sink_c is the public
+ * interface for creating new instances.
+ */
+freesrp_sink_c_sptr make_freesrp_sink_c (const std::string & args = "");
+
+class freesrp_sink_c :
+ public gr::sync_block,
+ public sink_iface,
+ public freesrp_common
+{
+private:
+ // The friend declaration allows make_freesrp_sink_c to
+ // access the private constructor.
+ friend freesrp_sink_c_sptr make_freesrp_sink_c (const std::string & args);
+
+ freesrp_sink_c (const std::string & args); // private constructor
+
+public:
+
+ // From freesrp_common:
+ static std::vector<std::string> get_devices() { return freesrp_common::get_devices(); };
+ size_t get_num_channels( void ) { return freesrp_common::get_num_channels(); }
+ osmosdr::meta_range_t get_sample_rates( void ) { return freesrp_common::get_sample_rates(); }
+ osmosdr::freq_range_t get_freq_range( size_t chan = 0 ) { return freesrp_common::get_freq_range(chan); }
+ osmosdr::freq_range_t get_bandwidth_range( size_t chan = 0 ) { return freesrp_common::get_bandwidth_range(chan); }
+ double set_freq_corr( double ppm, size_t chan = 0 ) { return freesrp_common::set_freq_corr(ppm, chan); }
+ double get_freq_corr( size_t chan = 0 ) { return freesrp_common::get_freq_corr(chan); }
+
+ bool start();
+ bool stop();
+
+ int work( int noutput_items,
+ gr_vector_const_void_star &input_items,
+ gr_vector_void_star &output_items );
+
+ double set_sample_rate( double rate );
+ double get_sample_rate( void );
+
+ double set_center_freq( double freq, size_t chan = 0 );
+ double get_center_freq( size_t chan = 0 );
+
+ std::vector<std::string> get_gain_names( size_t chan = 0 );
+ osmosdr::gain_range_t get_gain_range( size_t chan = 0 );
+ osmosdr::gain_range_t get_gain_range( const std::string & name, size_t chan = 0 );
+ //TODO: implement this: bool set_gain_mode( bool automatic, size_t chan = 0 );
+ //TODO: implement this: bool get_gain_mode( size_t chan = 0 );
+ double set_gain( double gain, size_t chan = 0 );
+ double set_gain( double gain, const std::string & name, size_t chan = 0 );
+ double get_gain( size_t chan = 0 );
+ double get_gain( const std::string & name, size_t chan = 0 );
+
+ double set_bb_gain( double gain, size_t chan = 0 );
+
+ std::vector< std::string > get_antennas( size_t chan = 0 );
+ std::string set_antenna( const std::string & antenna, size_t chan = 0 );
+ std::string get_antenna( size_t chan = 0 );
+
+ double set_bandwidth( double bandwidth, size_t chan = 0 );
+ double get_bandwidth( size_t chan = 0 );
+
+private:
+
+ void freesrp_tx_callback(std::vector<::FreeSRP::sample> &samples);
+
+ bool _running = false;
+
+ std::mutex _buf_mut{};
+ std::condition_variable _buf_cond{};
+ size_t _buf_available_space = FREESRP_RX_TX_QUEUE_SIZE;
+ moodycamel::ReaderWriterQueue<::FreeSRP::sample> _buf_queue{FREESRP_RX_TX_QUEUE_SIZE};
+};
+
+#endif /* INCLUDED_FREESRP_SINK_C_H */
diff --git a/lib/freesrp/freesrp_source_c.cc b/lib/freesrp/freesrp_source_c.cc
new file mode 100644
index 0000000..9c56780
--- /dev/null
+++ b/lib/freesrp/freesrp_source_c.cc
@@ -0,0 +1,341 @@
+#include "freesrp_source_c.h"
+
+using namespace FreeSRP;
+using namespace std;
+
+freesrp_source_c_sptr make_freesrp_source_c (const string &args)
+{
+ return gnuradio::get_initial_sptr(new freesrp_source_c (args));
+}
+
+/*
+ * Specify constraints on number of input and output streams.
+ * This info is used to construct the input and output signatures
+ * (2nd & 3rd args to gr_block's constructor). The input and
+ * output signatures are used by the runtime system to
+ * check that a valid number and type of inputs and outputs
+ * are connected to this block. In this case, we accept
+ * only 0 input and 1 output.
+ */
+static const int MIN_IN = 0; // minimum number of input streams
+static const int MAX_IN = 0; // maximum number of input streams
+static const int MIN_OUT = 1; // minimum number of output streams
+static const int MAX_OUT = 1; // maximum number of output streams
+
+freesrp_source_c::freesrp_source_c (const string & args) : gr::sync_block ("freesrp_source_c",
+ gr::io_signature::make (MIN_IN, MAX_IN, sizeof (gr_complex)),
+ gr::io_signature::make (MIN_OUT, MAX_OUT, sizeof (gr_complex))),
+ freesrp_common(args)
+{
+ if(_srp == nullptr)
+ {
+ throw runtime_error("FreeSRP not initialized!");
+ }
+}
+
+bool freesrp_source_c::start()
+{
+ response res = _srp->send_cmd({SET_DATAPATH_EN, 1});
+ if(res.error != CMD_OK)
+ {
+ return false;
+ }
+ _srp->start_rx(std::bind(&freesrp_source_c::freesrp_rx_callback, this, std::placeholders::_1));
+
+ _running = true;
+
+ return true;
+}
+
+bool freesrp_source_c::stop()
+{
+ _srp->send_cmd({SET_DATAPATH_EN, 0});
+ _srp->stop_rx();
+
+ _running = false;
+
+ return true;
+}
+
+void freesrp_source_c::freesrp_rx_callback(const vector<sample> &samples)
+{
+ unique_lock<std::mutex> lk(_buf_mut);
+
+ for(const sample &s : samples)
+ {
+ if(!_buf_queue.try_enqueue(s))
+ {
+ if(!_ignore_overflow)
+ {
+ throw runtime_error("RX buffer overflow");
+ }
+ }
+ else
+ {
+ _buf_num_samples++;
+ }
+ }
+
+ _buf_cond.notify_one();
+}
+
+int freesrp_source_c::work(int noutput_items, gr_vector_const_void_star& input_items, gr_vector_void_star& output_items)
+{
+ gr_complex *out = static_cast<gr_complex *>(output_items[0]);
+
+ unique_lock<std::mutex> lk(_buf_mut);
+
+ if(!_running)
+ {
+ return WORK_DONE;
+ }
+
+ // Wait until enough samples collected
+ while(_buf_num_samples < (unsigned int) noutput_items)
+ {
+ _buf_cond.wait(lk);
+ }
+
+ for(int i = 0; i < noutput_items; ++i)
+ {
+ sample s;
+ if(!_buf_queue.try_dequeue(s))
+ {
+ // This should not be happening
+ throw runtime_error("Failed to get sample from buffer. This should never happen. Number of available samples reported to be " + to_string(_buf_num_samples) + ", noutput_items=" + to_string(noutput_items) + ", i=" + to_string(i));
+ }
+ else
+ {
+ _buf_num_samples--;
+ }
+
+ out[i] = gr_complex(((float) s.i) / 2048.0f, ((float) s.q) / 2048.0f);
+ }
+
+ return noutput_items;
+}
+
+double freesrp_source_c::set_sample_rate( double rate )
+{
+ command cmd = _srp->make_command(SET_RX_SAMP_FREQ, rate);
+ response r = _srp->send_cmd(cmd);
+ if(r.error != CMD_OK)
+ {
+ cerr << "Could not set RX sample rate, error: " << r.error << endl;
+ return 0;
+ }
+ else
+ {
+ return static_cast<double>(r.param);
+ }
+}
+
+double freesrp_source_c::get_sample_rate( void )
+{
+ response r = _srp->send_cmd({GET_RX_SAMP_FREQ, 0});
+ if(r.error != CMD_OK)
+ {
+ cerr << "Could not get RX sample rate, error: " << r.error << endl;
+ return 0;
+ }
+ else
+ {
+ return static_cast<double>(r.param);
+ }
+}
+
+double freesrp_source_c::set_center_freq( double freq, size_t chan )
+{
+ command cmd = _srp->make_command(SET_RX_LO_FREQ, freq);
+ response r = _srp->send_cmd(cmd);
+ if(r.error != CMD_OK)
+ {
+ cerr << "Could not set RX LO frequency, error: " << r.error << endl;
+ return 0;
+ }
+ else
+ {
+ return static_cast<double>(r.param);
+ }
+}
+
+double freesrp_source_c::get_center_freq( size_t chan )
+{
+ response r = _srp->send_cmd({GET_RX_LO_FREQ, 0});
+ if(r.error != CMD_OK)
+ {
+ cerr << "Could not get RX LO frequency, error: " << r.error << endl;
+ return 0;
+ }
+ else
+ {
+ return static_cast<double>(r.param);
+ }
+}
+
+vector<string> freesrp_source_c::get_gain_names( size_t chan )
+{
+ vector<string> names;
+
+ names.push_back("RF");
+
+ return names;
+}
+
+osmosdr::gain_range_t freesrp_source_c::get_gain_range(size_t chan)
+{
+ osmosdr::meta_range_t gain_ranges;
+
+ gain_ranges.push_back(osmosdr::range_t(0, 74, 1));
+
+ return gain_ranges;
+}
+
+bool freesrp_source_c::set_gain_mode( bool automatic, size_t chan )
+{
+ uint8_t gc_mode = RF_GAIN_SLOWATTACK_AGC;
+
+ if(!automatic)
+ {
+ gc_mode = RF_GAIN_MGC;
+ }
+
+ command cmd = _srp->make_command(SET_RX_GC_MODE, gc_mode);
+ response r = _srp->send_cmd(cmd);
+ if(r.error != CMD_OK)
+ {
+ cerr << "Could not set RX RF gain control mode, error: " << r.error << endl;
+ return false;
+ }
+ else
+ {
+ return r.param != RF_GAIN_MGC;
+ }
+}
+
+bool freesrp_source_c::get_gain_mode( size_t chan )
+{
+ response r = _srp->send_cmd({GET_RX_GC_MODE, 0});
+ if(r.error != CMD_OK)
+ {
+ cerr << "Could not get RX RF gain control mode, error: " << r.error << endl;
+ return false;
+ }
+ else
+ {
+ return r.param != RF_GAIN_MGC;
+ }
+}
+
+osmosdr::gain_range_t freesrp_source_c::get_gain_range(const string& name, size_t chan)
+{
+ return get_gain_range(chan);
+}
+
+double freesrp_source_c::set_gain(double gain, size_t chan)
+{
+ gain = get_gain_range().clip(gain);
+
+ command cmd = _srp->make_command(SET_RX_RF_GAIN, gain);
+ response r = _srp->send_cmd(cmd);
+ if(r.error != CMD_OK)
+ {
+ cerr << "Could not set RX RF gain, error: " << r.error << endl;
+ return 0;
+ }
+ else
+ {
+ return r.param;
+ }
+}
+
+double freesrp_source_c::set_gain(double gain, const string& name, size_t chan)
+{
+ if(name == "RF")
+ {
+ return set_gain(gain, chan);
+ }
+ else
+ {
+ return 0;
+ }
+}
+
+double freesrp_source_c::get_gain(size_t chan)
+{
+ response r = _srp->send_cmd({GET_RX_RF_GAIN, 0});
+ if(r.error != CMD_OK)
+ {
+ cerr << "Could not get RX RF gain, error: " << r.error << endl;
+ return 0;
+ }
+ else
+ {
+ return (static_cast<double>(r.param));
+ }
+}
+
+double freesrp_source_c::get_gain(const string& name, size_t chan)
+{
+ if(name == "RF")
+ {
+ return get_gain(chan);
+ }
+ else
+ {
+ return 0;
+ }
+}
+
+double freesrp_source_c::set_bb_gain(double gain, size_t chan)
+{
+ return set_gain(gain, chan);
+}
+
+vector<string> freesrp_source_c::get_antennas(size_t chan)
+{
+ vector<string> antennas;
+
+ antennas.push_back(get_antenna(chan));
+
+ return antennas;
+}
+
+string freesrp_source_c::set_antenna(const string& antenna, size_t chan)
+{
+ return get_antenna(chan);
+}
+
+string freesrp_source_c::get_antenna(size_t chan)
+{
+ return "RX";
+}
+
+double freesrp_source_c::set_bandwidth(double bandwidth, size_t chan)
+{
+ command cmd = _srp->make_command(SET_RX_RF_BANDWIDTH, bandwidth);
+ response r = _srp->send_cmd(cmd);
+ if(r.error != CMD_OK)
+ {
+ cerr << "Could not set RX RF bandwidth, error: " << r.error << endl;
+ return 0;
+ }
+ else
+ {
+ return static_cast<double>(r.param);
+ }
+}
+
+double freesrp_source_c::get_bandwidth(size_t chan)
+{
+ response r = _srp->send_cmd({GET_RX_RF_BANDWIDTH, 0});
+ if(r.error != CMD_OK)
+ {
+ cerr << "Could not get RX RF bandwidth, error: " << r.error << endl;
+ return 0;
+ }
+ else
+ {
+ return static_cast<double>(r.param);
+ }
+}
diff --git a/lib/freesrp/freesrp_source_c.h b/lib/freesrp/freesrp_source_c.h
new file mode 100644
index 0000000..08f115c
--- /dev/null
+++ b/lib/freesrp/freesrp_source_c.h
@@ -0,0 +1,131 @@
+/* -*- c++ -*- */
+/*
+ * Copyright 2015 Lukas Lao Beyer
+ * Copyright 2013 Dimitri Stolnikov <horiz0n@gmx.net>
+ *
+ * GNU Radio is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3, or (at your option)
+ * any later version.
+ *
+ * GNU Radio is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with GNU Radio; see the file COPYING. If not, write to
+ * the Free Software Foundation, Inc., 51 Franklin Street,
+ * Boston, MA 02110-1301, USA.
+ */
+#ifndef INCLUDED_FREESRP_SOURCE_C_H
+#define INCLUDED_FREESRP_SOURCE_C_H
+
+#include <gnuradio/thread/thread.h>
+#include <gnuradio/block.h>
+#include <gnuradio/sync_block.h>
+
+#include "osmosdr/ranges.h"
+#include "source_iface.h"
+
+#include "freesrp_common.h"
+
+#include "readerwriterqueue/readerwriterqueue.h"
+
+#include <freesrp.hpp>
+
+#include <mutex>
+#include <condition_variable>
+
+class freesrp_source_c;
+
+/*
+ * We use boost::shared_ptr's instead of raw pointers for all access
+ * to gr_blocks (and many other data structures). The shared_ptr gets
+ * us transparent reference counting, which greatly simplifies storage
+ * management issues. This is especially helpful in our hybrid
+ * C++ / Python system.
+ *
+ * See http://www.boost.org/libs/smart_ptr/smart_ptr.htm
+ *
+ * As a convention, the _sptr suffix indicates a boost::shared_ptr
+ */
+typedef boost::shared_ptr<freesrp_source_c> freesrp_source_c_sptr;
+
+/*!
+ * \brief Return a shared_ptr to a new instance of freesrp_source_c.
+ *
+ * To avoid accidental use of raw pointers, freesrp_source_c's
+ * constructor is private. make_freesrp_source_c is the public
+ * interface for creating new instances.
+ */
+freesrp_source_c_sptr make_freesrp_source_c (const std::string & args = "");
+
+class freesrp_source_c :
+ public gr::sync_block,
+ public source_iface,
+ public freesrp_common
+{
+private:
+ // The friend declaration allows make_freesrp_source_c to
+ // access the private constructor.
+ friend freesrp_source_c_sptr make_freesrp_source_c (const std::string & args);
+
+ freesrp_source_c (const std::string & args); // private constructor
+
+public:
+
+ // From freesrp_common:
+ static std::vector<std::string> get_devices() { return freesrp_common::get_devices(); };
+ size_t get_num_channels( void ) { return freesrp_common::get_num_channels(); }
+ osmosdr::meta_range_t get_sample_rates( void ) { return freesrp_common::get_sample_rates(); }
+ osmosdr::freq_range_t get_freq_range( size_t chan = 0 ) { return freesrp_common::get_freq_range(chan); }
+ osmosdr::freq_range_t get_bandwidth_range( size_t chan = 0 ) { return freesrp_common::get_bandwidth_range(chan); }
+ double set_freq_corr( double ppm, size_t chan = 0 ) { return freesrp_common::set_freq_corr(ppm, chan); }
+ double get_freq_corr( size_t chan = 0 ) { return freesrp_common::get_freq_corr(chan); }
+
+ bool start();
+ bool stop();
+
+ int work( int noutput_items,
+ gr_vector_const_void_star &input_items,
+ gr_vector_void_star &output_items );
+
+ double set_sample_rate( double rate );
+ double get_sample_rate( void );
+
+ double set_center_freq( double freq, size_t chan = 0 );
+ double get_center_freq( size_t chan = 0 );
+
+ std::vector<std::string> get_gain_names( size_t chan = 0 );
+ osmosdr::gain_range_t get_gain_range( size_t chan = 0 );
+ osmosdr::gain_range_t get_gain_range( const std::string & name, size_t chan = 0 );
+ bool set_gain_mode( bool automatic, size_t chan = 0 );
+ bool get_gain_mode( size_t chan = 0 );
+ double set_gain( double gain, size_t chan = 0 );
+ double set_gain( double gain, const std::string & name, size_t chan = 0 );
+ double get_gain( size_t chan = 0 );
+ double get_gain( const std::string & name, size_t chan = 0 );
+
+ double set_bb_gain( double gain, size_t chan = 0 );
+
+ std::vector< std::string > get_antennas( size_t chan = 0 );
+ std::string set_antenna( const std::string & antenna, size_t chan = 0 );
+ std::string get_antenna( size_t chan = 0 );
+
+ double set_bandwidth( double bandwidth, size_t chan = 0 );
+ double get_bandwidth( size_t chan = 0 );
+
+private:
+
+ void freesrp_rx_callback(const std::vector<FreeSRP::sample> &samples);
+
+ bool _running = false;
+
+ std::mutex _buf_mut{};
+ std::condition_variable _buf_cond{};
+ size_t _buf_num_samples = 0;
+ moodycamel::ReaderWriterQueue<FreeSRP::sample> _buf_queue{FREESRP_RX_TX_QUEUE_SIZE};
+};
+
+#endif /* INCLUDED_FREESRP_SOURCE_C_H */
diff --git a/lib/freesrp/readerwriterqueue/LICENSE.md b/lib/freesrp/readerwriterqueue/LICENSE.md
new file mode 100644
index 0000000..76d802e
--- /dev/null
+++ b/lib/freesrp/readerwriterqueue/LICENSE.md
@@ -0,0 +1,28 @@
+This license applies to all the code in this repository except that written by third
+parties, namely the files in benchmarks/ext, which have their own licenses, and Jeff
+Preshing's semaphore implementation (used in the blocking queue) which has a zlib
+license (embedded in atomicops.h).
+
+Simplified BSD License:
+
+Copyright (c) 2013-2015, Cameron Desrochers
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+- Redistributions of source code must retain the above copyright notice, this list of
+conditions and the following disclaimer.
+- Redistributions in binary form must reproduce the above copyright notice, this list of
+conditions and the following disclaimer in the documentation and/or other materials
+provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/lib/freesrp/readerwriterqueue/README.md b/lib/freesrp/readerwriterqueue/README.md
new file mode 100644
index 0000000..3d94e1a
--- /dev/null
+++ b/lib/freesrp/readerwriterqueue/README.md
@@ -0,0 +1,114 @@
+# A single-producer, single-consumer lock-free queue for C++
+
+This mini-repository has my very own implementation of a lock-free queue (that I designed from scratch) for C++.
+
+It only supports a two-thread use case (one consuming, and one producing). The threads can't switch roles, though
+you could use this queue completely from a single thread if you wish (but that would sort of defeat the purpose!).
+
+Note: If you need a general-purpose multi-producer, multi-consumer lock-free queue, I have [one of those too][mpmc].
+
+
+## Features
+
+- [Blazing fast][benchmarks]
+- Compatible with C++11 (supports moving objects instead of making copies)
+- Fully generic (templated container of any type) -- just like `std::queue`, you never need to allocate memory for elements yourself
+ (which saves you the hassle of writing a lock-free memory manager to hold the elements you're queueing)
+- Allocates memory up front, in contiguous blocks
+- Provides a `try_enqueue` method which is guaranteed never to allocate memory (the queue starts with an initial capacity)
+- Also provides an `enqueue` method which can dynamically grow the size of the queue as needed
+- Also provides a blocking version with `wait_dequeue`
+- Completely "wait-free" (no compare-and-swap loop). Enqueue and dequeue are always O(1) (not counting memory allocation)
+- On x86, the memory barriers compile down to no-ops, meaning enqueue and dequeue are just a simple series of loads and stores (and branches)
+
+
+## Use
+
+Simply drop the readerwriterqueue.h and atomicops.h files into your source code and include them :-)
+A modern compiler is required (MSVC2010+, GCC 4.7+, ICC 13+, or any C++11 compliant compiler should work).
+
+Note: If you're using GCC, you really do need GCC 4.7 or above -- [4.6 has a bug][gcc46bug] that prevents the atomic fence primitives
+from working correctly.
+
+Example:
+
+```cpp
+using namespace moodycamel;
+
+ReaderWriterQueue<int> q(100); // Reserve space for at least 100 elements up front
+
+q.enqueue(17); // Will allocate memory if the queue is full
+bool succeeded = q.try_enqueue(18); // Will only succeed if the queue has an empty slot (never allocates)
+assert(succeeded);
+
+int number;
+succeeded = q.try_dequeue(number); // Returns false if the queue was empty
+
+assert(succeeded && number == 17);
+
+// You can also peek at the front item of the queue (consumer only)
+int* front = q.peek();
+assert(*front == 18);
+succeeded = q.try_dequeue(number);
+assert(succeeded && number == 18);
+front = q.peek();
+assert(front == nullptr); // Returns nullptr if the queue was empty
+```
+
+The blocking version has the exact same API, with the addition of a `wait_dequeue` method:
+
+```cpp
+BlockingReaderWriterQueue<int> q;
+
+std::thread reader([&]() {
+ int item;
+ for (int i = 0; i != 100; ++i) {
+ q.wait_dequeue(item);
+ }
+});
+std::thread writer([&]() {
+ for (int i = 0; i != 100; ++i) {
+ q.enqueue(i);
+ }
+});
+writer.join();
+reader.join();
+
+assert(q.size_approx() == 0);
+```
+
+Note that `wait_dequeue` will block indefinitely while the queue is empty; this
+means care must be taken to only call `wait_dequeue` if you're sure another element
+will come along eventually, or if the queue has a static lifetime. This is because
+destroying the queue while a thread is waiting on it will invoke undefined behaviour.
+
+
+## Disclaimers
+
+The queue should only be used on platforms where aligned integer and pointer access is atomic; fortunately, that
+includes all modern processors (e.g. x86/x86-64, ARM, and PowerPC). *Not* for use with a DEC Alpha processor (which has very weak memory ordering) :-)
+
+Note that it's only been tested on x86(-64); if someone has access to other processors I'd love to run some tests on
+anything that's not x86-based.
+
+Finally, I am not an expert. This is my first foray into lock-free programming, and though I'm confident in the code,
+it's possible that there are bugs despite the effort I put into designing and testing this data structure.
+
+Use this code at your own risk; in particular, lock-free programming is a patent minefield, and this code may very
+well violate a pending patent (I haven't looked). It's worth noting that I came up with this algorithm and
+implementation from scratch, independent of any existing lock-free queues.
+
+
+## More info
+
+See the [LICENSE.md][license] file for the license (simplified BSD).
+
+My [blog post][blog] introduces the context that led to this code, and may be of interest if you're curious
+about lock-free programming.
+
+
+[blog]: http://moodycamel.com/blog/2013/a-fast-lock-free-queue-for-c++
+[license]: LICENSE.md
+[benchmarks]: http://moodycamel.com/blog/2013/a-fast-lock-free-queue-for-c++#benchmarks
+[gcc46bug]: http://stackoverflow.com/questions/16429669/stdatomic-thread-fence-has-undefined-reference
+[mpmc]: https://github.com/cameron314/concurrentqueue
diff --git a/lib/freesrp/readerwriterqueue/atomicops.h b/lib/freesrp/readerwriterqueue/atomicops.h
new file mode 100644
index 0000000..1bd2455
--- /dev/null
+++ b/lib/freesrp/readerwriterqueue/atomicops.h
@@ -0,0 +1,577 @@
+// ©2013-2015 Cameron Desrochers.
+// Distributed under the simplified BSD license (see the license file that
+// should have come with this header).
+// Uses Jeff Preshing's semaphore implementation (under the terms of its
+// separate zlib license, embedded below).
+
+#pragma once
+
+// Provides portable (VC++2010+, Intel ICC 13, GCC 4.7+, and anything C++11 compliant) implementation
+// of low-level memory barriers, plus a few semi-portable utility macros (for inlining and alignment).
+// Also has a basic atomic type (limited to hardware-supported atomics with no memory ordering guarantees).
+// Uses the AE_* prefix for macros (historical reasons), and the "moodycamel" namespace for symbols.
+
+#include <cassert>
+#include <type_traits>
+
+
+// Platform detection
+#if defined(__INTEL_COMPILER)
+#define AE_ICC
+#elif defined(_MSC_VER)
+#define AE_VCPP
+#elif defined(__GNUC__)
+#define AE_GCC
+#endif
+
+#if defined(_M_IA64) || defined(__ia64__)
+#define AE_ARCH_IA64
+#elif defined(_WIN64) || defined(__amd64__) || defined(_M_X64) || defined(__x86_64__)
+#define AE_ARCH_X64
+#elif defined(_M_IX86) || defined(__i386__)
+#define AE_ARCH_X86
+#elif defined(_M_PPC) || defined(__powerpc__)
+#define AE_ARCH_PPC
+#else
+#define AE_ARCH_UNKNOWN
+#endif
+
+
+// AE_UNUSED
+#define AE_UNUSED(x) ((void)x)
+
+
+// AE_FORCEINLINE
+#if defined(AE_VCPP) || defined(AE_ICC)
+#define AE_FORCEINLINE __forceinline
+#elif defined(AE_GCC)
+//#define AE_FORCEINLINE __attribute__((always_inline))
+#define AE_FORCEINLINE inline
+#else
+#define AE_FORCEINLINE inline
+#endif
+
+
+// AE_ALIGN
+#if defined(AE_VCPP) || defined(AE_ICC)
+#define AE_ALIGN(x) __declspec(align(x))
+#elif defined(AE_GCC)
+#define AE_ALIGN(x) __attribute__((aligned(x)))
+#else
+// Assume GCC compliant syntax...
+#define AE_ALIGN(x) __attribute__((aligned(x)))
+#endif
+
+
+// Portable atomic fences implemented below:
+
+namespace moodycamel {
+
+enum memory_order {
+ memory_order_relaxed,
+ memory_order_acquire,
+ memory_order_release,
+ memory_order_acq_rel,
+ memory_order_seq_cst,
+
+ // memory_order_sync: Forces a full sync:
+ // #LoadLoad, #LoadStore, #StoreStore, and most significantly, #StoreLoad
+ memory_order_sync = memory_order_seq_cst
+};
+
+} // end namespace moodycamel
+
+#if (defined(AE_VCPP) && (_MSC_VER < 1700 || defined(__cplusplus_cli))) || defined(AE_ICC)
+// VS2010 and ICC13 don't support std::atomic_*_fence, implement our own fences
+
+#include <intrin.h>
+
+#if defined(AE_ARCH_X64) || defined(AE_ARCH_X86)
+#define AeFullSync _mm_mfence
+#define AeLiteSync _mm_mfence
+#elif defined(AE_ARCH_IA64)
+#define AeFullSync __mf
+#define AeLiteSync __mf
+#elif defined(AE_ARCH_PPC)
+#include <ppcintrinsics.h>
+#define AeFullSync __sync
+#define AeLiteSync __lwsync
+#endif
+
+
+#ifdef AE_VCPP
+#pragma warning(push)
+#pragma warning(disable: 4365) // Disable erroneous 'conversion from long to unsigned int, signed/unsigned mismatch' error when using `assert`
+#ifdef __cplusplus_cli
+#pragma managed(push, off)
+#endif
+#endif
+
+namespace moodycamel {
+
+// Compiler-only barrier (emits no CPU fence instruction): maps each ordering
+// onto the corresponding VC++/ICC _ReadBarrier/_WriteBarrier/_ReadWriteBarrier
+// intrinsic to stop the compiler from reordering memory accesses across it.
+AE_FORCEINLINE void compiler_fence(memory_order order)
+{
+	switch (order) {
+		case memory_order_relaxed: break;
+		case memory_order_acquire: _ReadBarrier(); break;
+		case memory_order_release: _WriteBarrier(); break;
+		case memory_order_acq_rel: _ReadWriteBarrier(); break;
+		case memory_order_seq_cst: _ReadWriteBarrier(); break;
+		default: assert(false);
+	}
+}
+
+// x86/x64 have a strong memory model -- all loads and stores have
+// acquire and release semantics automatically (so only need compiler
+// barriers for those).
+#if defined(AE_ARCH_X86) || defined(AE_ARCH_X64)
+// Hardware fence for x86/x64 under pre-C++11 VC++/ICC: only seq_cst needs a
+// real full sync (AeFullSync, i.e. mfence); the weaker orders reduce to
+// compiler barriers thanks to x86's strong memory model.
+AE_FORCEINLINE void fence(memory_order order)
+{
+	switch (order) {
+		case memory_order_relaxed: break;
+		case memory_order_acquire: _ReadBarrier(); break;
+		case memory_order_release: _WriteBarrier(); break;
+		case memory_order_acq_rel: _ReadWriteBarrier(); break;
+		case memory_order_seq_cst:
+			_ReadWriteBarrier();
+			AeFullSync();
+			_ReadWriteBarrier();
+			break;
+		default: assert(false);
+	}
+}
+#else
+// Fallback fence for non-x86 architectures under pre-C++11 VC++/ICC:
+// conservatively brackets a hardware sync (AeLiteSync/AeFullSync) with
+// compiler barriers for every non-relaxed ordering.
+AE_FORCEINLINE void fence(memory_order order)
+{
+	// Non-specialized arch, use heavier memory barriers everywhere just in case :-(
+	switch (order) {
+		case memory_order_relaxed:
+			break;
+		case memory_order_acquire:
+			_ReadBarrier();
+			AeLiteSync();
+			_ReadBarrier();
+			break;
+		case memory_order_release:
+			_WriteBarrier();
+			AeLiteSync();
+			_WriteBarrier();
+			break;
+		case memory_order_acq_rel:
+			_ReadWriteBarrier();
+			AeLiteSync();
+			_ReadWriteBarrier();
+			break;
+		case memory_order_seq_cst:
+			_ReadWriteBarrier();
+			AeFullSync();
+			_ReadWriteBarrier();
+			break;
+		default: assert(false);
+	}
+}
+#endif
+} // end namespace moodycamel
+#else
+// Use standard library of atomics
+#include <atomic>
+
+namespace moodycamel {
+
+// C++11 path: compiler-only barrier implemented with std::atomic_signal_fence
+// (reordering constraint on the compiler, no CPU fence emitted).
+AE_FORCEINLINE void compiler_fence(memory_order order)
+{
+	switch (order) {
+		case memory_order_relaxed: break;
+		case memory_order_acquire: std::atomic_signal_fence(std::memory_order_acquire); break;
+		case memory_order_release: std::atomic_signal_fence(std::memory_order_release); break;
+		case memory_order_acq_rel: std::atomic_signal_fence(std::memory_order_acq_rel); break;
+		case memory_order_seq_cst: std::atomic_signal_fence(std::memory_order_seq_cst); break;
+		default: assert(false);
+	}
+}
+
+// C++11 path: full inter-thread fence implemented with std::atomic_thread_fence.
+AE_FORCEINLINE void fence(memory_order order)
+{
+	switch (order) {
+		case memory_order_relaxed: break;
+		case memory_order_acquire: std::atomic_thread_fence(std::memory_order_acquire); break;
+		case memory_order_release: std::atomic_thread_fence(std::memory_order_release); break;
+		case memory_order_acq_rel: std::atomic_thread_fence(std::memory_order_acq_rel); break;
+		case memory_order_seq_cst: std::atomic_thread_fence(std::memory_order_seq_cst); break;
+		default: assert(false);
+	}
+}
+
+} // end namespace moodycamel
+
+#endif
+
+
+#if !defined(AE_VCPP) || (_MSC_VER >= 1700 && !defined(__cplusplus_cli))
+#define AE_USE_STD_ATOMIC_FOR_WEAK_ATOMIC
+#endif
+
+#ifdef AE_USE_STD_ATOMIC_FOR_WEAK_ATOMIC
+#include <atomic>
+#endif
+#include <utility>
+
+// WARNING: *NOT* A REPLACEMENT FOR std::atomic. READ CAREFULLY:
+// Provides basic support for atomic variables -- no memory ordering guarantees are provided.
+// The guarantee of atomicity is only made for types that already have atomic load and store guarantees
+// at the hardware level -- on most platforms this generally means aligned pointers and integers (only).
+namespace moodycamel {
+// Minimal atomic wrapper: all loads and stores use relaxed ordering only, so
+// callers must pair accesses with explicit fence()/compiler_fence() calls for
+// any ordering guarantees. Atomicity itself relies on naturally-atomic aligned
+// access for T (aligned integers/pointers on mainstream hardware).
+template<typename T>
+class weak_atomic
+{
+public:
+	weak_atomic() { }
+#ifdef AE_VCPP
+#pragma warning(disable: 4100) // Get rid of (erroneous) 'unreferenced formal parameter' warning
+#endif
+	template<typename U> weak_atomic(U&& x) : value(std::forward<U>(x)) { }
+#ifdef __cplusplus_cli
+	// Work around bug with universal reference/nullptr combination that only appears when /clr is on
+	weak_atomic(nullptr_t) : value(nullptr) { }
+#endif
+	weak_atomic(weak_atomic const& other) : value(other.value) { }
+	weak_atomic(weak_atomic&& other) : value(std::move(other.value)) { }
+#ifdef AE_VCPP
+#pragma warning(default: 4100)
+#endif
+
+	AE_FORCEINLINE operator T() const { return load(); }
+
+
+#ifndef AE_USE_STD_ATOMIC_FOR_WEAK_ATOMIC
+	template<typename U> AE_FORCEINLINE weak_atomic const& operator=(U&& x) { value = std::forward<U>(x); return *this; }
+	AE_FORCEINLINE weak_atomic const& operator=(weak_atomic const& other) { value = other.value; return *this; }
+
+	AE_FORCEINLINE T load() const { return value; }
+
+	// Pre-std::atomic fallback: only 32-bit (and, on AMD64, 64-bit) types are
+	// supported via the Interlocked intrinsics.
+	AE_FORCEINLINE T fetch_add_acquire(T increment)
+	{
+#if defined(AE_ARCH_X64) || defined(AE_ARCH_X86)
+		if (sizeof(T) == 4) return _InterlockedExchangeAdd((long volatile*)&value, (long)increment);
+#if defined(_M_AMD64)
+		else if (sizeof(T) == 8) return _InterlockedExchangeAdd64((long long volatile*)&value, (long long)increment);
+#endif
+#else
+#error Unsupported platform
+#endif
+		assert(false && "T must be either a 32 or 64 bit type");
+		return value;
+	}
+
+	AE_FORCEINLINE T fetch_add_release(T increment)
+	{
+#if defined(AE_ARCH_X64) || defined(AE_ARCH_X86)
+		if (sizeof(T) == 4) return _InterlockedExchangeAdd((long volatile*)&value, (long)increment);
+#if defined(_M_AMD64)
+		else if (sizeof(T) == 8) return _InterlockedExchangeAdd64((long long volatile*)&value, (long long)increment);
+#endif
+#else
+#error Unsupported platform
+#endif
+		assert(false && "T must be either a 32 or 64 bit type");
+		return value;
+	}
+#else
+	template<typename U>
+	AE_FORCEINLINE weak_atomic const& operator=(U&& x)
+	{
+		value.store(std::forward<U>(x), std::memory_order_relaxed);
+		return *this;
+	}
+
+	AE_FORCEINLINE weak_atomic const& operator=(weak_atomic const& other)
+	{
+		value.store(other.value.load(std::memory_order_relaxed), std::memory_order_relaxed);
+		return *this;
+	}
+
+	AE_FORCEINLINE T load() const { return value.load(std::memory_order_relaxed); }
+
+	AE_FORCEINLINE T fetch_add_acquire(T increment)
+	{
+		return value.fetch_add(increment, std::memory_order_acquire);
+	}
+
+	AE_FORCEINLINE T fetch_add_release(T increment)
+	{
+		return value.fetch_add(increment, std::memory_order_release);
+	}
+#endif
+
+
+private:
+#ifndef AE_USE_STD_ATOMIC_FOR_WEAK_ATOMIC
+	// No std::atomic support, but still need to circumvent compiler optimizations.
+	// `volatile` will make memory access slow, but is guaranteed to be reliable.
+	volatile T value;
+#else
+	std::atomic<T> value;
+#endif
+};
+
+} // end namespace moodycamel
+
+
+
+// Portable single-producer, single-consumer semaphore below:
+
+#if defined(_WIN32)
+// Avoid including windows.h in a header; we only need a handful of
+// items, so we'll redeclare them here (this is relatively safe since
+// the API generally has to remain stable between Windows versions).
+// I know this is an ugly hack but it still beats polluting the global
+// namespace with thousands of generic names or adding a .cpp for nothing.
+extern "C" {
+ struct _SECURITY_ATTRIBUTES;
+ __declspec(dllimport) void* __stdcall CreateSemaphoreW(_SECURITY_ATTRIBUTES* lpSemaphoreAttributes, long lInitialCount, long lMaximumCount, const wchar_t* lpName);
+ __declspec(dllimport) int __stdcall CloseHandle(void* hObject);
+ __declspec(dllimport) unsigned long __stdcall WaitForSingleObject(void* hHandle, unsigned long dwMilliseconds);
+ __declspec(dllimport) int __stdcall ReleaseSemaphore(void* hSemaphore, long lReleaseCount, long* lpPreviousCount);
+}
+#elif defined(__MACH__)
+#include <mach/mach.h>
+#elif defined(__unix__)
+#include <semaphore.h>
+#endif
+
+namespace moodycamel
+{
+ // Code in the spsc_sema namespace below is an adaptation of Jeff Preshing's
+ // portable + lightweight semaphore implementations, originally from
+ // https://github.com/preshing/cpp11-on-multicore/blob/master/common/sema.h
+ // LICENSE:
+ // Copyright (c) 2015 Jeff Preshing
+ //
+ // This software is provided 'as-is', without any express or implied
+ // warranty. In no event will the authors be held liable for any damages
+ // arising from the use of this software.
+ //
+ // Permission is granted to anyone to use this software for any purpose,
+ // including commercial applications, and to alter it and redistribute it
+ // freely, subject to the following restrictions:
+ //
+ // 1. The origin of this software must not be misrepresented; you must not
+ // claim that you wrote the original software. If you use this software
+ // in a product, an acknowledgement in the product documentation would be
+ // appreciated but is not required.
+ // 2. Altered source versions must be plainly marked as such, and must not be
+ // misrepresented as being the original software.
+ // 3. This notice may not be removed or altered from any source distribution.
+ namespace spsc_sema
+ {
+#if defined(_WIN32)
+	// Win32 counting-semaphore wrapper (non-copyable); uses the kernel32
+	// functions redeclared at the top of this header instead of <windows.h>.
+	class Semaphore
+	{
+	private:
+		void* m_hSema;
+
+		Semaphore(const Semaphore& other);
+		Semaphore& operator=(const Semaphore& other);
+
+	public:
+		Semaphore(int initialCount = 0)
+		{
+			assert(initialCount >= 0);
+			const long maxLong = 0x7fffffff;
+			m_hSema = CreateSemaphoreW(nullptr, initialCount, maxLong, nullptr);
+		}
+
+		~Semaphore()
+		{
+			CloseHandle(m_hSema);
+		}
+
+		void wait()
+		{
+			const unsigned long infinite = 0xffffffff;
+			WaitForSingleObject(m_hSema, infinite);
+		}
+
+		void signal(int count = 1)
+		{
+			ReleaseSemaphore(m_hSema, count, nullptr);
+		}
+	};
+#elif defined(__MACH__)
+ //---------------------------------------------------------
+ // Semaphore (Apple iOS and OSX)
+ // Can't use POSIX semaphores due to http://lists.apple.com/archives/darwin-kernel/2009/Apr/msg00010.html
+ //---------------------------------------------------------
+	// Mach semaphore wrapper for macOS/iOS (non-copyable); signal(count) loops
+	// because the Mach API has no multi-release call.
+	class Semaphore
+	{
+	private:
+		semaphore_t m_sema;
+
+		Semaphore(const Semaphore& other);
+		Semaphore& operator=(const Semaphore& other);
+
+	public:
+		Semaphore(int initialCount = 0)
+		{
+			assert(initialCount >= 0);
+			semaphore_create(mach_task_self(), &m_sema, SYNC_POLICY_FIFO, initialCount);
+		}
+
+		~Semaphore()
+		{
+			semaphore_destroy(mach_task_self(), m_sema);
+		}
+
+		void wait()
+		{
+			semaphore_wait(m_sema);
+		}
+
+		void signal()
+		{
+			semaphore_signal(m_sema);
+		}
+
+		void signal(int count)
+		{
+			while (count-- > 0)
+			{
+				semaphore_signal(m_sema);
+			}
+		}
+	};
+#elif defined(__unix__)
+ //---------------------------------------------------------
+ // Semaphore (POSIX, Linux)
+ //---------------------------------------------------------
+	// POSIX semaphore wrapper (non-copyable); wait() retries sem_wait on EINTR
+	// so signal delivery (e.g. under a debugger) doesn't cause a spurious wakeup.
+	class Semaphore
+	{
+	private:
+		sem_t m_sema;
+
+		Semaphore(const Semaphore& other);
+		Semaphore& operator=(const Semaphore& other);
+
+	public:
+		Semaphore(int initialCount = 0)
+		{
+			assert(initialCount >= 0);
+			sem_init(&m_sema, 0, initialCount);
+		}
+
+		~Semaphore()
+		{
+			sem_destroy(&m_sema);
+		}
+
+		void wait()
+		{
+			// http://stackoverflow.com/questions/2013181/gdb-causes-sem-wait-to-fail-with-eintr-error
+			int rc;
+			do
+			{
+				rc = sem_wait(&m_sema);
+			}
+			while (rc == -1 && errno == EINTR);
+		}
+
+		void signal()
+		{
+			sem_post(&m_sema);
+		}
+
+		void signal(int count)
+		{
+			while (count-- > 0)
+			{
+				sem_post(&m_sema);
+			}
+		}
+	};
+#else
+#error Unsupported platform! (No semaphore wrapper available)
+#endif
+
+ //---------------------------------------------------------
+ // LightweightSemaphore
+ //---------------------------------------------------------
+	// Lightweight semaphore: m_count tracks available permits (a negative value
+	// indicates a waiter). The kernel Semaphore is only touched after a
+	// 10000-iteration spin fails, so the uncontended path stays in user space.
+	class LightweightSemaphore
+	{
+	public:
+		typedef std::make_signed<std::size_t>::type ssize_t;
+
+	private:
+		weak_atomic<ssize_t> m_count;
+		Semaphore m_sema;
+
+		// Slow path: spin briefly hoping for a permit, then decrement and, if the
+		// old count was <= 0, park on the kernel semaphore.
+		void waitWithPartialSpinning()
+		{
+			ssize_t oldCount;
+			// Is there a better way to set the initial spin count?
+			// If we lower it to 1000, testBenaphore becomes 15x slower on my Core i7-5930K Windows PC,
+			// as threads start hitting the kernel semaphore.
+			int spin = 10000;
+			while (--spin >= 0)
+			{
+				if (m_count.load() > 0)
+				{
+					m_count.fetch_add_acquire(-1);
+					return;
+				}
+				compiler_fence(memory_order_acquire); // Prevent the compiler from collapsing the loop.
+			}
+			oldCount = m_count.fetch_add_acquire(-1);
+			if (oldCount <= 0)
+			{
+				m_sema.wait();
+			}
+		}
+
+	public:
+		LightweightSemaphore(ssize_t initialCount = 0) : m_count(initialCount)
+		{
+			assert(initialCount >= 0);
+		}
+
+		bool tryWait()
+		{
+			if (m_count.load() > 0)
+			{
+				m_count.fetch_add_acquire(-1);
+				return true;
+			}
+			return false;
+		}
+
+		void wait()
+		{
+			if (!tryWait())
+				waitWithPartialSpinning();
+		}
+
+		void signal(ssize_t count = 1)
+		{
+			assert(count >= 0);
+			ssize_t oldCount = m_count.fetch_add_release(count);
+			assert(oldCount >= -1);
+			if (oldCount < 0)
+			{
+				m_sema.signal(1);
+			}
+		}
+
+		ssize_t availableApprox() const
+		{
+			ssize_t count = m_count.load();
+			return count > 0 ? count : 0;
+		}
+	};
+ } // end namespace spsc_sema
+} // end namespace moodycamel
+
+#if defined(AE_VCPP) && (_MSC_VER < 1700 || defined(__cplusplus_cli))
+#pragma warning(pop)
+#ifdef __cplusplus_cli
+#pragma managed(pop)
+#endif
+#endif
diff --git a/lib/freesrp/readerwriterqueue/readerwriterqueue.h b/lib/freesrp/readerwriterqueue/readerwriterqueue.h
new file mode 100644
index 0000000..e0ac56b
--- /dev/null
+++ b/lib/freesrp/readerwriterqueue/readerwriterqueue.h
@@ -0,0 +1,764 @@
+// ©2013-2015 Cameron Desrochers.
+// Distributed under the simplified BSD license (see the license file that
+// should have come with this header).
+
+#pragma once
+
+#include "atomicops.h"
+#include <type_traits>
+#include <utility>
+#include <cassert>
+#include <stdexcept>
+#include <cstdint>
+#include <cstdlib> // For malloc/free & size_t
+
+
+// A lock-free queue for a single-consumer, single-producer architecture.
+// The queue is also wait-free in the common path (except if more memory
+// needs to be allocated, in which case malloc is called).
+// Allocates memory sparingly (O(lg(n) times, amortized), and only once if
+// the original maximum size estimate is never exceeded.
+// Tested on x86/x64 processors, but semantics should be correct for all
+// architectures (given the right implementations in atomicops.h), provided
+// that aligned integer and pointer accesses are naturally atomic.
+// Note that there should only be one consumer thread and producer thread;
+// Switching roles of the threads, or using multiple consecutive threads for
+// one role, is not safe unless properly synchronized.
+// Using the queue exclusively from one thread is fine, though a bit silly.
+
+#define CACHE_LINE_SIZE 64
+
+#ifdef AE_VCPP
+#pragma warning(push)
+#pragma warning(disable: 4324) // structure was padded due to __declspec(align())
+#pragma warning(disable: 4820) // padding was added
+#pragma warning(disable: 4127) // conditional expression is constant
+#endif
+
+namespace moodycamel {
+
+template<typename T, size_t MAX_BLOCK_SIZE = 512>
+class ReaderWriterQueue
+{
+ // Design: Based on a queue-of-queues. The low-level queues are just
+ // circular buffers with front and tail indices indicating where the
+ // next element to dequeue is and where the next element can be enqueued,
+ // respectively. Each low-level queue is called a "block". Each block
+ // wastes exactly one element's worth of space to keep the design simple
+ // (if front == tail then the queue is empty, and can't be full).
+ // The high-level queue is a circular linked list of blocks; again there
+ // is a front and tail, but this time they are pointers to the blocks.
+ // The front block is where the next element to be dequeued is, provided
+ // the block is not empty. The back block is where elements are to be
+ // enqueued, provided the block is not full.
+ // The producer thread owns all the tail indices/pointers. The consumer
+ // thread owns all the front indices/pointers. Both threads read each
+ // other's variables, but only the owning thread updates them. E.g. After
+ // the consumer reads the producer's tail, the tail may change before the
+ // consumer is done dequeuing an object, but the consumer knows the tail
+ // will never go backwards, only forwards.
+ // If there is no room to enqueue an object, an additional block (of
+ // equal size to the last block) is added. Blocks are never removed.
+
+public:
+	// Constructs a queue that can hold maxSize elements without further
+	// allocations. If more than MAX_BLOCK_SIZE elements are requested,
+	// then several blocks of MAX_BLOCK_SIZE each are reserved (including
+	// at least one extra buffer block).
+	// Throws std::bad_alloc if an initial block allocation fails.
+	explicit ReaderWriterQueue(size_t maxSize = 15)
+#ifndef NDEBUG
+		: enqueuing(false)
+		,dequeuing(false)
+#endif
+	{
+		assert(maxSize > 0);
+		assert(MAX_BLOCK_SIZE == ceilToPow2(MAX_BLOCK_SIZE) && "MAX_BLOCK_SIZE must be a power of 2");
+		assert(MAX_BLOCK_SIZE >= 2 && "MAX_BLOCK_SIZE must be at least 2");
+
+		Block* firstBlock = nullptr;
+
+		largestBlockSize = ceilToPow2(maxSize + 1); // We need a spare slot to fit maxSize elements in the block
+		if (largestBlockSize > MAX_BLOCK_SIZE * 2) {
+			// We need a spare block in case the producer is writing to a different block the consumer is reading from, and
+			// wants to enqueue the maximum number of elements. We also need a spare element in each block to avoid the ambiguity
+			// between front == tail meaning "empty" and "full".
+			// So the effective number of slots that are guaranteed to be usable at any time is the block size - 1 times the
+			// number of blocks - 1. Solving for maxSize and applying a ceiling to the division gives us (after simplifying):
+			size_t initialBlockCount = (maxSize + MAX_BLOCK_SIZE * 2 - 3) / (MAX_BLOCK_SIZE - 1);
+			largestBlockSize = MAX_BLOCK_SIZE;
+			Block* lastBlock = nullptr;
+			for (size_t i = 0; i != initialBlockCount; ++i) {
+				auto block = make_block(largestBlockSize);
+				if (block == nullptr) {
+					throw std::bad_alloc();
+				}
+				if (firstBlock == nullptr) {
+					firstBlock = block;
+				}
+				else {
+					lastBlock->next = block;
+				}
+				lastBlock = block;
+				block->next = firstBlock;
+			}
+		}
+		else {
+			firstBlock = make_block(largestBlockSize);
+			if (firstBlock == nullptr) {
+				throw std::bad_alloc();
+			}
+			firstBlock->next = firstBlock;
+		}
+		frontBlock = firstBlock;
+		tailBlock = firstBlock;
+
+		// Make sure the reader/writer threads will have the initialized memory setup above:
+		fence(memory_order_sync);
+	}
+
+	// Note: The queue should not be accessed concurrently while it's
+	// being deleted. It's up to the user to synchronize this.
+	~ReaderWriterQueue()
+	{
+		// Make sure we get the latest version of all variables from other CPUs:
+		fence(memory_order_sync);
+
+		// Destroy any remaining objects in queue and free memory
+		Block* frontBlock_ = frontBlock;
+		Block* block = frontBlock_;
+		do {
+			Block* nextBlock = block->next;
+			size_t blockFront = block->front;
+			size_t blockTail = block->tail;
+
+			// Walk the circular buffer from front to tail, destroying each live element.
+			for (size_t i = blockFront; i != blockTail; i = (i + 1) & block->sizeMask) {
+				auto element = reinterpret_cast<T*>(block->data + i * sizeof(T));
+				element->~T();
+				(void)element; // silences an unused-variable warning when ~T() is trivial
+			}
+
+			// Blocks were placement-constructed into raw storage; destroy then free the raw pointer.
+			auto rawBlock = block->rawThis;
+			block->~Block();
+			std::free(rawBlock);
+			block = nextBlock;
+		} while (block != frontBlock_);
+	}
+
+
+	// Enqueues a copy of element if there is room in the queue.
+	// Returns true if the element was enqueued, false otherwise.
+	// Does not allocate memory.
+	// Must be called from the producer thread only.
+	AE_FORCEINLINE bool try_enqueue(T const& element)
+	{
+		return inner_enqueue<CannotAlloc>(element);
+	}
+
+	// Enqueues a moved copy of element if there is room in the queue.
+	// Returns true if the element was enqueued, false otherwise.
+	// Does not allocate memory.
+	// Must be called from the producer thread only.
+	AE_FORCEINLINE bool try_enqueue(T&& element)
+	{
+		// T is a class template parameter (not deduced), so forward<T> acts as a move here.
+		return inner_enqueue<CannotAlloc>(std::forward<T>(element));
+	}
+
+
+	// Enqueues a copy of element on the queue.
+	// Allocates an additional block of memory if needed.
+	// Only fails (returns false) if memory allocation fails.
+	// Must be called from the producer thread only.
+	AE_FORCEINLINE bool enqueue(T const& element)
+	{
+		return inner_enqueue<CanAlloc>(element);
+	}
+
+	// Enqueues a moved copy of element on the queue.
+	// Allocates an additional block of memory if needed.
+	// Only fails (returns false) if memory allocation fails.
+	// Must be called from the producer thread only.
+	AE_FORCEINLINE bool enqueue(T&& element)
+	{
+		// T is a class template parameter (not deduced), so forward<T> acts as a move here.
+		return inner_enqueue<CanAlloc>(std::forward<T>(element));
+	}
+
+
+	// Attempts to dequeue an element; if the queue is empty,
+	// returns false instead. If the queue has at least one element,
+	// moves front to result using operator=, then returns true.
+	// Must be called from the consumer thread only.
+	template<typename U>
+	bool try_dequeue(U& result)
+	{
+#ifndef NDEBUG
+		ReentrantGuard guard(this->dequeuing);
+#endif
+
+		// High-level pseudocode:
+		// Remember where the tail block is
+		// If the front block has an element in it, dequeue it
+		// Else
+		//     If front block was the tail block when we entered the function, return false
+		//     Else advance to next block and dequeue the item there
+
+		// Note that we have to use the value of the tail block from before we check if the front
+		// block is full or not, in case the front block is empty and then, before we check if the
+		// tail block is at the front block or not, the producer fills up the front block *and
+		// moves on*, which would make us skip a filled block. Seems unlikely, but was consistently
+		// reproducible in practice.
+		// In order to avoid overhead in the common case, though, we do a double-checked pattern
+		// where we have the fast path if the front block is not empty, then read the tail block,
+		// then re-read the front block and check if it's not empty again, then check if the tail
+		// block has advanced.
+
+		Block* frontBlock_ = frontBlock.load();
+		size_t blockTail = frontBlock_->localTail;
+		size_t blockFront = frontBlock_->front.load();
+
+		if (blockFront != blockTail || blockFront != (frontBlock_->localTail = frontBlock_->tail.load())) {
+			fence(memory_order_acquire);
+
+		non_empty_front_block:
+			// Front block not empty, dequeue from here
+			auto element = reinterpret_cast<T*>(frontBlock_->data + blockFront * sizeof(T));
+			result = std::move(*element);
+			element->~T();
+
+			blockFront = (blockFront + 1) & frontBlock_->sizeMask;
+
+			fence(memory_order_release);
+			frontBlock_->front = blockFront;
+		}
+		else if (frontBlock_ != tailBlock.load()) {
+			fence(memory_order_acquire);
+
+			// Re-read front block state now that we know the tail block differs.
+			frontBlock_ = frontBlock.load();
+			blockTail = frontBlock_->localTail = frontBlock_->tail.load();
+			blockFront = frontBlock_->front.load();
+			fence(memory_order_acquire);
+
+			if (blockFront != blockTail) {
+				// Oh look, the front block isn't empty after all
+				goto non_empty_front_block;
+			}
+
+			// Front block is empty but there's another block ahead, advance to it
+			Block* nextBlock = frontBlock_->next;
+			// Don't need an acquire fence here since next can only ever be set on the tailBlock,
+			// and we're not the tailBlock, and we did an acquire earlier after reading tailBlock which
+			// ensures next is up-to-date on this CPU in case we recently were at tailBlock.
+
+			size_t nextBlockFront = nextBlock->front.load();
+			size_t nextBlockTail = nextBlock->localTail = nextBlock->tail.load();
+			fence(memory_order_acquire);
+
+			// Since the tailBlock is only ever advanced after being written to,
+			// we know there's for sure an element to dequeue on it
+			assert(nextBlockFront != nextBlockTail);
+			AE_UNUSED(nextBlockTail);
+
+			// We're done with this block, let the producer use it if it needs
+			fence(memory_order_release); // Expose possibly pending changes to frontBlock->front from last dequeue
+			frontBlock = frontBlock_ = nextBlock;
+
+			compiler_fence(memory_order_release); // Not strictly needed
+
+			auto element = reinterpret_cast<T*>(frontBlock_->data + nextBlockFront * sizeof(T));
+
+			result = std::move(*element);
+			element->~T();
+
+			nextBlockFront = (nextBlockFront + 1) & frontBlock_->sizeMask;
+
+			fence(memory_order_release);
+			frontBlock_->front = nextBlockFront;
+		}
+		else {
+			// No elements in current block and no other block to advance to
+			return false;
+		}
+
+		return true;
+	}
+
+
+	// Returns a pointer to the front element in the queue (the one that
+	// would be removed next by a call to `try_dequeue` or `pop`). If the
+	// queue appears empty at the time the method is called, nullptr is
+	// returned instead.
+	// Must be called only from the consumer thread.
+	// NOTE(review): the returned pointer is only valid until the element is
+	// removed -- `try_dequeue`/`pop` destroy the element in place (~T()).
+	T* peek()
+	{
+#ifndef NDEBUG
+		ReentrantGuard guard(this->dequeuing);
+#endif
+		// See try_dequeue() for reasoning
+
+		Block* frontBlock_ = frontBlock.load();
+		size_t blockTail = frontBlock_->localTail;
+		size_t blockFront = frontBlock_->front.load();
+
+		if (blockFront != blockTail || blockFront != (frontBlock_->localTail = frontBlock_->tail.load())) {
+			fence(memory_order_acquire);
+			non_empty_front_block:
+			// Element available in the front block; return its address without consuming it
+			return reinterpret_cast<T*>(frontBlock_->data + blockFront * sizeof(T));
+		}
+		else if (frontBlock_ != tailBlock.load()) {
+			fence(memory_order_acquire);
+			frontBlock_ = frontBlock.load();
+			blockTail = frontBlock_->localTail = frontBlock_->tail.load();
+			blockFront = frontBlock_->front.load();
+			fence(memory_order_acquire);
+
+			if (blockFront != blockTail) {
+				goto non_empty_front_block;
+			}
+
+			// Front block is empty but another block is ahead; peek at that one
+			// (unlike try_dequeue, frontBlock is deliberately NOT advanced here)
+			Block* nextBlock = frontBlock_->next;
+
+			size_t nextBlockFront = nextBlock->front.load();
+			fence(memory_order_acquire);
+
+			assert(nextBlockFront != nextBlock->tail.load());
+			return reinterpret_cast<T*>(nextBlock->data + nextBlockFront * sizeof(T));
+		}
+
+		return nullptr;
+	}
+
+	// Removes the front element from the queue, if any, without returning it.
+	// Returns true on success, or false if the queue appeared empty at the time
+	// `pop` was called.
+	// NOTE(review): consumer-side operation -- must only be called from the
+	// single consumer thread. Identical to try_dequeue() except the element is
+	// destroyed without being moved into a result.
+	bool pop()
+	{
+#ifndef NDEBUG
+		ReentrantGuard guard(this->dequeuing);
+#endif
+		// See try_dequeue() for reasoning
+
+		Block* frontBlock_ = frontBlock.load();
+		size_t blockTail = frontBlock_->localTail;
+		size_t blockFront = frontBlock_->front.load();
+
+		if (blockFront != blockTail || blockFront != (frontBlock_->localTail = frontBlock_->tail.load())) {
+			fence(memory_order_acquire);
+
+			non_empty_front_block:
+			// Destroy the front element in place, then publish the new front index
+			auto element = reinterpret_cast<T*>(frontBlock_->data + blockFront * sizeof(T));
+			element->~T();
+
+			blockFront = (blockFront + 1) & frontBlock_->sizeMask;
+
+			fence(memory_order_release);
+			frontBlock_->front = blockFront;
+		}
+		else if (frontBlock_ != tailBlock.load()) {
+			fence(memory_order_acquire);
+			frontBlock_ = frontBlock.load();
+			blockTail = frontBlock_->localTail = frontBlock_->tail.load();
+			blockFront = frontBlock_->front.load();
+			fence(memory_order_acquire);
+
+			if (blockFront != blockTail) {
+				goto non_empty_front_block;
+			}
+
+			// Front block is empty but there's another block ahead, advance to it
+			Block* nextBlock = frontBlock_->next;
+
+			size_t nextBlockFront = nextBlock->front.load();
+			size_t nextBlockTail = nextBlock->localTail = nextBlock->tail.load();
+			fence(memory_order_acquire);
+
+			assert(nextBlockFront != nextBlockTail);
+			AE_UNUSED(nextBlockTail);
+
+			fence(memory_order_release);
+			frontBlock = frontBlock_ = nextBlock;
+
+			compiler_fence(memory_order_release);
+
+			auto element = reinterpret_cast<T*>(frontBlock_->data + nextBlockFront * sizeof(T));
+			element->~T();
+
+			nextBlockFront = (nextBlockFront + 1) & frontBlock_->sizeMask;
+
+			fence(memory_order_release);
+			frontBlock_->front = nextBlockFront;
+		}
+		else {
+			// No elements in current block and no other block to advance to
+			return false;
+		}
+
+		return true;
+	}
+
+	// Returns the approximate number of items currently in the queue.
+	// Safe to call from both the producer and consumer threads.
+	// The value is approximate because front/tail may be concurrently
+	// advanced by the other thread while the blocks are being walked.
+	inline size_t size_approx() const
+	{
+		size_t result = 0;
+		Block* frontBlock_ = frontBlock.load();
+		Block* block = frontBlock_;
+		// Walk the circular list of blocks exactly once, summing each
+		// block's occupancy.
+		do {
+			fence(memory_order_acquire);
+			size_t blockFront = block->front.load();
+			size_t blockTail = block->tail.load();
+			// Masked subtraction handles the ring-buffer wraparound
+			// (size is a power of two; sizeMask == size - 1).
+			result += (blockTail - blockFront) & block->sizeMask;
+			block = block->next.load();
+		} while (block != frontBlock_);
+		return result;
+	}
+
+
+private:
+	// Selects whether inner_enqueue may allocate a fresh block when the queue
+	// is full (enqueue) or must fail instead (try_enqueue).
+	enum AllocationMode { CanAlloc, CannotAlloc };
+
+	template<AllocationMode canAlloc, typename U>
+	bool inner_enqueue(U&& element)
+	{
+#ifndef NDEBUG
+		ReentrantGuard guard(this->enqueuing);
+#endif
+
+		// NOTE(review): producer-side operation -- must only be called from
+		// the single producer thread (the ReentrantGuard above asserts
+		// non-reentrancy in debug builds).
+
+		// High-level pseudocode (assuming we're allowed to alloc a new block):
+		// If room in tail block, add to tail
+		// Else check next block
+		//     If next block is not the head block, enqueue on next block
+		//     Else create a new block and enqueue there
+		//     Advance tail to the block we just enqueued to
+
+		Block* tailBlock_ = tailBlock.load();
+		size_t blockFront = tailBlock_->localFront;
+		size_t blockTail = tailBlock_->tail.load();
+
+		size_t nextBlockTail = (blockTail + 1) & tailBlock_->sizeMask;
+		// Fast path: room in the tail block per the shadow copy of front, or
+		// per a fresh load of the real front (refreshing localFront as a side effect).
+		if (nextBlockTail != blockFront || nextBlockTail != (tailBlock_->localFront = tailBlock_->front.load())) {
+			fence(memory_order_acquire);
+			// This block has room for at least one more element
+			char* location = tailBlock_->data + blockTail * sizeof(T);
+			new (location) T(std::forward<U>(element));
+
+			fence(memory_order_release);
+			tailBlock_->tail = nextBlockTail;
+		}
+		else {
+			fence(memory_order_acquire);
+			if (tailBlock_->next.load() != frontBlock) {
+				// Note that the reason we can't advance to the frontBlock and start adding new entries there
+				// is because if we did, then dequeue would stay in that block, eventually reading the new values,
+				// instead of advancing to the next full block (whose values were enqueued first and so should be
+				// consumed first).
+
+				fence(memory_order_acquire);		// Ensure we get latest writes if we got the latest frontBlock
+
+				// tailBlock is full, but there's a free block ahead, use it
+				Block* tailBlockNext = tailBlock_->next.load();
+				size_t nextBlockFront = tailBlockNext->localFront = tailBlockNext->front.load();
+				nextBlockTail = tailBlockNext->tail.load();
+				fence(memory_order_acquire);
+
+				// This block must be empty since it's not the head block and we
+				// go through the blocks in a circle
+				assert(nextBlockFront == nextBlockTail);
+				tailBlockNext->localFront = nextBlockFront;
+
+				char* location = tailBlockNext->data + nextBlockTail * sizeof(T);
+				new (location) T(std::forward<U>(element));
+
+				tailBlockNext->tail = (nextBlockTail + 1) & tailBlockNext->sizeMask;
+
+				fence(memory_order_release);
+				tailBlock = tailBlockNext;
+			}
+			else if (canAlloc == CanAlloc) {
+				// tailBlock is full and there's no free block ahead; create a new block
+				// (capacity doubles each time until MAX_BLOCK_SIZE is reached)
+				auto newBlockSize = largestBlockSize >= MAX_BLOCK_SIZE ? largestBlockSize : largestBlockSize * 2;
+				auto newBlock = make_block(newBlockSize);
+				if (newBlock == nullptr) {
+					// Could not allocate a block!
+					return false;
+				}
+				largestBlockSize = newBlockSize;
+
+				new (newBlock->data) T(std::forward<U>(element));
+
+				assert(newBlock->front == 0);
+				newBlock->tail = newBlock->localTail = 1;
+
+				// Splice the new block into the circular list after the current tail
+				newBlock->next = tailBlock_->next.load();
+				tailBlock_->next = newBlock;
+
+				// Might be possible for the dequeue thread to see the new tailBlock->next
+				// *without* seeing the new tailBlock value, but this is OK since it can't
+				// advance to the next block until tailBlock is set anyway (because the only
+				// case where it could try to read the next is if it's already at the tailBlock,
+				// and it won't advance past tailBlock in any circumstance).
+
+				fence(memory_order_release);
+				tailBlock = newBlock;
+			}
+			else if (canAlloc == CannotAlloc) {
+				// Would have had to allocate a new block to enqueue, but not allowed
+				return false;
+			}
+			else {
+				assert(false && "Should be unreachable code");
+				return false;
+			}
+		}
+
+		return true;
+	}
+
+
+	// Disable copying (declared private and intentionally never usable)
+	ReaderWriterQueue(ReaderWriterQueue const&) { }
+
+	// Disable assignment (private; a return statement is required because
+	// flowing off the end of a value-returning function is undefined
+	// behaviour if it is ever invoked, and triggers MSVC warning C4716)
+	ReaderWriterQueue& operator=(ReaderWriterQueue const&) { return *this; }
+
+
+
+	// Rounds x up to the next power of two using the classic bit-smearing
+	// technique (see the "Bit Twiddling Hacks" collection). ceilToPow2(0)
+	// yields 0; values that are already powers of two are returned unchanged.
+	AE_FORCEINLINE static size_t ceilToPow2(size_t x)
+	{
+		--x;
+		// Smear the highest set bit into every lower position, doubling the
+		// shift each pass until the whole word width has been covered.
+		for (size_t shift = 1; shift < sizeof(size_t) * 8; shift <<= 1) {
+			x |= x >> shift;
+		}
+		++x;
+		return x;
+	}
+
+	// Advances ptr to the next address suitably aligned for a U.
+	// Returns ptr unchanged when it is already aligned.
+	template<typename U>
+	static AE_FORCEINLINE char* align_for(char* ptr)
+	{
+		const std::size_t alignment = std::alignment_of<U>::value;
+		const std::size_t misalignment = reinterpret_cast<std::uintptr_t>(ptr) % alignment;
+		return misalignment == 0 ? ptr : ptr + (alignment - misalignment);
+	}
+private:
+#ifndef NDEBUG
+	// Debug-only RAII guard that detects reentrant use of the enqueue or
+	// dequeue paths (e.g. an element's ctor/dtor calling back into the queue),
+	// which this queue does not support.
+	struct ReentrantGuard
+	{
+		ReentrantGuard(bool& _inSection)
+			: inSection(_inSection)
+		{
+			assert(!inSection);
+			if (inSection) {
+				throw std::runtime_error("ReaderWriterQueue does not support enqueuing or dequeuing elements from other elements' ctors and dtors");
+			}
+
+			inSection = true;
+		}
+
+		~ReentrantGuard() { inSection = false; }
+
+	private:
+		ReentrantGuard& operator=(ReentrantGuard const&);
+
+	private:
+		bool& inSection;	// Flag owned by the queue (enqueuing or dequeuing)
+	};
+#endif
+
+	// One ring-buffer segment of the queue. Blocks form a circular linked
+	// list; member order is significant (cache-line padding below).
+	struct Block
+	{
+		// Avoid false-sharing by putting highly contended variables on their own cache lines
+		weak_atomic<size_t> front;	// (Atomic) Elements are read from here
+		size_t localTail;			// An uncontended shadow copy of tail, owned by the consumer
+
+		char cachelineFiller0[CACHE_LINE_SIZE - sizeof(weak_atomic<size_t>) - sizeof(size_t)];
+		weak_atomic<size_t> tail;	// (Atomic) Elements are enqueued here
+		size_t localFront;			// An uncontended shadow copy of front, owned by the producer
+
+		char cachelineFiller1[CACHE_LINE_SIZE - sizeof(weak_atomic<size_t>) - sizeof(size_t)];	// next isn't very contended, but we don't want it on the same cache line as tail (which is)
+		weak_atomic<Block*> next;	// (Atomic)
+
+		char* data;		// Contents (on heap) are aligned to T's alignment
+
+		const size_t sizeMask;	// capacity - 1; capacity is a power of two, so this masks ring indices
+
+
+		// size must be a power of two (and greater than 0)
+		Block(size_t const& _size, char* _rawThis, char* _data)
+			: front(0), localTail(0), tail(0), localFront(0), next(nullptr), data(_data), sizeMask(_size - 1), rawThis(_rawThis)
+		{
+		}
+
+	private:
+		// C4512 - Assignment operator could not be generated
+		Block& operator=(Block const&);
+
+	public:
+		// Original (unaligned) pointer returned by malloc in make_block;
+		// presumably passed to free() when the queue is destroyed -- the
+		// destructor is outside this view, so confirm there.
+		char* rawThis;
+	};
+
+
+	// Allocates one contiguous chunk holding a Block header followed by
+	// storage for `capacity` elements of T, with enough slack to align both.
+	// Returns nullptr when the allocation fails. capacity must be a power of two.
+	static Block* make_block(size_t capacity)
+	{
+		const auto headerBytes = sizeof(Block) + std::alignment_of<Block>::value - 1;
+		const auto elementBytes = sizeof(T) * capacity + std::alignment_of<T>::value - 1;
+		auto raw = static_cast<char*>(std::malloc(headerBytes + elementBytes));
+		if (raw == nullptr) {
+			return nullptr;		// Caller treats this as "could not allocate"
+		}
+
+		// Place the header at the first suitably-aligned address, then the
+		// element storage at the first T-aligned address past it.
+		auto blockStart = align_for<Block>(raw);
+		auto dataStart = align_for<T>(blockStart + sizeof(Block));
+		return new (blockStart) Block(capacity, raw, dataStart);
+	}
+
+private:
+	weak_atomic<Block*> frontBlock;		// (Atomic) Elements are dequeued from this block (consumer side; see try_dequeue/pop)
+
+	char cachelineFiller[CACHE_LINE_SIZE - sizeof(weak_atomic<Block*>)];
+	weak_atomic<Block*> tailBlock;		// (Atomic) Elements are enqueued to this block (producer side; see inner_enqueue)
+
+	// Capacity of the most recently created block; inner_enqueue doubles it
+	// (up to MAX_BLOCK_SIZE) each time a new block must be allocated.
+	size_t largestBlockSize;
+
+#ifndef NDEBUG
+	// Reentrancy-detection flags read and set by ReentrantGuard in debug builds
+	bool enqueuing;
+	bool dequeuing;
+#endif
+};
+
+// Like ReaderWriterQueue, but also provides blocking operations
+template<typename T, size_t MAX_BLOCK_SIZE = 512>
+class BlockingReaderWriterQueue
+{
+private:
+	typedef ::moodycamel::ReaderWriterQueue<T, MAX_BLOCK_SIZE> ReaderWriterQueue;
+
+public:
+	// maxSize is forwarded to the inner queue as its initial capacity hint
+	explicit BlockingReaderWriterQueue(size_t maxSize = 15)
+		: inner(maxSize)
+	{ }
+
+
+	// Enqueues a copy of element if there is room in the queue.
+	// Returns true if the element was enqueued, false otherwise.
+	// Does not allocate memory.
+	AE_FORCEINLINE bool try_enqueue(T const& element)
+	{
+		if (inner.try_enqueue(element)) {
+			sema.signal();		// Wake a consumer blocked in wait_dequeue
+			return true;
+		}
+		return false;
+	}
+
+	// Enqueues a moved copy of element if there is room in the queue.
+	// Returns true if the element was enqueued, false otherwise.
+	// Does not allocate memory.
+	AE_FORCEINLINE bool try_enqueue(T&& element)
+	{
+		if (inner.try_enqueue(std::forward<T>(element))) {
+			sema.signal();
+			return true;
+		}
+		return false;
+	}
+
+
+	// Enqueues a copy of element on the queue.
+	// Allocates an additional block of memory if needed.
+	// Only fails (returns false) if memory allocation fails.
+	AE_FORCEINLINE bool enqueue(T const& element)
+	{
+		if (inner.enqueue(element)) {
+			sema.signal();
+			return true;
+		}
+		return false;
+	}
+
+	// Enqueues a moved copy of element on the queue.
+	// Allocates an additional block of memory if needed.
+	// Only fails (returns false) if memory allocation fails.
+	AE_FORCEINLINE bool enqueue(T&& element)
+	{
+		if (inner.enqueue(std::forward<T>(element))) {
+			sema.signal();
+			return true;
+		}
+		return false;
+	}
+
+
+	// Attempts to dequeue an element; if the queue is empty,
+	// returns false instead. If the queue has at least one element,
+	// moves front to result using operator=, then returns true.
+	template<typename U>
+	bool try_dequeue(U& result)
+	{
+		// The semaphore count mirrors the number of enqueued elements, so a
+		// successful tryWait() guarantees the inner dequeue must succeed.
+		if (sema.tryWait()) {
+			bool success = inner.try_dequeue(result);
+			assert(success);
+			AE_UNUSED(success);
+			return true;
+		}
+		return false;
+	}
+
+
+	// Attempts to dequeue an element; if the queue is empty,
+	// waits until an element is available, then dequeues it.
+	template<typename U>
+	void wait_dequeue(U& result)
+	{
+		sema.wait();		// Blocks until a producer signals an enqueue
+		bool success = inner.try_dequeue(result);
+		AE_UNUSED(result);
+		assert(success);
+		AE_UNUSED(success);
+	}
+
+
+	// Returns a pointer to the front element in the queue (the one that
+	// would be removed next by a call to `try_dequeue` or `pop`). If the
+	// queue appears empty at the time the method is called, nullptr is
+	// returned instead.
+	// Must be called only from the consumer thread.
+	AE_FORCEINLINE T* peek()
+	{
+		return inner.peek();
+	}
+
+	// Removes the front element from the queue, if any, without returning it.
+	// Returns true on success, or false if the queue appeared empty at the time
+	// `pop` was called.
+	AE_FORCEINLINE bool pop()
+	{
+		if (sema.tryWait()) {
+			bool result = inner.pop();
+			assert(result);
+			AE_UNUSED(result);
+			return true;
+		}
+		return false;
+	}
+
+	// Returns the approximate number of items currently in the queue.
+	// Safe to call from both the producer and consumer threads.
+	AE_FORCEINLINE size_t size_approx() const
+	{
+		return sema.availableApprox();
+	}
+
+
+private:
+	// Disable copying & assignment.
+	// NOTE(review): these previously took the *inner* ReaderWriterQueue
+	// typedef by const&, which merely declares unrelated overloads and does
+	// NOT suppress the compiler-generated copy constructor / copy assignment
+	// of this class; taking BlockingReaderWriterQueue itself actually
+	// disables copying. operator= also gained a return statement, since
+	// flowing off the end of a value-returning function is undefined
+	// behaviour if it is ever invoked (MSVC C4716).
+	BlockingReaderWriterQueue(BlockingReaderWriterQueue const&) { }
+	BlockingReaderWriterQueue& operator=(BlockingReaderWriterQueue const&) { return *this; }
+
+private:
+	ReaderWriterQueue inner;						// Lock-free SPSC queue holding the elements
+	spsc_sema::LightweightSemaphore sema;	// Counts enqueued-but-not-dequeued elements
+};
+
+} // end namespace moodycamel
+
+#ifdef AE_VCPP
+#pragma warning(pop)
+#endif
diff --git a/lib/sink_impl.cc b/lib/sink_impl.cc
index f291b95..285638b 100644
--- a/lib/sink_impl.cc
+++ b/lib/sink_impl.cc
@@ -47,6 +47,9 @@
#ifdef ENABLE_REDPITAYA
#include "redpitaya_sink_c.h"
#endif
+#ifdef ENABLE_FREESRP
+#include <freesrp_sink_c.h>
+#endif
#ifdef ENABLE_FILE
#include "file_sink_c.h"
#endif
@@ -99,6 +102,9 @@ sink_impl::sink_impl( const std::string &args )
#ifdef ENABLE_REDPITAYA
dev_types.push_back("redpitaya");
#endif
+#ifdef ENABLE_FREESRP
+ dev_types.push_back("freesrp");
+#endif
#ifdef ENABLE_FILE
dev_types.push_back("file");
#endif
@@ -145,6 +151,10 @@ sink_impl::sink_impl( const std::string &args )
BOOST_FOREACH( std::string dev, redpitaya_sink_c::get_devices() )
dev_list.push_back( dev );
#endif
+#ifdef ENABLE_FREESRP
+ BOOST_FOREACH( std::string dev, freesrp_sink_c::get_devices() )
+ dev_list.push_back( dev );
+#endif
#ifdef ENABLE_FILE
BOOST_FOREACH( std::string dev, file_sink_c::get_devices() )
dev_list.push_back( dev );
@@ -201,6 +211,12 @@ sink_impl::sink_impl( const std::string &args )
block = sink; iface = sink.get();
}
#endif
+#ifdef ENABLE_FREESRP
+ if ( dict.count("freesrp") ) {
+ freesrp_sink_c_sptr sink = make_freesrp_sink_c( arg );
+ block = sink; iface = sink.get();
+ }
+#endif
#ifdef ENABLE_FILE
if ( dict.count("file") ) {
file_sink_c_sptr sink = make_file_sink_c( arg );
diff --git a/lib/source_impl.cc b/lib/source_impl.cc
index 3aa17f9..a28f314 100644
--- a/lib/source_impl.cc
+++ b/lib/source_impl.cc
@@ -88,6 +88,11 @@
#include <redpitaya_source_c.h>
#endif
+#ifdef ENABLE_FREESRP
+#include <freesrp_source_c.h>
+#endif
+
+
#include "arg_helpers.h"
#include "source_impl.h"
@@ -163,6 +168,9 @@ source_impl::source_impl( const std::string &args )
#ifdef ENABLE_REDPITAYA
dev_types.push_back("redpitaya");
#endif
+#ifdef ENABLE_FREESRP
+ dev_types.push_back("freesrp");
+#endif
std::cerr << "gr-osmosdr "
<< GR_OSMOSDR_VERSION << " (" << GR_OSMOSDR_LIBVER << ") "
<< "gnuradio " << gr::version() << std::endl;
@@ -240,6 +248,10 @@ source_impl::source_impl( const std::string &args )
BOOST_FOREACH( std::string dev, redpitaya_source_c::get_devices() )
dev_list.push_back( dev );
#endif
+#ifdef ENABLE_FREESRP
+ BOOST_FOREACH( std::string dev, freesrp_source_c::get_devices() )
+ dev_list.push_back( dev );
+#endif
// std::cerr << std::endl;
// BOOST_FOREACH( std::string dev, dev_list )
@@ -364,6 +376,13 @@ source_impl::source_impl( const std::string &args )
}
#endif
+#ifdef ENABLE_FREESRP
+ if ( dict.count("freesrp") ) {
+ freesrp_source_c_sptr src = make_freesrp_source_c( arg );
+ block = src; iface = src.get();
+ }
+#endif
+
if ( iface != NULL && long(block.get()) != 0 ) {
_devs.push_back( iface );