author     ptrkrysik <ptrkrysik@gmail.com>  2015-06-20 09:22:01 +0200
committer  ptrkrysik <ptrkrysik@gmail.com>  2015-06-20 09:22:01 +0200
commit     9ee374b53e2149d77645a64b3aa2ff5cc0c89d14 (patch)
tree       d90fd01e72f1fba11a90b84a494300ca2f6688d0 /lib
parent     42bbbd3ac79808a30216f68471d9656d1940e493 (diff)
parent     601dca339647251c49f6e053d9544c286e5850e1 (diff)
Merge remote-tracking branch 'origin/romankh-master' with implementation of AMR decoding

Conflicts:
    lib/decoding/tch_f_decoder_impl.cc
    lib/decoding/tch_f_decoder_impl.h
Diffstat (limited to 'lib')
-rw-r--r--  lib/CMakeLists.txt                      8
-rw-r--r--  lib/decoding/AmrCoder.cpp            1887
-rw-r--r--  lib/decoding/AmrCoder.h               936
-rw-r--r--  lib/decoding/BitVector.cpp            548
-rw-r--r--  lib/decoding/BitVector.h              340
-rw-r--r--  lib/decoding/GSM503Tables.cpp         322
-rw-r--r--  lib/decoding/GSM503Tables.h            71
-rw-r--r--  lib/decoding/GSM690Tables.cpp          49
-rw-r--r--  lib/decoding/GSM690Tables.h            31
-rw-r--r--  lib/decoding/Vector.h                 580
-rw-r--r--  lib/decoding/Viterbi.h                 35
-rw-r--r--  lib/decoding/ViterbiR204.cpp          306
-rw-r--r--  lib/decoding/ViterbiR204.h            149
-rw-r--r--  lib/decoding/VocoderFrame.h            43
-rw-r--r--  lib/decoding/tch_f_decoder_impl.cc    274
-rw-r--r--  lib/decoding/tch_f_decoder_impl.h      38
16 files changed, 4764 insertions(+), 853 deletions(-)
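
For orientation before the file diffs: the new AmrCoder.cpp added below defines one recursive systematic convolutional encoder/decoder class per TCH/AFS codec mode. Taking ViterbiTCH_AFS12_2::encode() as the example, each input bit is pushed through the feedback taps r[k-3] and r[k-4], and the coder emits the systematic bit plus one parity bit per input bit, then appends four tail bits. The following standalone sketch only mirrors those XOR lines from the diff below; it is illustrative, not part of the commit, and the helper name and std::vector types are invented here.

#include <cstddef>
#include <cstdint>
#include <vector>

// Illustrative re-statement of ViterbiTCH_AFS12_2::encode() (see the diff below):
// rate 1/2, memory 4, feedback polynomial 1 + D^3 + D^4 (0x19),
// parity polynomial 1 + D + D^3 + D^4 (0x1b), systematic output plus 4 tail bits.
static std::vector<uint8_t> afs12_2_encode_sketch(const std::vector<uint8_t>& u)
{
    const std::size_t H = 4;                         // encoder memory (order)
    std::vector<uint8_t> r(u.size() + 2 * H, 0);     // shift-register history, zeroed
    std::vector<uint8_t> C;
    C.reserve(2 * (u.size() + H));
    for (std::size_t k = 0; k < u.size(); k++) {
        r[k + H] = u[k] ^ r[k + H - 3] ^ r[k + H - 4];                      // recursion
        C.push_back(u[k]);                                                  // systematic bit
        C.push_back(r[k + H] ^ r[k + H - 1] ^ r[k + H - 3] ^ r[k + H - 4]); // parity bit
    }
    for (std::size_t k = u.size(); k < u.size() + H; k++) {                 // termination (tail) bits
        r[k + H] = 0;
        C.push_back(r[k + H - 3] ^ r[k + H - 4]);
        C.push_back(r[k + H] ^ r[k + H - 1] ^ r[k + H - 3] ^ r[k + H - 4]);
    }
    return C;   // 508 coded bits for the 250-bit class-1 block asserted by encode()
}
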
diff --git a/lib/CMakeLists.txt b/lib/CMakeLists.txt
index 2c18d6c..4a88edb 100644
--- a/lib/CMakeLists.txt
+++ b/lib/CMakeLists.txt
@@ -26,8 +26,8 @@ include_directories(${Boost_INCLUDE_DIR} receiver)
link_directories(${Boost_LIBRARY_DIRS})
list(APPEND grgsm_sources
receiver/receiver_impl.cc
- receiver/receiver_config.cc
- receiver/viterbi_detector.cc
+ receiver/receiver_config.cc
+ receiver/viterbi_detector.cc
receiver/sch.c
receiver/clock_offset_control_impl.cc
receiver/cx_channel_hopper_impl.cc
@@ -39,10 +39,12 @@ list(APPEND grgsm_sources
decoding/cch.c
decoding/fire_crc.c
decoding/tch_f_decoder_impl.cc
+ decoding/AmrCoder.cpp
decoding/BitVector.cpp
decoding/GSM610Tables.cpp
decoding/GSM660Tables.cpp
- decoding/GSM690Tables.cpp
+ decoding/GSM503Tables.cpp
+ decoding/ViterbiR204.cpp
misc_utils/controlled_rotator_cc_impl.cc
misc_utils/controlled_const_source_f_impl.cc
misc_utils/message_printer_impl.cc
diff --git a/lib/decoding/AmrCoder.cpp b/lib/decoding/AmrCoder.cpp
new file mode 100644
index 0000000..d02e69c
--- /dev/null
+++ b/lib/decoding/AmrCoder.cpp
@@ -0,0 +1,1887 @@
+/*
+* Copyright 2013, 2014 Range Networks, Inc.
+*
+* This software is distributed under multiple licenses;
+* see the COPYING file in the main directory for licensing
+* information for this specific distribution.
+*
+* This use of this software may be subject to additional restrictions.
+* See the LEGAL file in the main directory for details.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+
+*/
+
+
+#include "BitVector.h"
+#include "AmrCoder.h"
+#include <iostream>
+#include <stdio.h>
+#include <sstream>
+
+using namespace std;
+
+
+
+ViterbiTCH_AFS12_2::ViterbiTCH_AFS12_2()
+{
+ assert(mDeferral < 32);
+ mCoeffs[0] = 0x019;
+ mCoeffsFB[0] = 0x019;
+ mCoeffs[1] = 0x01b;
+ mCoeffsFB[1] = 0x019;
+ for (unsigned i = 0; i < mIRate; i++) {
+ computeStateTables(i);
+ }
+ computeGeneratorTable();
+}
+
+
+//void BitVector::encode(const ViterbiTCH_AFS12_2& coder, BitVector& target) const
+void ViterbiTCH_AFS12_2::encode(const BitVector& in, BitVector& target) const
+{
+ assert(in.size() == 250);
+ assert(target.size() == 508);
+ const char *u = in.begin();
+ char *C = target.begin();
+ const unsigned H = 4;
+ BitVector r(254+H);
+ for (int k = -H; k <= -1; k++) r[k+H] = 0;
+ for (unsigned k = 0; k <= 249; k++) {
+ r[k+H] = u[k] ^ r[k-3+H] ^ r[k-4+H];
+ C[2*k] = u[k];
+ C[2*k+1] = r[k+H] ^ r[k-1+H] ^ r[k-3+H] ^ r[k-4+H];
+ }
+ // termination
+ for (unsigned k = 250; k <= 253; k++) {
+ r[k+H] = 0;
+ C[2*k] = r[k-3+H] ^ r[k-4+H];
+ C[2*k+1] = r[k+H] ^ r[k-1+H] ^ r[k-3+H] ^ r[k-4+H];
+ }
+}
+
+
+
+//void BitVector::encode(const ViterbiTCH_AFS10_2& coder, BitVector& target)
+void ViterbiTCH_AFS10_2::encode(const BitVector& in, BitVector& target) const
+{
+ assert(in.size() == 210);
+ assert(target.size() == 642);
+ const char *u = in.begin();
+ char *C = target.begin();
+ const unsigned H = 4;
+ BitVector r(214+H);
+ for (int k = -H; k <= -1; k++) r[k+H] = 0;
+ for (unsigned k = 0; k <= 209; k++) {
+ r[k+H] = u[k] ^ r[k-1+H] ^ r[k-2+H] ^ r[k-3+H] ^ r[k-4+H];
+ C[3*k] = r[k+H] ^ r[k-1+H] ^ r[k-3+H] ^ r[k-4+H];
+ C[3*k+1] = r[k+H] ^ r[k-2+H] ^ r[k-4+H];
+ C[3*k+2] = u[k];
+ }
+ // termination
+ for (unsigned k = 210; k <= 213; k++) {
+ r[k+H] = 0;
+ C[3*k] = r[k+H] ^ r[k-1+H] ^ r[k-3+H] ^ r[k-4+H];
+ C[3*k+1] = r[k+H] ^ r[k-2+H] ^ r[k-4+H];
+ C[3*k+2] = r[k-1+H] ^ r[k-2+H] ^ r[k-3+H] ^ r[k-4+H];
+ }
+}
+
+
+
+//void BitVector::encode(const ViterbiTCH_AFS7_95& coder, BitVector& target)
+void ViterbiTCH_AFS7_95::encode(const BitVector& in, BitVector& target) const
+{
+ assert(in.size() == 165);
+ assert(target.size() == 513);
+ const char *u = in.begin();
+ char *C = target.begin();
+ const unsigned H = 6;
+ BitVector r(171+H);
+ for (int k = -H; k <= -1; k++) r[k+H] = 0;
+ for (unsigned k = 0; k <= 164; k++) {
+ r[k+H] = u[k] ^ r[k-2+H] ^ r[k-3+H] ^ r[k-5+H] ^ r[k-6+H];
+ C[3*k] = u[k];
+ C[3*k+1] = r[k+H] ^ r[k-1+H] ^ r[k-4+H] ^ r[k-6+H];
+ C[3*k+2] = r[k+H] ^ r[k-1+H] ^ r[k-2+H] ^ r[k-3+H] ^ r[k-4+H] ^ r[k-6+H];
+ }
+ // termination
+ for (unsigned k = 165; k <= 170; k++) {
+ r[k+H] = 0;
+ C[3*k] = r[k-2+H] ^ r[k-3+H] ^ r[k-5+H] ^ r[k-6+H];
+ C[3*k+1] = r[k+H] ^ r[k-1+H] ^ r[k-4+H] ^ r[k-6+H];
+ C[3*k+2] = r[k+H] ^ r[k-1+H] ^ r[k-2+H] ^ r[k-3+H] ^ r[k-4+H] ^ r[k-6+H];
+ }
+}
+
+
+
+void ViterbiTCH_AFS7_4::encode(const BitVector& in, BitVector& target) const
+{
+ assert(in.size() == 154);
+ assert(target.size() == 474);
+ const char *u = in.begin();
+ char *C = target.begin();
+ const unsigned H = 4;
+ BitVector r(158+H);
+ for (int k = -H; k <= -1; k++) r[k+H] = 0;
+ for (unsigned k = 0; k <= 153; k++) {
+ r[k+H] = u[k] ^ r[k-1+H] ^ r[k-2+H] ^ r[k-3+H] ^ r[k-4+H];
+ C[3*k] = r[k+H] ^ r[k-1+H] ^ r[k-3+H] ^ r[k-4+H];
+ C[3*k+1] = r[k+H] ^ r[k-2+H] ^ r[k-4+H];
+ C[3*k+2] = u[k];
+ }
+ // termination
+ for (unsigned k = 154; k <= 157; k++) {
+ r[k+H] = 0;
+ C[3*k] = r[k+H] ^ r[k-1+H] ^ r[k-3+H] ^ r[k-4+H];
+ C[3*k+1] = r[k+H] ^ r[k-2+H] ^ r[k-4+H];
+ C[3*k+2] = r[k-1+H] ^ r[k-2+H] ^ r[k-3+H] ^ r[k-4+H];
+ }
+}
+
+
+
+void ViterbiTCH_AFS6_7::encode(const BitVector& in, BitVector& target) const
+{
+ assert(in.size() == 140);
+ assert(target.size() == 576);
+ const char *u = in.begin();
+ char *C = target.begin();
+ const unsigned H = 4;
+ BitVector r(144+H);
+ for (int k = -H; k <= -1; k++) r[k+H] = 0;
+ for (unsigned k = 0; k <= 139; k++) {
+ r[k+H] = u[k] ^ r[k-1+H] ^ r[k-2+H] ^ r[k-3+H] ^ r[k-4+H];
+ C[4*k] = r[k+H] ^ r[k-1+H] ^ r[k-3+H] ^ r[k-4+H];
+ C[4*k+1] = r[k+H] ^ r[k-2+H] ^ r[k-4+H];
+ C[4*k+2] = u[k];
+ C[4*k+3] = u[k];
+ }
+ // termination
+ for (unsigned k = 140; k <= 143; k++) {
+ r[k+H] = 0;
+ C[4*k] = r[k+H] ^ r[k-1+H] ^ r[k-3+H] ^ r[k-4+H];
+ C[4*k+1] = r[k+H] ^ r[k-2+H] ^ r[k-4+H];
+ C[4*k+2] = r[k-1+H] ^ r[k-2+H] ^ r[k-3+H] ^ r[k-4+H];
+ C[4*k+3] = r[k-1+H] ^ r[k-2+H] ^ r[k-3+H] ^ r[k-4+H];
+ }
+}
+
+
+
+void ViterbiTCH_AFS5_9::encode(const BitVector& in, BitVector& target) const
+{
+ assert(in.size() == 124);
+ assert(target.size() == 520);
+ const char *u = in.begin();
+ char *C = target.begin();
+ const unsigned H = 6;
+ BitVector r(130+H);
+ for (int k = -H; k <= -1; k++) r[k+H] = 0;
+ for (unsigned k = 0; k <= 123; k++) {
+ r[k+H] = u[k] ^ r[k-1+H] ^ r[k-2+H] ^ r[k-3+H] ^ r[k-4+H] ^ r[k-6+H];
+ C[4*k] = r[k+H] ^ r[k-2+H] ^ r[k-3+H] ^ r[k-5+H] ^ r[k-6+H];
+ C[4*k+1] = r[k+H] ^ r[k-1+H] ^ r[k-4+H] ^ r[k-6+H];
+ C[4*k+2] = u[k];
+ C[4*k+3] = u[k];
+ }
+ // termination
+ for (unsigned k = 124; k <= 129; k++) {
+ r[k+H] = 0;
+ C[4*k] = r[k+H] ^ r[k-2+H] ^ r[k-3+H] ^ r[k-5+H] ^ r[k-6+H];
+ C[4*k+1] = r[k+H] ^ r[k-1+H] ^ r[k-4+H] ^ r[k-6+H];
+ C[4*k+2] = r[k-1+H] ^ r[k-2+H] ^ r[k-3+H] ^ r[k-4+H] ^ r[k-6+H];
+ C[4*k+3] = r[k-1+H] ^ r[k-2+H] ^ r[k-3+H] ^ r[k-4+H] ^ r[k-6+H];
+ }
+}
+
+
+
+void ViterbiTCH_AFS5_15::encode(const BitVector& in, BitVector& target) const
+{
+ assert(in.size() == 109);
+ assert(target.size() == 565);
+ const char *u = in.begin();
+ char *C = target.begin();
+ const unsigned H = 4;
+ BitVector r(113+H);
+ for (int k = -H; k <= -1; k++) r[k+H] = 0;
+ for (unsigned k = 0; k <= 108; k++) {
+ r[k+H] = u[k] ^ r[k-1+H] ^ r[k-2+H] ^ r[k-3+H] ^ r[k-4+H];
+ C[5*k] = r[k+H] ^ r[k-1+H] ^ r[k-3+H] ^ r[k-4+H];
+ C[5*k+1] = r[k+H] ^ r[k-1+H] ^ r[k-3+H] ^ r[k-4+H];
+ C[5*k+2] = r[k+H] ^ r[k-2+H] ^ r[k-4+H];
+ C[5*k+3] = u[k];
+ C[5*k+4] = u[k];
+ }
+ // termination
+ for (unsigned k = 109; k <= 112; k++) {
+ r[k+H] = 0;
+ C[5*k] = r[k+H] ^ r[k-1+H] ^ r[k-3+H] ^ r[k-4+H];
+ C[5*k+1] = r[k+H] ^ r[k-1+H] ^ r[k-3+H] ^ r[k-4+H];
+ C[5*k+2] = r[k+H] ^ r[k-2+H] ^ r[k-4+H];
+ C[5*k+3] = r[k-1+H] ^ r[k-2+H] ^ r[k-3+H] ^ r[k-4+H];
+ C[5*k+4] = r[k-1+H] ^ r[k-2+H] ^ r[k-3+H] ^ r[k-4+H];
+ }
+}
+
+
+
+void ViterbiTCH_AFS4_75::encode(const BitVector& in, BitVector& target) const
+{
+ assert(in.size() == 101);
+ assert(target.size() == 535);
+ const char *u = in.begin();
+ char *C = target.begin();
+ const unsigned H = 6;
+ BitVector r(107+H);
+ for (int k = -H; k <= -1; k++) r[k+H] = 0;
+ for (unsigned k = 0; k <= 100; k++) {
+ r[k+H] = u[k] ^ r[k-1+H] ^ r[k-2+H] ^ r[k-3+H] ^ r[k-4+H] ^ r[k-6+H];
+ C[5*k] = r[k+H] ^ r[k-2+H] ^ r[k-3+H] ^ r[k-5+H] ^ r[k-6+H];
+ C[5*k+1] = r[k+H] ^ r[k-2+H] ^ r[k-3+H] ^ r[k-5+H] ^ r[k-6+H];
+ C[5*k+2] = r[k+H] ^ r[k-1+H] ^ r[k-4+H] ^ r[k-6+H];
+ C[5*k+3] = u[k];
+ C[5*k+4] = u[k];
+ }
+ // termination
+ for (unsigned k = 101; k <= 106; k++) {
+ r[k+H] = 0;
+ C[5*k] = r[k+H] ^ r[k-2+H] ^ r[k-3+H] ^ r[k-5+H] ^ r[k-6+H];
+ C[5*k+1] = r[k+H] ^ r[k-2+H] ^ r[k-3+H] ^ r[k-5+H] ^ r[k-6+H];
+ C[5*k+2] = r[k+H] ^ r[k-1+H] ^ r[k-4+H] ^ r[k-6+H];
+ C[5*k+3] = r[k+H] ^ r[k-2+H] ^ r[k-3+H] ^ r[k-4+H] ^ r[k-6+H];
+ C[5*k+4] = r[k-1+H] ^ r[k-2+H] ^ r[k-3+H] ^ r[k-4+H] ^ r[k-6+H];
+ }
+}
+
+
+void ViterbiTCH_AFS12_2::initializeStates()
+{
+ for (unsigned i=0; i<mIStates; i++) vitClear(mSurvivors[i]);
+ for (unsigned i=0; i<mNumCands; i++) vitClear(mCandidates[i]);
+}
+
+
+
+void ViterbiTCH_AFS12_2::computeStateTables(unsigned g)
+{
+ assert(g<mIRate);
+ for (unsigned state=0; state<mIStates; state++) {
+ for (unsigned in = 0; in <= 1; in++) {
+ uint32_t inputVal = (state<<1) | in;
+ mStateTable[g][inputVal] = applyPoly(inputVal, mCoeffs[g] ^ mCoeffsFB[g], mOrder+1) ^ in;
+ }
+ }
+}
+
+void ViterbiTCH_AFS12_2::computeGeneratorTable()
+{
+ for (unsigned index=0; index<mIStates*2; index++) {
+ uint32_t t = 0;
+ for (unsigned i = 0; i < mIRate; i++) {
+ t = (t << 1) | mStateTable[i][index];
+ }
+ mGeneratorTable[index] = t;
+ }
+}
+
+
+
+
+
+
+void ViterbiTCH_AFS12_2::branchCandidates()
+{
+ // Branch to generate new input states.
+ const vCand *sp = mSurvivors;
+ for (unsigned cand=0; cand<mNumCands; cand+=2) {
+ uint32_t oStateShifted = (sp->oState) << mIRate;
+ for (unsigned in = 0; in <= 1; in++) {
+ mCandidates[cand+in].iState = ((sp->iState) << 1) | in;
+ mCandidates[cand+in].cost = sp->cost;
+ uint32_t outputs = oStateShifted;
+ for (unsigned out = 0; out < mIRate; out++) {
+ char feedback = applyPoly(sp->rState[out], mCoeffsFB[out] ^ 1, mOrder+1);
+ char rState = (((sp->rState[out]) ^ feedback) << 1) | in;
+ mCandidates[cand+in].rState[out] = rState;
+ outputs |= (mGeneratorTable[rState & mCMask] & (1 << (mIRate - out - 1)));
+ }
+ mCandidates[cand+in].oState = outputs;
+ }
+ sp++;
+ }
+}
+
+
+void ViterbiTCH_AFS12_2::getSoftCostMetrics(const uint32_t inSample, const float *matchCost, const float *mismatchCost)
+{
+ const float *cTab[2] = {matchCost,mismatchCost};
+ for (unsigned i=0; i<mNumCands; i++) {
+ vCand& thisCand = mCandidates[i];
+ const unsigned mismatched = inSample ^ (thisCand.oState);
+ for (unsigned i = 0; i < mIRate; i++) {
+ thisCand.cost += cTab[(mismatched>>i)&0x01][mIRate-i-1];
+ }
+ }
+}
+
+
+void ViterbiTCH_AFS12_2::pruneCandidates()
+{
+ const vCand* c1 = mCandidates; // 0-prefix
+ const vCand* c2 = mCandidates + mIStates; // 1-prefix
+ for (unsigned i=0; i<mIStates; i++) {
+ if (c1[i].cost < c2[i].cost) mSurvivors[i] = c1[i];
+ else mSurvivors[i] = c2[i];
+ }
+}
+
+
+const ViterbiTCH_AFS12_2::vCand& ViterbiTCH_AFS12_2::minCost() const
+{
+ int minIndex = 0;
+ float minCost = mSurvivors[0].cost;
+ for (unsigned i=1; i<mIStates; i++) {
+ const float thisCost = mSurvivors[i].cost;
+ if (thisCost>=minCost) continue;
+ minCost = thisCost;
+ minIndex=i;
+ }
+ return mSurvivors[minIndex];
+}
+
+
+const ViterbiTCH_AFS12_2::vCand& ViterbiTCH_AFS12_2::step(uint32_t inSample, const float *probs, const float *iprobs)
+{
+ branchCandidates();
+ getSoftCostMetrics(inSample,probs,iprobs);
+ pruneCandidates();
+ return minCost();
+}
+
+
+
+void ViterbiTCH_AFS12_2::decode(const SoftVector &in, BitVector& target)
+{
+ ViterbiTCH_AFS12_2 &decoder = *this;
+ const size_t sz = in.size() - 8;
+ const unsigned deferral = decoder.deferral();
+ const size_t ctsz = sz + deferral*decoder.iRate();
+ assert(sz == decoder.iRate()*target.size());
+
+ // Build a "history" array where each element contains the full history.
+ uint32_t history[ctsz];
+ {
+ BitVector bits = in.sliced();
+ uint32_t accum = 0;
+ for (size_t i=0; i<sz; i++) {
+ accum = (accum<<1) | bits.bit(i);
+ history[i] = accum;
+ }
+ // Repeat last bit at the end.
+ for (size_t i=sz; i<ctsz; i++) {
+ accum = (accum<<1) | (accum & 0x01);
+ history[i] = accum;
+ }
+ }
+
+ // Precompute metric tables.
+ float matchCostTable[ctsz];
+ float mismatchCostTable[ctsz];
+ {
+ const float *dp = in.begin();
+ for (size_t i=0; i<sz; i++) {
+ // pVal is the probability that a bit is correct.
+ // ipVal is the probability that a bit is incorrect.
+ float pVal = dp[i];
+ if (pVal>0.5F) pVal = 1.0F-pVal;
+ float ipVal = 1.0F-pVal;
+ // This is a cheap approximation to an ideal cost function.
+ if (pVal<0.01F) pVal = 0.01;
+ if (ipVal<0.01F) ipVal = 0.01;
+ matchCostTable[i] = 0.25F/ipVal;
+ mismatchCostTable[i] = 0.25F/pVal;
+ }
+
+ // pad end of table with unknowns
+ for (size_t i=sz; i<ctsz; i++) {
+ matchCostTable[i] = 0.5F;
+ mismatchCostTable[i] = 0.5F;
+ }
+ }
+
+ {
+ decoder.initializeStates();
+ // Each sample of history[] carries its history.
+ // So we only have to process every iRate-th sample.
+ const unsigned step = decoder.iRate();
+ // input pointer
+ const uint32_t *ip = history + step - 1;
+ // output pointers
+ char *op = target.begin();
+ const char *const opt = target.end();
+ // table pointers
+ const float* match = matchCostTable;
+ const float* mismatch = mismatchCostTable;
+ size_t oCount = 0;
+ while (op<opt) {
+ // Viterbi algorithm
+ assert(match-matchCostTable<(int)(sizeof(matchCostTable)/sizeof(matchCostTable[0])-1));
+ assert(mismatch-mismatchCostTable<(int)(sizeof(mismatchCostTable)/sizeof(mismatchCostTable[0])-1));
+ const ViterbiTCH_AFS12_2::vCand &minCost = decoder.step(*ip, match, mismatch);
+ ip += step;
+ match += step;
+ mismatch += step;
+ // output
+ if (oCount>=deferral) *op++ = (minCost.iState >> deferral)&0x01;
+ oCount++;
+ }
+ }
+}
+
+
+
+ViterbiTCH_AFS10_2::ViterbiTCH_AFS10_2()
+{
+ assert(mDeferral < 32);
+ mCoeffs[0] = 0x01b;
+ mCoeffsFB[0] = 0x01f;
+ mCoeffs[1] = 0x015;
+ mCoeffsFB[1] = 0x01f;
+ mCoeffs[2] = 0x01f;
+ mCoeffsFB[2] = 0x01f;
+ for (unsigned i = 0; i < mIRate; i++) {
+ computeStateTables(i);
+ }
+ computeGeneratorTable();
+}
+
+
+
+
+void ViterbiTCH_AFS10_2::initializeStates()
+{
+ for (unsigned i=0; i<mIStates; i++) vitClear(mSurvivors[i]);
+ for (unsigned i=0; i<mNumCands; i++) vitClear(mCandidates[i]);
+}
+
+
+
+void ViterbiTCH_AFS10_2::computeStateTables(unsigned g)
+{
+ assert(g<mIRate);
+ for (unsigned state=0; state<mIStates; state++) {
+ for (unsigned in = 0; in <= 1; in++) {
+ uint32_t inputVal = (state<<1) | in;
+ mStateTable[g][inputVal] = applyPoly(inputVal, mCoeffs[g] ^ mCoeffsFB[g], mOrder+1) ^ in;
+ }
+ }
+}
+
+void ViterbiTCH_AFS10_2::computeGeneratorTable()
+{
+ for (unsigned index=0; index<mIStates*2; index++) {
+ uint32_t t = 0;
+ for (unsigned i = 0; i < mIRate; i++) {
+ t = (t << 1) | mStateTable[i][index];
+ }
+ mGeneratorTable[index] = t;
+ }
+}
+
+
+
+
+
+
+void ViterbiTCH_AFS10_2::branchCandidates()
+{
+ // Branch to generate new input states.
+ const vCand *sp = mSurvivors;
+ for (unsigned cand=0; cand<mNumCands; cand+=2) {
+ uint32_t oStateShifted = (sp->oState) << mIRate;
+ for (unsigned in = 0; in <= 1; in++) {
+ mCandidates[cand+in].iState = ((sp->iState) << 1) | in;
+ mCandidates[cand+in].cost = sp->cost;
+ uint32_t outputs = oStateShifted;
+ for (unsigned out = 0; out < mIRate; out++) {
+ char feedback = applyPoly(sp->rState[out], mCoeffsFB[out] ^ 1, mOrder+1);
+ char rState = (((sp->rState[out]) ^ feedback) << 1) | in;
+ mCandidates[cand+in].rState[out] = rState;
+ outputs |= (mGeneratorTable[rState & mCMask] & (1 << (mIRate - out - 1)));
+ }
+ mCandidates[cand+in].oState = outputs;
+ }
+ sp++;
+ }
+}
+
+
+void ViterbiTCH_AFS10_2::getSoftCostMetrics(const uint32_t inSample, const float *matchCost, const float *mismatchCost)
+{
+ const float *cTab[2] = {matchCost,mismatchCost};
+ for (unsigned i=0; i<mNumCands; i++) {
+ vCand& thisCand = mCandidates[i];
+ const unsigned mismatched = inSample ^ (thisCand.oState);
+ for (unsigned i = 0; i < mIRate; i++) {
+ thisCand.cost += cTab[(mismatched>>i)&0x01][mIRate-i-1];
+ }
+ }
+}
+
+
+void ViterbiTCH_AFS10_2::pruneCandidates()
+{
+ const vCand* c1 = mCandidates; // 0-prefix
+ const vCand* c2 = mCandidates + mIStates; // 1-prefix
+ for (unsigned i=0; i<mIStates; i++) {
+ if (c1[i].cost < c2[i].cost) mSurvivors[i] = c1[i];
+ else mSurvivors[i] = c2[i];
+ }
+}
+
+
+const ViterbiTCH_AFS10_2::vCand& ViterbiTCH_AFS10_2::minCost() const
+{
+ int minIndex = 0;
+ float minCost = mSurvivors[0].cost;
+ for (unsigned i=1; i<mIStates; i++) {
+ const float thisCost = mSurvivors[i].cost;
+ if (thisCost>=minCost) continue;
+ minCost = thisCost;
+ minIndex=i;
+ }
+ return mSurvivors[minIndex];
+}
+
+
+const ViterbiTCH_AFS10_2::vCand& ViterbiTCH_AFS10_2::step(uint32_t inSample, const float *probs, const float *iprobs)
+{
+ branchCandidates();
+ getSoftCostMetrics(inSample,probs,iprobs);
+ pruneCandidates();
+ return minCost();
+}
+
+
+
+void ViterbiTCH_AFS10_2::decode(const SoftVector &in, BitVector& target)
+{
+ ViterbiTCH_AFS10_2 &decoder = *this;
+ const size_t sz = in.size() - 12;
+ const unsigned deferral = decoder.deferral();
+ const size_t ctsz = sz + deferral*decoder.iRate();
+ assert(sz == decoder.iRate()*target.size());
+
+ // Build a "history" array where each element contains the full history.
+ uint32_t history[ctsz];
+ {
+ BitVector bits = in.sliced();
+ uint32_t accum = 0;
+ for (size_t i=0; i<sz; i++) {
+ accum = (accum<<1) | bits.bit(i);
+ history[i] = accum;
+ }
+ // Repeat last bit at the end.
+ for (size_t i=sz; i<ctsz; i++) {
+ accum = (accum<<1) | (accum & 0x01);
+ history[i] = accum;
+ }
+ }
+
+ // Precompute metric tables.
+ float matchCostTable[ctsz];
+ float mismatchCostTable[ctsz];
+ {
+ const float *dp = in.begin();
+ for (size_t i=0; i<sz; i++) {
+ // pVal is the probability that a bit is correct.
+ // ipVal is the probability that a bit is incorrect.
+ float pVal = dp[i];
+ if (pVal>0.5F) pVal = 1.0F-pVal;
+ float ipVal = 1.0F-pVal;
+ // This is a cheap approximation to an ideal cost function.
+ if (pVal<0.01F) pVal = 0.01;
+ if (ipVal<0.01F) ipVal = 0.01;
+ matchCostTable[i] = 0.25F/ipVal;
+ mismatchCostTable[i] = 0.25F/pVal;
+ }
+
+ // pad end of table with unknowns
+ for (size_t i=sz; i<ctsz; i++) {
+ matchCostTable[i] = 0.5F;
+ mismatchCostTable[i] = 0.5F;
+ }
+ }
+
+ {
+ decoder.initializeStates();
+ // Each sample of history[] carries its history.
+ // So we only have to process every iRate-th sample.
+ const unsigned step = decoder.iRate();
+ // input pointer
+ const uint32_t *ip = history + step - 1;
+ // output pointers
+ char *op = target.begin();
+ const char *const opt = target.end();
+ // table pointers
+ const float* match = matchCostTable;
+ const float* mismatch = mismatchCostTable;
+ size_t oCount = 0;
+ while (op<opt) {
+ // Viterbi algorithm
+ assert(match-matchCostTable<(int)(sizeof(matchCostTable)/sizeof(matchCostTable[0])-1));
+ assert(mismatch-mismatchCostTable<(int)(sizeof(mismatchCostTable)/sizeof(mismatchCostTable[0])-1));
+ const ViterbiTCH_AFS10_2::vCand &minCost = decoder.step(*ip, match, mismatch);
+ ip += step;
+ match += step;
+ mismatch += step;
+ // output
+ if (oCount>=deferral) *op++ = (minCost.iState >> deferral)&0x01;
+ oCount++;
+ }
+ }
+}
+
+
+
+ViterbiTCH_AFS7_95::ViterbiTCH_AFS7_95()
+{
+ assert(mDeferral < 32);
+ mCoeffs[0] = 0x06d;
+ mCoeffsFB[0] = 0x06d;
+ mCoeffs[1] = 0x053;
+ mCoeffsFB[1] = 0x06d;
+ mCoeffs[2] = 0x05f;
+ mCoeffsFB[2] = 0x06d;
+ for (unsigned i = 0; i < mIRate; i++) {
+ computeStateTables(i);
+ }
+ computeGeneratorTable();
+}
+
+
+
+
+void ViterbiTCH_AFS7_95::initializeStates()
+{
+ for (unsigned i=0; i<mIStates; i++) vitClear(mSurvivors[i]);
+ for (unsigned i=0; i<mNumCands; i++) vitClear(mCandidates[i]);
+}
+
+
+
+void ViterbiTCH_AFS7_95::computeStateTables(unsigned g)
+{
+ assert(g<mIRate);
+ for (unsigned state=0; state<mIStates; state++) {
+ for (unsigned in = 0; in <= 1; in++) {
+ uint32_t inputVal = (state<<1) | in;
+ mStateTable[g][inputVal] = applyPoly(inputVal, mCoeffs[g] ^ mCoeffsFB[g], mOrder+1) ^ in;
+ }
+ }
+}
+
+void ViterbiTCH_AFS7_95::computeGeneratorTable()
+{
+ for (unsigned index=0; index<mIStates*2; index++) {
+ uint32_t t = 0;
+ for (unsigned i = 0; i < mIRate; i++) {
+ t = (t << 1) | mStateTable[i][index];
+ }
+ mGeneratorTable[index] = t;
+ }
+}
+
+
+
+
+
+
+void ViterbiTCH_AFS7_95::branchCandidates()
+{
+ // Branch to generate new input states.
+ const vCand *sp = mSurvivors;
+ for (unsigned cand=0; cand<mNumCands; cand+=2) {
+ uint32_t oStateShifted = (sp->oState) << mIRate;
+ for (unsigned in = 0; in <= 1; in++) {
+ mCandidates[cand+in].iState = ((sp->iState) << 1) | in;
+ mCandidates[cand+in].cost = sp->cost;
+ uint32_t outputs = oStateShifted;
+ for (unsigned out = 0; out < mIRate; out++) {
+ char feedback = applyPoly(sp->rState[out], mCoeffsFB[out] ^ 1, mOrder+1);
+ char rState = (((sp->rState[out]) ^ feedback) << 1) | in;
+ mCandidates[cand+in].rState[out] = rState;
+ outputs |= (mGeneratorTable[rState & mCMask] & (1 << (mIRate - out - 1)));
+ }
+ mCandidates[cand+in].oState = outputs;
+ }
+ sp++;
+ }
+}
+
+
+void ViterbiTCH_AFS7_95::getSoftCostMetrics(const uint32_t inSample, const float *matchCost, const float *mismatchCost)
+{
+ const float *cTab[2] = {matchCost,mismatchCost};
+ for (unsigned i=0; i<mNumCands; i++) {
+ vCand& thisCand = mCandidates[i];
+ const unsigned mismatched = inSample ^ (thisCand.oState);
+ for (unsigned i = 0; i < mIRate; i++) {
+ thisCand.cost += cTab[(mismatched>>i)&0x01][mIRate-i-1];
+ }
+ }
+}
+
+
+void ViterbiTCH_AFS7_95::pruneCandidates()
+{
+ const vCand* c1 = mCandidates; // 0-prefix
+ const vCand* c2 = mCandidates + mIStates; // 1-prefix
+ for (unsigned i=0; i<mIStates; i++) {
+ if (c1[i].cost < c2[i].cost) mSurvivors[i] = c1[i];
+ else mSurvivors[i] = c2[i];
+ }
+}
+
+
+const ViterbiTCH_AFS7_95::vCand& ViterbiTCH_AFS7_95::minCost() const
+{
+ int minIndex = 0;
+ float minCost = mSurvivors[0].cost;
+ for (unsigned i=1; i<mIStates; i++) {
+ const float thisCost = mSurvivors[i].cost;
+ if (thisCost>=minCost) continue;
+ minCost = thisCost;
+ minIndex=i;
+ }
+ return mSurvivors[minIndex];
+}
+
+
+const ViterbiTCH_AFS7_95::vCand& ViterbiTCH_AFS7_95::step(uint32_t inSample, const float *probs, const float *iprobs)
+{
+ branchCandidates();
+ getSoftCostMetrics(inSample,probs,iprobs);
+ pruneCandidates();
+ return minCost();
+}
+
+
+
+void ViterbiTCH_AFS7_95::decode(const SoftVector &in, BitVector& target)
+{
+ ViterbiTCH_AFS7_95 &decoder = *this;
+ const size_t sz = in.size() - 18;
+ const unsigned deferral = decoder.deferral();
+ const size_t ctsz = sz + deferral*decoder.iRate();
+ assert(sz == decoder.iRate()*target.size());
+
+ // Build a "history" array where each element contains the full history.
+ uint32_t history[ctsz];
+ {
+ BitVector bits = in.sliced();
+ uint32_t accum = 0;
+ for (size_t i=0; i<sz; i++) {
+ accum = (accum<<1) | bits.bit(i);
+ history[i] = accum;
+ }
+ // Repeat last bit at the end.
+ for (size_t i=sz; i<ctsz; i++) {
+ accum = (accum<<1) | (accum & 0x01);
+ history[i] = accum;
+ }
+ }
+
+ // Precompute metric tables.
+ float matchCostTable[ctsz];
+ float mismatchCostTable[ctsz];
+ {
+ const float *dp = in.begin();
+ for (size_t i=0; i<sz; i++) {
+ // pVal is the probability that a bit is correct.
+ // ipVal is the probability that a bit is incorrect.
+ float pVal = dp[i];
+ if (pVal>0.5F) pVal = 1.0F-pVal;
+ float ipVal = 1.0F-pVal;
+ // This is a cheap approximation to an ideal cost function.
+ if (pVal<0.01F) pVal = 0.01;
+ if (ipVal<0.01F) ipVal = 0.01;
+ matchCostTable[i] = 0.25F/ipVal;
+ mismatchCostTable[i] = 0.25F/pVal;
+ }
+
+ // pad end of table with unknowns
+ for (size_t i=sz; i<ctsz; i++) {
+ matchCostTable[i] = 0.5F;
+ mismatchCostTable[i] = 0.5F;
+ }
+ }
+
+ {
+ decoder.initializeStates();
+ // Each sample of history[] carries its history.
+ // So we only have to process every iRate-th sample.
+ const unsigned step = decoder.iRate();
+ // input pointer
+ const uint32_t *ip = history + step - 1;
+ // output pointers
+ char *op = target.begin();
+ const char *const opt = target.end();
+ // table pointers
+ const float* match = matchCostTable;
+ const float* mismatch = mismatchCostTable;
+ size_t oCount = 0;
+ while (op<opt) {
+ // Viterbi algorithm
+ assert(match-matchCostTable<(int)(sizeof(matchCostTable)/sizeof(matchCostTable[0])-1));
+ assert(mismatch-mismatchCostTable<(int)(sizeof(mismatchCostTable)/sizeof(mismatchCostTable[0])-1));
+ const ViterbiTCH_AFS7_95::vCand &minCost = decoder.step(*ip, match, mismatch);
+ ip += step;
+ match += step;
+ mismatch += step;
+ // output
+ if (oCount>=deferral) *op++ = (minCost.iState >> deferral)&0x01;
+ oCount++;
+ }
+ }
+}
+
+
+
+ViterbiTCH_AFS7_4::ViterbiTCH_AFS7_4()
+{
+ assert(mDeferral < 32);
+ mCoeffs[0] = 0x01b;
+ mCoeffsFB[0] = 0x01f;
+ mCoeffs[1] = 0x015;
+ mCoeffsFB[1] = 0x01f;
+ mCoeffs[2] = 0x01f;
+ mCoeffsFB[2] = 0x01f;
+ for (unsigned i = 0; i < mIRate; i++) {
+ computeStateTables(i);
+ }
+ computeGeneratorTable();
+}
+
+
+
+
+void ViterbiTCH_AFS7_4::initializeStates()
+{
+ for (unsigned i=0; i<mIStates; i++) vitClear(mSurvivors[i]);
+ for (unsigned i=0; i<mNumCands; i++) vitClear(mCandidates[i]);
+}
+
+
+
+void ViterbiTCH_AFS7_4::computeStateTables(unsigned g)
+{
+ assert(g<mIRate);
+ for (unsigned state=0; state<mIStates; state++) {
+ for (unsigned in = 0; in <= 1; in++) {
+ uint32_t inputVal = (state<<1) | in;
+ mStateTable[g][inputVal] = applyPoly(inputVal, mCoeffs[g] ^ mCoeffsFB[g], mOrder+1) ^ in;
+ }
+ }
+}
+
+void ViterbiTCH_AFS7_4::computeGeneratorTable()
+{
+ for (unsigned index=0; index<mIStates*2; index++) {
+ uint32_t t = 0;
+ for (unsigned i = 0; i < mIRate; i++) {
+ t = (t << 1) | mStateTable[i][index];
+ }
+ mGeneratorTable[index] = t;
+ }
+}
+
+
+
+
+
+
+void ViterbiTCH_AFS7_4::branchCandidates()
+{
+ // Branch to generate new input states.
+ const vCand *sp = mSurvivors;
+ for (unsigned cand=0; cand<mNumCands; cand+=2) {
+ uint32_t oStateShifted = (sp->oState) << mIRate;
+ for (unsigned in = 0; in <= 1; in++) {
+ mCandidates[cand+in].iState = ((sp->iState) << 1) | in;
+ mCandidates[cand+in].cost = sp->cost;
+ uint32_t outputs = oStateShifted;
+ for (unsigned out = 0; out < mIRate; out++) {
+ char feedback = applyPoly(sp->rState[out], mCoeffsFB[out] ^ 1, mOrder+1);
+ char rState = (((sp->rState[out]) ^ feedback) << 1) | in;
+ mCandidates[cand+in].rState[out] = rState;
+ outputs |= (mGeneratorTable[rState & mCMask] & (1 << (mIRate - out - 1)));
+ }
+ mCandidates[cand+in].oState = outputs;
+ }
+ sp++;
+ }
+}
+
+
+void ViterbiTCH_AFS7_4::getSoftCostMetrics(const uint32_t inSample, const float *matchCost, const float *mismatchCost)
+{
+ const float *cTab[2] = {matchCost,mismatchCost};
+ for (unsigned i=0; i<mNumCands; i++) {
+ vCand& thisCand = mCandidates[i];
+ const unsigned mismatched = inSample ^ (thisCand.oState);
+ for (unsigned i = 0; i < mIRate; i++) {
+ thisCand.cost += cTab[(mismatched>>i)&0x01][mIRate-i-1];
+ }
+ }
+}
+
+
+void ViterbiTCH_AFS7_4::pruneCandidates()
+{
+ const vCand* c1 = mCandidates; // 0-prefix
+ const vCand* c2 = mCandidates + mIStates; // 1-prefix
+ for (unsigned i=0; i<mIStates; i++) {
+ if (c1[i].cost < c2[i].cost) mSurvivors[i] = c1[i];
+ else mSurvivors[i] = c2[i];
+ }
+}
+
+
+const ViterbiTCH_AFS7_4::vCand& ViterbiTCH_AFS7_4::minCost() const
+{
+ int minIndex = 0;
+ float minCost = mSurvivors[0].cost;
+ for (unsigned i=1; i<mIStates; i++) {
+ const float thisCost = mSurvivors[i].cost;
+ if (thisCost>=minCost) continue;
+ minCost = thisCost;
+ minIndex=i;
+ }
+ return mSurvivors[minIndex];
+}
+
+
+const ViterbiTCH_AFS7_4::vCand& ViterbiTCH_AFS7_4::step(uint32_t inSample, const float *probs, const float *iprobs)
+{
+ branchCandidates();
+ getSoftCostMetrics(inSample,probs,iprobs);
+ pruneCandidates();
+ return minCost();
+}
+
+
+
+void ViterbiTCH_AFS7_4::decode(const SoftVector &in, BitVector& target)
+{
+ ViterbiTCH_AFS7_4 &decoder = *this;
+ const size_t sz = in.size() - 12;
+ const unsigned deferral = decoder.deferral();
+ const size_t ctsz = sz + deferral*decoder.iRate();
+ assert(sz == decoder.iRate()*target.size());
+
+ // Build a "history" array where each element contains the full history.
+ uint32_t history[ctsz];
+ {
+ BitVector bits = in.sliced();
+ uint32_t accum = 0;
+ for (size_t i=0; i<sz; i++) {
+ accum = (accum<<1) | bits.bit(i);
+ history[i] = accum;
+ }
+ // Repeat last bit at the end.
+ for (size_t i=sz; i<ctsz; i++) {
+ accum = (accum<<1) | (accum & 0x01);
+ history[i] = accum;
+ }
+ }
+
+ // Precompute metric tables.
+ float matchCostTable[ctsz];
+ float mismatchCostTable[ctsz];
+ {
+ const float *dp = in.begin();
+ for (size_t i=0; i<sz; i++) {
+ // pVal is the probability that a bit is correct.
+ // ipVal is the probability that a bit is incorrect.
+ float pVal = dp[i];
+ if (pVal>0.5F) pVal = 1.0F-pVal;
+ float ipVal = 1.0F-pVal;
+ // This is a cheap approximation to an ideal cost function.
+ if (pVal<0.01F) pVal = 0.01;
+ if (ipVal<0.01F) ipVal = 0.01;
+ matchCostTable[i] = 0.25F/ipVal;
+ mismatchCostTable[i] = 0.25F/pVal;
+ }
+
+ // pad end of table with unknowns
+ for (size_t i=sz; i<ctsz; i++) {
+ matchCostTable[i] = 0.5F;
+ mismatchCostTable[i] = 0.5F;
+ }
+ }
+
+ {
+ decoder.initializeStates();
+ // Each sample of history[] carries its history.
+ // So we only have to process every iRate-th sample.
+ const unsigned step = decoder.iRate();
+ // input pointer
+ const uint32_t *ip = history + step - 1;
+ // output pointers
+ char *op = target.begin();
+ const char *const opt = target.end();
+ // table pointers
+ const float* match = matchCostTable;
+ const float* mismatch = mismatchCostTable;
+ size_t oCount = 0;
+ while (op<opt) {
+ // Viterbi algorithm
+ assert(match-matchCostTable<(int)(sizeof(matchCostTable)/sizeof(matchCostTable[0])-1));
+ assert(mismatch-mismatchCostTable<(int)(sizeof(mismatchCostTable)/sizeof(mismatchCostTable[0])-1));
+ const ViterbiTCH_AFS7_4::vCand &minCost = decoder.step(*ip, match, mismatch);
+ ip += step;
+ match += step;
+ mismatch += step;
+ // output
+ if (oCount>=deferral) *op++ = (minCost.iState >> deferral)&0x01;
+ oCount++;
+ }
+ }
+}
+
+
+
+ViterbiTCH_AFS6_7::ViterbiTCH_AFS6_7()
+{
+ assert(mDeferral < 32);
+ mCoeffs[0] = 0x01b;
+ mCoeffsFB[0] = 0x01f;
+ mCoeffs[1] = 0x015;
+ mCoeffsFB[1] = 0x01f;
+ mCoeffs[2] = 0x01f;
+ mCoeffsFB[2] = 0x01f;
+ mCoeffs[3] = 0x01f;
+ mCoeffsFB[3] = 0x01f;
+ for (unsigned i = 0; i < mIRate; i++) {
+ computeStateTables(i);
+ }
+ computeGeneratorTable();
+}
+
+
+
+
+void ViterbiTCH_AFS6_7::initializeStates()
+{
+ for (unsigned i=0; i<mIStates; i++) vitClear(mSurvivors[i]);
+ for (unsigned i=0; i<mNumCands; i++) vitClear(mCandidates[i]);
+}
+
+
+
+void ViterbiTCH_AFS6_7::computeStateTables(unsigned g)
+{
+ assert(g<mIRate);
+ for (unsigned state=0; state<mIStates; state++) {
+ for (unsigned in = 0; in <= 1; in++) {
+ uint32_t inputVal = (state<<1) | in;
+ mStateTable[g][inputVal] = applyPoly(inputVal, mCoeffs[g] ^ mCoeffsFB[g], mOrder+1) ^ in;
+ }
+ }
+}
+
+void ViterbiTCH_AFS6_7::computeGeneratorTable()
+{
+ for (unsigned index=0; index<mIStates*2; index++) {
+ uint32_t t = 0;
+ for (unsigned i = 0; i < mIRate; i++) {
+ t = (t << 1) | mStateTable[i][index];
+ }
+ mGeneratorTable[index] = t;
+ }
+}
+
+
+
+
+
+
+void ViterbiTCH_AFS6_7::branchCandidates()
+{
+ // Branch to generate new input states.
+ const vCand *sp = mSurvivors;
+ for (unsigned cand=0; cand<mNumCands; cand+=2) {
+ uint32_t oStateShifted = (sp->oState) << mIRate;
+ for (unsigned in = 0; in <= 1; in++) {
+ mCandidates[cand+in].iState = ((sp->iState) << 1) | in;
+ mCandidates[cand+in].cost = sp->cost;
+ uint32_t outputs = oStateShifted;
+ for (unsigned out = 0; out < mIRate; out++) {
+ char feedback = applyPoly(sp->rState[out], mCoeffsFB[out] ^ 1, mOrder+1);
+ char rState = (((sp->rState[out]) ^ feedback) << 1) | in;
+ mCandidates[cand+in].rState[out] = rState;
+ outputs |= (mGeneratorTable[rState & mCMask] & (1 << (mIRate - out - 1)));
+ }
+ mCandidates[cand+in].oState = outputs;
+ }
+ sp++;
+ }
+}
+
+
+void ViterbiTCH_AFS6_7::getSoftCostMetrics(const uint32_t inSample, const float *matchCost, const float *mismatchCost)
+{
+ const float *cTab[2] = {matchCost,mismatchCost};
+ for (unsigned i=0; i<mNumCands; i++) {
+ vCand& thisCand = mCandidates[i];
+ const unsigned mismatched = inSample ^ (thisCand.oState);
+ for (unsigned i = 0; i < mIRate; i++) {
+ thisCand.cost += cTab[(mismatched>>i)&0x01][mIRate-i-1];
+ }
+ }
+}
+
+
+void ViterbiTCH_AFS6_7::pruneCandidates()
+{
+ const vCand* c1 = mCandidates; // 0-prefix
+ const vCand* c2 = mCandidates + mIStates; // 1-prefix
+ for (unsigned i=0; i<mIStates; i++) {
+ if (c1[i].cost < c2[i].cost) mSurvivors[i] = c1[i];
+ else mSurvivors[i] = c2[i];
+ }
+}
+
+
+const ViterbiTCH_AFS6_7::vCand& ViterbiTCH_AFS6_7::minCost() const
+{
+ int minIndex = 0;
+ float minCost = mSurvivors[0].cost;
+ for (unsigned i=1; i<mIStates; i++) {
+ const float thisCost = mSurvivors[i].cost;
+ if (thisCost>=minCost) continue;
+ minCost = thisCost;
+ minIndex=i;
+ }
+ return mSurvivors[minIndex];
+}
+
+
+const ViterbiTCH_AFS6_7::vCand& ViterbiTCH_AFS6_7::step(uint32_t inSample, const float *probs, const float *iprobs)
+{
+ branchCandidates();
+ getSoftCostMetrics(inSample,probs,iprobs);
+ pruneCandidates();
+ return minCost();
+}
+
+
+
+void ViterbiTCH_AFS6_7::decode(const SoftVector &in, BitVector& target)
+{
+ ViterbiTCH_AFS6_7 &decoder = *this;
+ const size_t sz = in.size() - 16;
+ const unsigned deferral = decoder.deferral();
+ const size_t ctsz = sz + deferral*decoder.iRate();
+ assert(sz == decoder.iRate()*target.size());
+
+ // Build a "history" array where each element contains the full history.
+ uint32_t history[ctsz];
+ {
+ BitVector bits = in.sliced();
+ uint32_t accum = 0;
+ for (size_t i=0; i<sz; i++) {
+ accum = (accum<<1) | bits.bit(i);
+ history[i] = accum;
+ }
+ // Repeat last bit at the end.
+ for (size_t i=sz; i<ctsz; i++) {
+ accum = (accum<<1) | (accum & 0x01);
+ history[i] = accum;
+ }
+ }
+
+ // Precompute metric tables.
+ float matchCostTable[ctsz];
+ float mismatchCostTable[ctsz];
+ {
+ const float *dp = in.begin();
+ for (size_t i=0; i<sz; i++) {
+ // pVal is the probability that a bit is correct.
+ // ipVal is the probability that a bit is incorrect.
+ float pVal = dp[i];
+ if (pVal>0.5F) pVal = 1.0F-pVal;
+ float ipVal = 1.0F-pVal;
+ // This is a cheap approximation to an ideal cost function.
+ if (pVal<0.01F) pVal = 0.01;
+ if (ipVal<0.01F) ipVal = 0.01;
+ matchCostTable[i] = 0.25F/ipVal;
+ mismatchCostTable[i] = 0.25F/pVal;
+ }
+
+ // pad end of table with unknowns
+ for (size_t i=sz; i<ctsz; i++) {
+ matchCostTable[i] = 0.5F;
+ mismatchCostTable[i] = 0.5F;
+ }
+ }
+
+ {
+ decoder.initializeStates();
+ // Each sample of history[] carries its history.
+ // So we only have to process every iRate-th sample.
+ const unsigned step = decoder.iRate();
+ // input pointer
+ const uint32_t *ip = history + step - 1;
+ // output pointers
+ char *op = target.begin();
+ const char *const opt = target.end();
+ // table pointers
+ const float* match = matchCostTable;
+ const float* mismatch = mismatchCostTable;
+ size_t oCount = 0;
+ while (op<opt) {
+ // Viterbi algorithm
+ assert(match-matchCostTable<(int)(sizeof(matchCostTable)/sizeof(matchCostTable[0])-1));
+ assert(mismatch-mismatchCostTable<(int)(sizeof(mismatchCostTable)/sizeof(mismatchCostTable[0])-1));
+ const ViterbiTCH_AFS6_7::vCand &minCost = decoder.step(*ip, match, mismatch);
+ ip += step;
+ match += step;
+ mismatch += step;
+ // output
+ if (oCount>=deferral) *op++ = (minCost.iState >> deferral)&0x01;
+ oCount++;
+ }
+ }
+}
+
+
+
+ViterbiTCH_AFS5_9::ViterbiTCH_AFS5_9()
+{
+ assert(mDeferral < 32);
+ mCoeffs[0] = 0x06d;
+ mCoeffsFB[0] = 0x05f;
+ mCoeffs[1] = 0x053;
+ mCoeffsFB[1] = 0x05f;
+ mCoeffs[2] = 0x05f;
+ mCoeffsFB[2] = 0x05f;
+ mCoeffs[3] = 0x05f;
+ mCoeffsFB[3] = 0x05f;
+ for (unsigned i = 0; i < mIRate; i++) {
+ computeStateTables(i);
+ }
+ computeGeneratorTable();
+}
+
+
+
+
+void ViterbiTCH_AFS5_9::initializeStates()
+{
+ for (unsigned i=0; i<mIStates; i++) vitClear(mSurvivors[i]);
+ for (unsigned i=0; i<mNumCands; i++) vitClear(mCandidates[i]);
+}
+
+
+
+void ViterbiTCH_AFS5_9::computeStateTables(unsigned g)
+{
+ assert(g<mIRate);
+ for (unsigned state=0; state<mIStates; state++) {
+ for (unsigned in = 0; in <= 1; in++) {
+ uint32_t inputVal = (state<<1) | in;
+ mStateTable[g][inputVal] = applyPoly(inputVal, mCoeffs[g] ^ mCoeffsFB[g], mOrder+1) ^ in;
+ }
+ }
+}
+
+void ViterbiTCH_AFS5_9::computeGeneratorTable()
+{
+ for (unsigned index=0; index<mIStates*2; index++) {
+ uint32_t t = 0;
+ for (unsigned i = 0; i < mIRate; i++) {
+ t = (t << 1) | mStateTable[i][index];
+ }
+ mGeneratorTable[index] = t;
+ }
+}
+
+
+
+
+
+
+void ViterbiTCH_AFS5_9::branchCandidates()
+{
+ // Branch to generate new input states.
+ const vCand *sp = mSurvivors;
+ for (unsigned cand=0; cand<mNumCands; cand+=2) {
+ uint32_t oStateShifted = (sp->oState) << mIRate;
+ for (unsigned in = 0; in <= 1; in++) {
+ mCandidates[cand+in].iState = ((sp->iState) << 1) | in;
+ mCandidates[cand+in].cost = sp->cost;
+ uint32_t outputs = oStateShifted;
+ for (unsigned out = 0; out < mIRate; out++) {
+ char feedback = applyPoly(sp->rState[out], mCoeffsFB[out] ^ 1, mOrder+1);
+ char rState = (((sp->rState[out]) ^ feedback) << 1) | in;
+ mCandidates[cand+in].rState[out] = rState;
+ outputs |= (mGeneratorTable[rState & mCMask] & (1 << (mIRate - out - 1)));
+ }
+ mCandidates[cand+in].oState = outputs;
+ }
+ sp++;
+ }
+}
+
+
+void ViterbiTCH_AFS5_9::getSoftCostMetrics(const uint32_t inSample, const float *matchCost, const float *mismatchCost)
+{
+ const float *cTab[2] = {matchCost,mismatchCost};
+ for (unsigned i=0; i<mNumCands; i++) {
+ vCand& thisCand = mCandidates[i];
+ const unsigned mismatched = inSample ^ (thisCand.oState);
+ for (unsigned i = 0; i < mIRate; i++) {
+ thisCand.cost += cTab[(mismatched>>i)&0x01][mIRate-i-1];
+ }
+ }
+}
+
+
+void ViterbiTCH_AFS5_9::pruneCandidates()
+{
+ const vCand* c1 = mCandidates; // 0-prefix
+ const vCand* c2 = mCandidates + mIStates; // 1-prefix
+ for (unsigned i=0; i<mIStates; i++) {
+ if (c1[i].cost < c2[i].cost) mSurvivors[i] = c1[i];
+ else mSurvivors[i] = c2[i];
+ }
+}
+
+
+const ViterbiTCH_AFS5_9::vCand& ViterbiTCH_AFS5_9::minCost() const
+{
+ int minIndex = 0;
+ float minCost = mSurvivors[0].cost;
+ for (unsigned i=1; i<mIStates; i++) {
+ const float thisCost = mSurvivors[i].cost;
+ if (thisCost>=minCost) continue;
+ minCost = thisCost;
+ minIndex=i;
+ }
+ return mSurvivors[minIndex];
+}
+
+
+const ViterbiTCH_AFS5_9::vCand& ViterbiTCH_AFS5_9::step(uint32_t inSample, const float *probs, const float *iprobs)
+{
+ branchCandidates();
+ getSoftCostMetrics(inSample,probs,iprobs);
+ pruneCandidates();
+ return minCost();
+}
+
+
+
+void ViterbiTCH_AFS5_9::decode(const SoftVector &in, BitVector& target)
+{
+ ViterbiTCH_AFS5_9 &decoder = *this;
+ const size_t sz = in.size() - 24;
+ const unsigned deferral = decoder.deferral();
+ const size_t ctsz = sz + deferral*decoder.iRate();
+ assert(sz == decoder.iRate()*target.size());
+
+ // Build a "history" array where each element contains the full history.
+ uint32_t history[ctsz];
+ {
+ BitVector bits = in.sliced();
+ uint32_t accum = 0;
+ for (size_t i=0; i<sz; i++) {
+ accum = (accum<<1) | bits.bit(i);
+ history[i] = accum;
+ }
+ // Repeat last bit at the end.
+ for (size_t i=sz; i<ctsz; i++) {
+ accum = (accum<<1) | (accum & 0x01);
+ history[i] = accum;
+ }
+ }
+
+ // Precompute metric tables.
+ float matchCostTable[ctsz];
+ float mismatchCostTable[ctsz];
+ {
+ const float *dp = in.begin();
+ for (size_t i=0; i<sz; i++) {
+ // pVal is the probability that a bit is correct.
+ // ipVal is the probability that a bit is incorrect.
+ float pVal = dp[i];
+ if (pVal>0.5F) pVal = 1.0F-pVal;
+ float ipVal = 1.0F-pVal;
+ // This is a cheap approximation to an ideal cost function.
+ if (pVal<0.01F) pVal = 0.01;
+ if (ipVal<0.01F) ipVal = 0.01;
+ matchCostTable[i] = 0.25F/ipVal;
+ mismatchCostTable[i] = 0.25F/pVal;
+ }
+
+ // pad end of table with unknowns
+ for (size_t i=sz; i<ctsz; i++) {
+ matchCostTable[i] = 0.5F;
+ mismatchCostTable[i] = 0.5F;
+ }
+ }
+
+ {
+ decoder.initializeStates();
+ // Each sample of history[] carries its history.
+ // So we only have to process every iRate-th sample.
+ const unsigned step = decoder.iRate();
+ // input pointer
+ const uint32_t *ip = history + step - 1;
+ // output pointers
+ char *op = target.begin();
+ const char *const opt = target.end();
+ // table pointers
+ const float* match = matchCostTable;
+ const float* mismatch = mismatchCostTable;
+ size_t oCount = 0;
+ while (op<opt) {
+ // Viterbi algorithm
+ assert(match-matchCostTable<(int)(sizeof(matchCostTable)/sizeof(matchCostTable[0])-1));
+ assert(mismatch-mismatchCostTable<(int)(sizeof(mismatchCostTable)/sizeof(mismatchCostTable[0])-1));
+ const ViterbiTCH_AFS5_9::vCand &minCost = decoder.step(*ip, match, mismatch);
+ ip += step;
+ match += step;
+ mismatch += step;
+ // output
+ if (oCount>=deferral) *op++ = (minCost.iState >> deferral)&0x01;
+ oCount++;
+ }
+ }
+}
+
+
+
+ViterbiTCH_AFS5_15::ViterbiTCH_AFS5_15()
+{
+ assert(mDeferral < 32);
+ mCoeffs[0] = 0x01b;
+ mCoeffsFB[0] = 0x01f;
+ mCoeffs[1] = 0x01b;
+ mCoeffsFB[1] = 0x01f;
+ mCoeffs[2] = 0x015;
+ mCoeffsFB[2] = 0x01f;
+ mCoeffs[3] = 0x01f;
+ mCoeffsFB[3] = 0x01f;
+ mCoeffs[4] = 0x01f;
+ mCoeffsFB[4] = 0x01f;
+ for (unsigned i = 0; i < mIRate; i++) {
+ computeStateTables(i);
+ }
+ computeGeneratorTable();
+}
+
+
+
+
+void ViterbiTCH_AFS5_15::initializeStates()
+{
+ for (unsigned i=0; i<mIStates; i++) vitClear(mSurvivors[i]);
+ for (unsigned i=0; i<mNumCands; i++) vitClear(mCandidates[i]);
+}
+
+
+
+void ViterbiTCH_AFS5_15::computeStateTables(unsigned g)
+{
+ assert(g<mIRate);
+ for (unsigned state=0; state<mIStates; state++) {
+ for (unsigned in = 0; in <= 1; in++) {
+ uint32_t inputVal = (state<<1) | in;
+ mStateTable[g][inputVal] = applyPoly(inputVal, mCoeffs[g] ^ mCoeffsFB[g], mOrder+1) ^ in;
+ }
+ }
+}
+
+void ViterbiTCH_AFS5_15::computeGeneratorTable()
+{
+ for (unsigned index=0; index<mIStates*2; index++) {
+ uint32_t t = 0;
+ for (unsigned i = 0; i < mIRate; i++) {
+ t = (t << 1) | mStateTable[i][index];
+ }
+ mGeneratorTable[index] = t;
+ }
+}
+
+
+
+
+
+
+void ViterbiTCH_AFS5_15::branchCandidates()
+{
+ // Branch to generate new input states.
+ const vCand *sp = mSurvivors;
+ for (unsigned cand=0; cand<mNumCands; cand+=2) {
+ uint32_t oStateShifted = (sp->oState) << mIRate;
+ for (unsigned in = 0; in <= 1; in++) {
+ mCandidates[cand+in].iState = ((sp->iState) << 1) | in;
+ mCandidates[cand+in].cost = sp->cost;
+ uint32_t outputs = oStateShifted;
+ for (unsigned out = 0; out < mIRate; out++) {
+ char feedback = applyPoly(sp->rState[out], mCoeffsFB[out] ^ 1, mOrder+1);
+ char rState = (((sp->rState[out]) ^ feedback) << 1) | in;
+ mCandidates[cand+in].rState[out] = rState;
+ outputs |= (mGeneratorTable[rState & mCMask] & (1 << (mIRate - out - 1)));
+ }
+ mCandidates[cand+in].oState = outputs;
+ }
+ sp++;
+ }
+}
+
+
+void ViterbiTCH_AFS5_15::getSoftCostMetrics(const uint32_t inSample, const float *matchCost, const float *mismatchCost)
+{
+ const float *cTab[2] = {matchCost,mismatchCost};
+ for (unsigned i=0; i<mNumCands; i++) {
+ vCand& thisCand = mCandidates[i];
+ const unsigned mismatched = inSample ^ (thisCand.oState);
+ for (unsigned i = 0; i < mIRate; i++) {
+ thisCand.cost += cTab[(mismatched>>i)&0x01][mIRate-i-1];
+ }
+ }
+}
+
+
+void ViterbiTCH_AFS5_15::pruneCandidates()
+{
+ const vCand* c1 = mCandidates; // 0-prefix
+ const vCand* c2 = mCandidates + mIStates; // 1-prefix
+ for (unsigned i=0; i<mIStates; i++) {
+ if (c1[i].cost < c2[i].cost) mSurvivors[i] = c1[i];
+ else mSurvivors[i] = c2[i];
+ }
+}
+
+
+const ViterbiTCH_AFS5_15::vCand& ViterbiTCH_AFS5_15::minCost() const
+{
+ int minIndex = 0;
+ float minCost = mSurvivors[0].cost;
+ for (unsigned i=1; i<mIStates; i++) {
+ const float thisCost = mSurvivors[i].cost;
+ if (thisCost>=minCost) continue;
+ minCost = thisCost;
+ minIndex=i;
+ }
+ return mSurvivors[minIndex];
+}
+
+
+const ViterbiTCH_AFS5_15::vCand& ViterbiTCH_AFS5_15::step(uint32_t inSample, const float *probs, const float *iprobs)
+{
+ branchCandidates();
+ getSoftCostMetrics(inSample,probs,iprobs);
+ pruneCandidates();
+ return minCost();
+}
+
+
+
+void ViterbiTCH_AFS5_15::decode(const SoftVector &in, BitVector& target)
+{
+ ViterbiTCH_AFS5_15 &decoder = *this;
+ const size_t sz = in.size() - 20;
+ const unsigned deferral = decoder.deferral();
+ const size_t ctsz = sz + deferral*decoder.iRate();
+ assert(sz == decoder.iRate()*target.size());
+
+ // Build a "history" array where each element contains the full history.
+ uint32_t history[ctsz];
+ {
+ BitVector bits = in.sliced();
+ uint32_t accum = 0;
+ for (size_t i=0; i<sz; i++) {
+ accum = (accum<<1) | bits.bit(i);
+ history[i] = accum;
+ }
+ // Repeat last bit at the end.
+ for (size_t i=sz; i<ctsz; i++) {
+ accum = (accum<<1) | (accum & 0x01);
+ history[i] = accum;
+ }
+ }
+
+ // Precompute metric tables.
+ float matchCostTable[ctsz];
+ float mismatchCostTable[ctsz];
+ {
+ const float *dp = in.begin();
+ for (size_t i=0; i<sz; i++) {
+ // pVal is the probability that a bit is correct.
+ // ipVal is the probability that a bit is incorrect.
+ float pVal = dp[i];
+ if (pVal>0.5F) pVal = 1.0F-pVal;
+ float ipVal = 1.0F-pVal;
+ // This is a cheap approximation to an ideal cost function.
+ if (pVal<0.01F) pVal = 0.01;
+ if (ipVal<0.01F) ipVal = 0.01;
+ matchCostTable[i] = 0.25F/ipVal;
+ mismatchCostTable[i] = 0.25F/pVal;
+ }
+
+ // pad end of table with unknowns
+ for (size_t i=sz; i<ctsz; i++) {
+ matchCostTable[i] = 0.5F;
+ mismatchCostTable[i] = 0.5F;
+ }
+ }
+
+ {
+ decoder.initializeStates();
+ // Each sample of history[] carries its history.
+ // So we only have to process every iRate-th sample.
+ const unsigned step = decoder.iRate();
+ // input pointer
+ const uint32_t *ip = history + step - 1;
+ // output pointers
+ char *op = target.begin();
+ const char *const opt = target.end();
+ // table pointers
+ const float* match = matchCostTable;
+ const float* mismatch = mismatchCostTable;
+ size_t oCount = 0;
+ while (op<opt) {
+ // Viterbi algorithm
+ assert(match-matchCostTable<(int)(sizeof(matchCostTable)/sizeof(matchCostTable[0])-1));
+ assert(mismatch-mismatchCostTable<(int)(sizeof(mismatchCostTable)/sizeof(mismatchCostTable[0])-1));
+ const ViterbiTCH_AFS5_15::vCand &minCost = decoder.step(*ip, match, mismatch);
+ ip += step;
+ match += step;
+ mismatch += step;
+ // output
+ if (oCount>=deferral) *op++ = (minCost.iState >> deferral)&0x01;
+ oCount++;
+ }
+ }
+}
+
+
+
+ViterbiTCH_AFS4_75::ViterbiTCH_AFS4_75()
+{
+ assert(mDeferral < 32);
+ mCoeffs[0] = 0x06d;
+ mCoeffsFB[0] = 0x05f;
+ mCoeffs[1] = 0x06d;
+ mCoeffsFB[1] = 0x05f;
+ mCoeffs[2] = 0x053;
+ mCoeffsFB[2] = 0x05f;
+ mCoeffs[3] = 0x05f;
+ mCoeffsFB[3] = 0x05f;
+ mCoeffs[4] = 0x05f;
+ mCoeffsFB[4] = 0x05f;
+ for (unsigned i = 0; i < mIRate; i++) {
+ computeStateTables(i);
+ }
+ computeGeneratorTable();
+}
+
+
+
+
+void ViterbiTCH_AFS4_75::initializeStates()
+{
+ for (unsigned i=0; i<mIStates; i++) vitClear(mSurvivors[i]);
+ for (unsigned i=0; i<mNumCands; i++) vitClear(mCandidates[i]);
+}
+
+
+
+void ViterbiTCH_AFS4_75::computeStateTables(unsigned g)
+{
+ assert(g<mIRate);
+ for (unsigned state=0; state<mIStates; state++) {
+ for (unsigned in = 0; in <= 1; in++) {
+ uint32_t inputVal = (state<<1) | in;
+ mStateTable[g][inputVal] = applyPoly(inputVal, mCoeffs[g] ^ mCoeffsFB[g], mOrder+1) ^ in;
+ }
+ }
+}
+
+void ViterbiTCH_AFS4_75::computeGeneratorTable()
+{
+ for (unsigned index=0; index<mIStates*2; index++) {
+ uint32_t t = 0;
+ for (unsigned i = 0; i < mIRate; i++) {
+ t = (t << 1) | mStateTable[i][index];
+ }
+ mGeneratorTable[index] = t;
+ }
+}
+
+
+
+
+
+
+void ViterbiTCH_AFS4_75::branchCandidates()
+{
+ // Branch to generate new input states.
+ const vCand *sp = mSurvivors;
+ for (unsigned cand=0; cand<mNumCands; cand+=2) {
+ uint32_t oStateShifted = (sp->oState) << mIRate;
+ for (unsigned in = 0; in <= 1; in++) {
+ mCandidates[cand+in].iState = ((sp->iState) << 1) | in;
+ mCandidates[cand+in].cost = sp->cost;
+ uint32_t outputs = oStateShifted;
+ for (unsigned out = 0; out < mIRate; out++) {
+ char feedback = applyPoly(sp->rState[out], mCoeffsFB[out] ^ 1, mOrder+1);
+ char rState = (((sp->rState[out]) ^ feedback) << 1) | in;
+ mCandidates[cand+in].rState[out] = rState;
+ outputs |= (mGeneratorTable[rState & mCMask] & (1 << (mIRate - out - 1)));
+ }
+ mCandidates[cand+in].oState = outputs;
+ }
+ sp++;
+ }
+}
+
+
+void ViterbiTCH_AFS4_75::getSoftCostMetrics(const uint32_t inSample, const float *matchCost, const float *mismatchCost)
+{
+ const float *cTab[2] = {matchCost,mismatchCost};
+ for (unsigned i=0; i<mNumCands; i++) {
+ vCand& thisCand = mCandidates[i];
+ const unsigned mismatched = inSample ^ (thisCand.oState);
+ for (unsigned i = 0; i < mIRate; i++) {
+ thisCand.cost += cTab[(mismatched>>i)&0x01][mIRate-i-1];
+ }
+ }
+}
+
+
+void ViterbiTCH_AFS4_75::pruneCandidates()
+{
+ const vCand* c1 = mCandidates; // 0-prefix
+ const vCand* c2 = mCandidates + mIStates; // 1-prefix
+ for (unsigned i=0; i<mIStates; i++) {
+ if (c1[i].cost < c2[i].cost) mSurvivors[i] = c1[i];
+ else mSurvivors[i] = c2[i];
+ }
+}
+
+
+const ViterbiTCH_AFS4_75::vCand& ViterbiTCH_AFS4_75::minCost() const
+{
+ int minIndex = 0;
+ float minCost = mSurvivors[0].cost;
+ for (unsigned i=1; i<mIStates; i++) {
+ const float thisCost = mSurvivors[i].cost;
+ if (thisCost>=minCost) continue;
+ minCost = thisCost;
+ minIndex=i;
+ }
+ return mSurvivors[minIndex];
+}
+
+
+const ViterbiTCH_AFS4_75::vCand& ViterbiTCH_AFS4_75::step(uint32_t inSample, const float *probs, const float *iprobs)
+{
+ branchCandidates();
+ getSoftCostMetrics(inSample,probs,iprobs);
+ pruneCandidates();
+ return minCost();
+}
+
+
+
+void ViterbiTCH_AFS4_75::decode(const SoftVector &in, BitVector& target)
+{
+ ViterbiTCH_AFS4_75 &decoder = *this;
+ const size_t sz = in.size() - 30;
+ const unsigned deferral = decoder.deferral();
+ const size_t ctsz = sz + deferral*decoder.iRate();
+ assert(sz == decoder.iRate()*target.size());
+
+ // Build a "history" array where each element contains the full history.
+ uint32_t history[ctsz];
+ {
+ BitVector bits = in.sliced();
+ uint32_t accum = 0;
+ for (size_t i=0; i<sz; i++) {
+ accum = (accum<<1) | bits.bit(i);
+ history[i] = accum;
+ }
+ // Repeat last bit at the end.
+ for (size_t i=sz; i<ctsz; i++) {
+ accum = (accum<<1) | (accum & 0x01);
+ history[i] = accum;
+ }
+ }
+
+ // Precompute metric tables.
+ float matchCostTable[ctsz];
+ float mismatchCostTable[ctsz];
+ {
+ const float *dp = in.begin();
+ for (size_t i=0; i<sz; i++) {
+ // pVal is the probability that a bit is correct.
+ // ipVal is the probability that a bit is incorrect.
+ float pVal = dp[i];
+ if (pVal>0.5F) pVal = 1.0F-pVal;
+ float ipVal = 1.0F-pVal;
+ // This is a cheap approximation to an ideal cost function.
+ if (pVal<0.01F) pVal = 0.01;
+ if (ipVal<0.01F) ipVal = 0.01;
+ matchCostTable[i] = 0.25F/ipVal;
+ mismatchCostTable[i] = 0.25F/pVal;
+ }
+
+ // pad end of table with unknowns
+ for (size_t i=sz; i<ctsz; i++) {
+ matchCostTable[i] = 0.5F;
+ mismatchCostTable[i] = 0.5F;
+ }
+ }
+
+ {
+ decoder.initializeStates();
+ // Each sample of history[] carries its history.
+ // So we only have to process every iRate-th sample.
+ const unsigned step = decoder.iRate();
+ // input pointer
+ const uint32_t *ip = history + step - 1;
+ // output pointers
+ char *op = target.begin();
+ const char *const opt = target.end();
+ // table pointers
+ const float* match = matchCostTable;
+ const float* mismatch = mismatchCostTable;
+ size_t oCount = 0;
+ while (op<opt) {
+ // Viterbi algorithm
+ assert(match-matchCostTable<(int)(sizeof(matchCostTable)/sizeof(matchCostTable[0])-1));
+ assert(mismatch-mismatchCostTable<(int)(sizeof(mismatchCostTable)/sizeof(mismatchCostTable[0])-1));
+ const ViterbiTCH_AFS4_75::vCand &minCost = decoder.step(*ip, match, mismatch);
+ ip += step;
+ match += step;
+ mismatch += step;
+ // output
+ if (oCount>=deferral) *op++ = (minCost.iState >> deferral)&0x01;
+ oCount++;
+ }
+ }
+}
+
+
+
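
A possible round-trip check of the classes added above (illustrative only, not part of the commit; it assumes the BitVector/SoftVector interfaces from the BitVector.h brought in by this merge and the frame sizes asserted in ViterbiTCH_AFS12_2::encode() and decode()):

#include <cstdio>

#include "AmrCoder.h"
#include "BitVector.h"

int main()
{
    ViterbiTCH_AFS12_2 coder;

    BitVector src(250);                     // encode() asserts in.size() == 250
    for (size_t i = 0; i < src.size(); i++) src[i] = i & 1;

    BitVector coded(508);                   // encode() asserts target.size() == 508
    coder.encode(src, coded);

    // Noiseless soft decisions: 1.0f means "certainly one", 0.0f "certainly zero".
    SoftVector soft(coded.size());
    for (size_t i = 0; i < soft.size(); i++) soft[i] = coded.bit(i) ? 1.0f : 0.0f;

    BitVector decoded(250);                 // decode() asserts in.size()-8 == 2*target.size()
    coder.decode(soft, decoded);

    unsigned errors = 0;
    for (size_t i = 0; i < decoded.size(); i++) errors += (decoded.bit(i) != src.bit(i));
    std::printf("bit errors after round trip: %u\n", errors);  // expected: 0 for clean input
    return 0;
}
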
diff --git a/lib/decoding/AmrCoder.h b/lib/decoding/AmrCoder.h
new file mode 100644
index 0000000..c1df823
--- /dev/null
+++ b/lib/decoding/AmrCoder.h
@@ -0,0 +1,936 @@
+/*
+* Copyright 2013, 2014 Range Networks, Inc.
+*
+* This software is distributed under multiple licenses;
+* see the COPYING file in the main directory for licensing
+* information for this specific distribution.
+*
+* This use of this software may be subject to additional restrictions.
+* See the LEGAL file in the main directory for details.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+
+*/
+#ifndef _AMRCODER_H_
+#define _AMRCODER_H_
+#include <stdint.h>
+#include "BitVector.h"
+#include "Viterbi.h"
+
+
+
+/**
+ Class to represent recursive systematic convolutional coders/decoders of rate 1/2, memory length 4.
+*/
+class ViterbiTCH_AFS12_2 : public ViterbiBase {
+
+ private:
+ /**@name Lots of precomputed elements so the compiler can optimize like hell. */
+ //@{
+ /**@name Core values. */
+ //@{
+ static const unsigned mIRate = 2; ///< reciprocal of rate
+ static const unsigned mOrder = 4; ///< memory length of generators
+ //@}
+ /**@name Derived values. */
+ //@{
+ static const unsigned mIStates = 0x01 << mOrder; ///< number of states, number of survivors
+ static const uint32_t mSMask = mIStates-1; ///< survivor mask
+ static const uint32_t mCMask = (mSMask<<1) | 0x01; ///< candidate mask
+ static const uint32_t mOMask = (0x01<<mIRate)-1; ///< output mask, all iRate low bits set
+ static const unsigned mNumCands = mIStates*2; ///< number of candidates to generate during branching
+ static const unsigned mDeferral = 6*mOrder; ///< deferral to be used
+ //@}
+ //@}
+
+ /** Precomputed tables. */
+ //@{
+ uint32_t mCoeffs[mIRate]; ///< output polynomial for each generator
+ uint32_t mCoeffsFB[mIRate]; ///< feedback polynomial for each generator
+ uint32_t mStateTable[mIRate][2*mIStates]; ///< precomputed generator output tables
+ uint32_t mGeneratorTable[2*mIStates]; ///< precomputed coder output table
+ //@}
+
+ public:
+
+ /**
+ A candidate sequence in a Viterbi decoder.
+ The 32-bit state register can support a deferral of 6 with a 4th-order coder.
+ */
+ typedef struct candStruct {
+ uint32_t iState; ///< encoder input associated with this candidate
+ uint32_t oState; ///< encoder output associated with this candidate
+ char rState[mIRate];///< real states of encoders associated with this candidate
+ float cost; ///< cost (metric value), float to support soft inputs
+ } vCand;
+
+ /** Clear a structure. */
+ void vitClear(vCand& v)
+ {
+ v.iState=0;
+ v.oState=0;
+ v.cost=0;
+ for (unsigned i = 0; i < mIRate; i++) v.rState[i] = 0;
+ }
+
+
+ private:
+
+ /**@name Survivors and candidates. */
+ //@{
+ vCand mSurvivors[mIStates]; ///< current survivor pool
+ vCand mCandidates[2*mIStates]; ///< current candidate pool
+ //@}
+
+ public:
+
+ unsigned iRate() const { return mIRate; }
+ uint32_t cMask() const { return mCMask; }
+ uint32_t stateTable(unsigned g, unsigned i) const { return mStateTable[g][i]; }
+ unsigned deferral() const { return mDeferral; }
+
+
+ ViterbiTCH_AFS12_2();
+
+ /** Set all cost metrics to zero. */
+ void initializeStates();
+
+ /**
+ Full cycle of the Viterbi algorithm: branch, metrics, prune, select.
+ @return reference to minimum-cost candidate.
+ */
+ const vCand& step(uint32_t inSample, const float *probs, const float *iprobs);
+
+ private:
+
+ /** Branch survivors into new candidates. */
+ void branchCandidates();
+
+ /** Compute cost metrics for soft-inputs. */
+ void getSoftCostMetrics(uint32_t inSample, const float *probs, const float *iprobs);
+
+ /** Select survivors from the candidate set. */
+ void pruneCandidates();
+
+ /** Find the minimum cost survivor. */
+ const vCand& minCost() const;
+
+ /**
+ Precompute the state tables.
+ @param g Generator index 0..((1/rate)-1)
+ */
+ void computeStateTables(unsigned g);
+
+ /**
+ Precompute the generator outputs.
+ mCoeffs must be defined first.
+ */
+ void computeGeneratorTable();
+ void encode(const BitVector &in, BitVector& target) const;
+ void decode(const SoftVector &in, BitVector& target);
+};
+
+
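All seven ViterbiTCH_AFS* coders declared in this header expose the same interface; they differ only in mIRate, mOrder, the deferral constant and the generator polynomials installed by their constructors. A minimal usage sketch, for illustration only: it assumes ViterbiBase (declared in Viterbi.h, not shown in this hunk) exposes encode() and decode() as public virtuals which the private encode()/decode() members declared above override, and the buffer sizes follow the 12.2 kbit/s coder (250 input bits, 508 coded bits); all variable names are invented.

    ViterbiTCH_AFS12_2 amr122;
    ViterbiBase &coder = amr122;          // call through the base-class interface

    BitVector classOneBits(250);          // filled in by the channel coder
    BitVector convolved(508);             // rate-1/2 output including tail handling
    coder.encode(classOneBits, convolved);

    SoftVector received(convolved);       // in a real receiver this comes from the demodulator
    BitVector recovered(250);
    coder.decode(received, recovered);

The in-tree caller is tch_f_decoder_impl.cc, which is modified elsewhere in this merge.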
+
+/**
+ Class to represent recursive systematic convolutional coders/decoders of rate 1/3, memory length 4.
+*/
+class ViterbiTCH_AFS10_2 : public ViterbiBase {
+
+ private:
+ /**@name Lots of precomputed elements so the compiler can optimize like hell. */
+ //@{
+ /**@name Core values. */
+ //@{
+ static const unsigned mIRate = 3; ///< reciprocal of rate
+ static const unsigned mOrder = 4; ///< memory length of generators
+ //@}
+ /**@name Derived values. */
+ //@{
+ static const unsigned mIStates = 0x01 << mOrder; ///< number of states, number of survivors
+ static const uint32_t mSMask = mIStates-1; ///< survivor mask
+ static const uint32_t mCMask = (mSMask<<1) | 0x01; ///< candidate mask
+ static const uint32_t mOMask = (0x01<<mIRate)-1; ///< output mask, all iRate low bits set
+ static const unsigned mNumCands = mIStates*2; ///< number of candidates to generate during branching
+ static const unsigned mDeferral = 6*mOrder; ///< deferral to be used
+ //@}
+ //@}
+
+ /** Precomputed tables. */
+ //@{
+ uint32_t mCoeffs[mIRate]; ///< output polynomial for each generator
+ uint32_t mCoeffsFB[mIRate]; ///< feedback polynomial for each generator
+ uint32_t mStateTable[mIRate][2*mIStates]; ///< precomputed generator output tables
+ uint32_t mGeneratorTable[2*mIStates]; ///< precomputed coder output table
+ //@}
+
+ public:
+
+ /**
+ A candidate sequence in a Viterbi decoder.
+ The 32-bit state register can support a deferral of 6 with a 4th-order coder.
+ */
+ typedef struct candStruct {
+ uint32_t iState; ///< encoder input associated with this candidate
+ uint32_t oState; ///< encoder output associated with this candidate
+ char rState[mIRate];///< real states of encoders associated with this candidate
+ float cost; ///< cost (metric value), float to support soft inputs
+ } vCand;
+
+ /** Clear a structure. */
+ void vitClear(vCand& v)
+ {
+ v.iState=0;
+ v.oState=0;
+ v.cost=0;
+ for (unsigned i = 0; i < mIRate; i++) v.rState[i] = 0;
+ }
+
+
+ private:
+
+ /**@name Survivors and candidates. */
+ //@{
+ vCand mSurvivors[mIStates]; ///< current survivor pool
+ vCand mCandidates[2*mIStates]; ///< current candidate pool
+ //@}
+
+ public:
+
+ unsigned iRate() const { return mIRate; }
+ uint32_t cMask() const { return mCMask; }
+ uint32_t stateTable(unsigned g, unsigned i) const { return mStateTable[g][i]; }
+ unsigned deferral() const { return mDeferral; }
+
+
+ ViterbiTCH_AFS10_2();
+
+ /** Set all cost metrics to zero. */
+ void initializeStates();
+
+ /**
+ Full cycle of the Viterbi algorithm: branch, metrics, prune, select.
+ @return reference to minimum-cost candidate.
+ */
+ const vCand& step(uint32_t inSample, const float *probs, const float *iprobs);
+
+ private:
+
+ /** Branch survivors into new candidates. */
+ void branchCandidates();
+
+ /** Compute cost metrics for soft-inputs. */
+ void getSoftCostMetrics(uint32_t inSample, const float *probs, const float *iprobs);
+
+ /** Select survivors from the candidate set. */
+ void pruneCandidates();
+
+ /** Find the minimum cost survivor. */
+ const vCand& minCost() const;
+
+ /**
+ Precompute the state tables.
+ @param g Generator index 0..((1/rate)-1)
+ */
+ void computeStateTables(unsigned g);
+
+ /**
+ Precompute the generator outputs.
+ mCoeffs must be defined first.
+ */
+ void computeGeneratorTable();
+ void encode(const BitVector &in, BitVector& target) const;
+ void decode(const SoftVector &in, BitVector& target);
+
+};
+
+
+
+/**
+ Class to represent recursive systematic convolutional coders/decoders of rate 1/3, memory length 6.
+*/
+class ViterbiTCH_AFS7_95 : public ViterbiBase {
+
+ private:
+ /**@name Lots of precomputed elements so the compiler can optimize like hell. */
+ //@{
+ /**@name Core values. */
+ //@{
+ static const unsigned mIRate = 3; ///< reciprocal of rate
+ static const unsigned mOrder = 6; ///< memory length of generators
+ //@}
+ /**@name Derived values. */
+ //@{
+ static const unsigned mIStates = 0x01 << mOrder; ///< number of states, number of survivors
+ static const uint32_t mSMask = mIStates-1; ///< survivor mask
+ static const uint32_t mCMask = (mSMask<<1) | 0x01; ///< candidate mask
+ static const uint32_t mOMask = (0x01<<mIRate)-1; ///< output mask, all iRate low bits set
+ static const unsigned mNumCands = mIStates*2; ///< number of candidates to generate during branching
+ static const unsigned mDeferral = 5*mOrder; ///< deferral to be used
+ //@}
+ //@}
+
+ /** Precomputed tables. */
+ //@{
+ uint32_t mCoeffs[mIRate]; ///< output polynomial for each generator
+ uint32_t mCoeffsFB[mIRate]; ///< feedback polynomial for each generator
+ uint32_t mStateTable[mIRate][2*mIStates]; ///< precomputed generator output tables
+ uint32_t mGeneratorTable[2*mIStates]; ///< precomputed coder output table
+ //@}
+
+ public:
+
+ /**
+ A candidate sequence in a Viterbi decoder.
+ The 32-bit state register can support a deferral of 5*order with a 6th-order coder.
+ */
+ typedef struct candStruct {
+ uint32_t iState; ///< encoder input associated with this candidate
+ uint32_t oState; ///< encoder output associated with this candidate
+ char rState[mIRate];///< real states of encoders associated with this candidate
+ float cost; ///< cost (metric value), float to support soft inputs
+ } vCand;
+
+ /** Clear a structure. */
+ void vitClear(vCand& v)
+ {
+ v.iState=0;
+ v.oState=0;
+ v.cost=0;
+ for (unsigned i = 0; i < mIRate; i++) v.rState[i] = 0;
+ }
+
+
+ private:
+
+ /**@name Survivors and candidates. */
+ //@{
+ vCand mSurvivors[mIStates]; ///< current survivor pool
+ vCand mCandidates[2*mIStates]; ///< current candidate pool
+ //@}
+
+ public:
+
+ unsigned iRate() const { return mIRate; }
+ uint32_t cMask() const { return mCMask; }
+ uint32_t stateTable(unsigned g, unsigned i) const { return mStateTable[g][i]; }
+ unsigned deferral() const { return mDeferral; }
+
+
+ ViterbiTCH_AFS7_95();
+
+ /** Set all cost metrics to zero. */
+ void initializeStates();
+
+ /**
+ Full cycle of the Viterbi algorithm: branch, metrics, prune, select.
+ @return reference to minimum-cost candidate.
+ */
+ const vCand& step(uint32_t inSample, const float *probs, const float *iprobs);
+
+ private:
+
+ /** Branch survivors into new candidates. */
+ void branchCandidates();
+
+ /** Compute cost metrics for soft-inputs. */
+ void getSoftCostMetrics(uint32_t inSample, const float *probs, const float *iprobs);
+
+ /** Select survivors from the candidate set. */
+ void pruneCandidates();
+
+ /** Find the minimum cost survivor. */
+ const vCand& minCost() const;
+
+ /**
+ Precompute the state tables.
+ @param g Generator index 0..((1/rate)-1)
+ */
+ void computeStateTables(unsigned g);
+
+ /**
+ Precompute the generator outputs.
+ mCoeffs must be defined first.
+ */
+ void computeGeneratorTable();
+ void encode(const BitVector &in, BitVector& target) const;
+ void decode(const SoftVector &in, BitVector& target);
+
+};
+
+
+
+/**
+ Class to represent recursive systematic convolutional coders/decoders of rate 1/3, memory length 4.
+*/
+class ViterbiTCH_AFS7_4 : public ViterbiBase {
+
+ private:
+ /**@name Lots of precomputed elements so the compiler can optimize like hell. */
+ //@{
+ /**@name Core values. */
+ //@{
+ static const unsigned mIRate = 3; ///< reciprocal of rate
+ static const unsigned mOrder = 4; ///< memory length of generators
+ //@}
+ /**@name Derived values. */
+ //@{
+ static const unsigned mIStates = 0x01 << mOrder; ///< number of states, number of survivors
+ static const uint32_t mSMask = mIStates-1; ///< survivor mask
+ static const uint32_t mCMask = (mSMask<<1) | 0x01; ///< candidate mask
+ static const uint32_t mOMask = (0x01<<mIRate)-1; ///< output mask, all iRate low bits set
+ static const unsigned mNumCands = mIStates*2; ///< number of candidates to generate during branching
+ static const unsigned mDeferral = 6*mOrder; ///< deferral to be used
+ //@}
+ //@}
+
+ /** Precomputed tables. */
+ //@{
+ uint32_t mCoeffs[mIRate]; ///< output polynomial for each generator
+ uint32_t mCoeffsFB[mIRate]; ///< feedback polynomial for each generator
+ uint32_t mStateTable[mIRate][2*mIStates]; ///< precomputed generator output tables
+ uint32_t mGeneratorTable[2*mIStates]; ///< precomputed coder output table
+ //@}
+
+ public:
+
+ /**
+ A candidate sequence in a Viterbi decoder.
+ The 32-bit state register can support a deferral of 6 with a 4th-order coder.
+ */
+ typedef struct candStruct {
+ uint32_t iState; ///< encoder input associated with this candidate
+ uint32_t oState; ///< encoder output associated with this candidate
+ char rState[mIRate];///< real states of encoders associated with this candidate
+ float cost; ///< cost (metric value), float to support soft inputs
+ } vCand;
+
+ /** Clear a structure. */
+ void vitClear(vCand& v)
+ {
+ v.iState=0;
+ v.oState=0;
+ v.cost=0;
+ for (unsigned i = 0; i < mIRate; i++) v.rState[i] = 0;
+ }
+
+
+ private:
+
+ /**@name Survivors and candidates. */
+ //@{
+ vCand mSurvivors[mIStates]; ///< current survivor pool
+ vCand mCandidates[2*mIStates]; ///< current candidate pool
+ //@}
+
+ public:
+
+ unsigned iRate() const { return mIRate; }
+ uint32_t cMask() const { return mCMask; }
+ uint32_t stateTable(unsigned g, unsigned i) const { return mStateTable[g][i]; }
+ unsigned deferral() const { return mDeferral; }
+
+
+ ViterbiTCH_AFS7_4();
+
+ /** Set all cost metrics to zero. */
+ void initializeStates();
+
+ /**
+ Full cycle of the Viterbi algorithm: branch, metrics, prune, select.
+ @return reference to minimum-cost candidate.
+ */
+ const vCand& step(uint32_t inSample, const float *probs, const float *iprobs);
+
+ private:
+
+ /** Branch survivors into new candidates. */
+ void branchCandidates();
+
+ /** Compute cost metrics for soft-inputs. */
+ void getSoftCostMetrics(uint32_t inSample, const float *probs, const float *iprobs);
+
+ /** Select survivors from the candidate set. */
+ void pruneCandidates();
+
+ /** Find the minimum cost survivor. */
+ const vCand& minCost() const;
+
+ /**
+ Precompute the state tables.
+ @param g Generator index 0..((1/rate)-1)
+ */
+ void computeStateTables(unsigned g);
+
+ /**
+ Precompute the generator outputs.
+ mCoeffs must be defined first.
+ */
+ void computeGeneratorTable();
+ void encode(const BitVector &in, BitVector& target) const;
+ void decode(const SoftVector &in, BitVector& target);
+
+};
+
+
+
+/**
+ Class to represent recursive systematic convolutional coders/decoders of rate 1/4, memory length 4.
+*/
+class ViterbiTCH_AFS6_7 : public ViterbiBase {
+
+ private:
+ /**@name Lots of precomputed elements so the compiler can optimize like hell. */
+ //@{
+ /**@name Core values. */
+ //@{
+ static const unsigned mIRate = 4; ///< reciprocal of rate
+ static const unsigned mOrder = 4; ///< memory length of generators
+ //@}
+ /**@name Derived values. */
+ //@{
+ static const unsigned mIStates = 0x01 << mOrder; ///< number of states, number of survivors
+ static const uint32_t mSMask = mIStates-1; ///< survivor mask
+ static const uint32_t mCMask = (mSMask<<1) | 0x01; ///< candidate mask
+ static const uint32_t mOMask = (0x01<<mIRate)-1; ///< output mask, all iRate low bits set
+ static const unsigned mNumCands = mIStates*2; ///< number of candidates to generate during branching
+ static const unsigned mDeferral = 6*mOrder; ///< deferral to be used
+ //@}
+ //@}
+
+ /** Precomputed tables. */
+ //@{
+ uint32_t mCoeffs[mIRate]; ///< output polynomial for each generator
+ uint32_t mCoeffsFB[mIRate]; ///< feedback polynomial for each generator
+ uint32_t mStateTable[mIRate][2*mIStates]; ///< precomputed generator output tables
+ uint32_t mGeneratorTable[2*mIStates]; ///< precomputed coder output table
+ //@}
+
+ public:
+
+ /**
+ A candidate sequence in a Viterbi decoder.
+ The 32-bit state register can support a deferral of 6 with a 4th-order coder.
+ */
+ typedef struct candStruct {
+ uint32_t iState; ///< encoder input associated with this candidate
+ uint32_t oState; ///< encoder output associated with this candidate
+ char rState[mIRate];///< real states of encoders associated with this candidate
+ float cost; ///< cost (metric value), float to support soft inputs
+ } vCand;
+
+ /** Clear a structure. */
+ void vitClear(vCand& v)
+ {
+ v.iState=0;
+ v.oState=0;
+ v.cost=0;
+ for (unsigned i = 0; i < mIRate; i++) v.rState[i] = 0;
+ }
+
+
+ private:
+
+ /**@name Survivors and candidates. */
+ //@{
+ vCand mSurvivors[mIStates]; ///< current survivor pool
+ vCand mCandidates[2*mIStates]; ///< current candidate pool
+ //@}
+
+ public:
+
+ unsigned iRate() const { return mIRate; }
+ uint32_t cMask() const { return mCMask; }
+ uint32_t stateTable(unsigned g, unsigned i) const { return mStateTable[g][i]; }
+ unsigned deferral() const { return mDeferral; }
+
+
+ ViterbiTCH_AFS6_7();
+
+ /** Set all cost metrics to zero. */
+ void initializeStates();
+
+ /**
+ Full cycle of the Viterbi algorithm: branch, metrics, prune, select.
+ @return reference to minimum-cost candidate.
+ */
+ const vCand& step(uint32_t inSample, const float *probs, const float *iprobs);
+
+ private:
+
+ /** Branch survivors into new candidates. */
+ void branchCandidates();
+
+ /** Compute cost metrics for soft-inputs. */
+ void getSoftCostMetrics(uint32_t inSample, const float *probs, const float *iprobs);
+
+ /** Select survivors from the candidate set. */
+ void pruneCandidates();
+
+ /** Find the minimum cost survivor. */
+ const vCand& minCost() const;
+
+ /**
+ Precompute the state tables.
+ @param g Generator index 0..((1/rate)-1)
+ */
+ void computeStateTables(unsigned g);
+
+ /**
+ Precompute the generator outputs.
+ mCoeffs must be defined first.
+ */
+ void computeGeneratorTable();
+ void encode(const BitVector &in, BitVector& target) const;
+ void decode(const SoftVector &in, BitVector& target);
+
+};
+
+
+
+/**
+ Class to represent recursive systematic convolutional coders/decoders of rate 1/4, memory length 6.
+*/
+class ViterbiTCH_AFS5_9 : public ViterbiBase {
+
+ private:
+ /**@name Lots of precomputed elements so the compiler can optimize like hell. */
+ //@{
+ /**@name Core values. */
+ //@{
+ static const unsigned mIRate = 4; ///< reciprocal of rate
+ static const unsigned mOrder = 6; ///< memory length of generators
+ //@}
+ /**@name Derived values. */
+ //@{
+ static const unsigned mIStates = 0x01 << mOrder; ///< number of states, number of survivors
+ static const uint32_t mSMask = mIStates-1; ///< survivor mask
+ static const uint32_t mCMask = (mSMask<<1) | 0x01; ///< candidate mask
+ static const uint32_t mOMask = (0x01<<mIRate)-1; ///< output mask, all iRate low bits set
+ static const unsigned mNumCands = mIStates*2; ///< number of candidates to generate during branching
+ static const unsigned mDeferral = 5*mOrder; ///< deferral to be used
+ //@}
+ //@}
+
+ /** Precomputed tables. */
+ //@{
+ uint32_t mCoeffs[mIRate]; ///< output polynomial for each generator
+ uint32_t mCoeffsFB[mIRate]; ///< feedback polynomial for each generator
+ uint32_t mStateTable[mIRate][2*mIStates]; ///< precomputed generator output tables
+ uint32_t mGeneratorTable[2*mIStates]; ///< precomputed coder output table
+ //@}
+
+ public:
+
+ /**
+ A candidate sequence in a Viterbi decoder.
+ The 32-bit state register can support a deferral of 5*order with a 6th-order coder.
+ */
+ typedef struct candStruct {
+ uint32_t iState; ///< encoder input associated with this candidate
+ uint32_t oState; ///< encoder output associated with this candidate
+ char rState[mIRate];///< real states of encoders associated with this candidate
+ float cost; ///< cost (metric value), float to support soft inputs
+ } vCand;
+
+ /** Clear a structure. */
+ void vitClear(vCand& v)
+ {
+ v.iState=0;
+ v.oState=0;
+ v.cost=0;
+ for (unsigned i = 0; i < mIRate; i++) v.rState[i] = 0;
+ }
+
+
+ private:
+
+ /**@name Survivors and candidates. */
+ //@{
+ vCand mSurvivors[mIStates]; ///< current survivor pool
+ vCand mCandidates[2*mIStates]; ///< current candidate pool
+ //@}
+
+ public:
+
+ unsigned iRate() const { return mIRate; }
+ uint32_t cMask() const { return mCMask; }
+ uint32_t stateTable(unsigned g, unsigned i) const { return mStateTable[g][i]; }
+ unsigned deferral() const { return mDeferral; }
+
+
+ ViterbiTCH_AFS5_9();
+
+ /** Set all cost metrics to zero. */
+ void initializeStates();
+
+ /**
+ Full cycle of the Viterbi algorithm: branch, metrics, prune, select.
+ @return reference to minimum-cost candidate.
+ */
+ const vCand& step(uint32_t inSample, const float *probs, const float *iprobs);
+
+ private:
+
+ /** Branch survivors into new candidates. */
+ void branchCandidates();
+
+ /** Compute cost metrics for soft-inputs. */
+ void getSoftCostMetrics(uint32_t inSample, const float *probs, const float *iprobs);
+
+ /** Select survivors from the candidate set. */
+ void pruneCandidates();
+
+ /** Find the minimum cost survivor. */
+ const vCand& minCost() const;
+
+ /**
+ Precompute the state tables.
+ @param g Generator index 0..((1/rate)-1)
+ */
+ void computeStateTables(unsigned g);
+
+ /**
+ Precompute the generator outputs.
+ mCoeffs must be defined first.
+ */
+ void computeGeneratorTable();
+ void encode(const BitVector &in, BitVector& target) const;
+ void decode(const SoftVector &in, BitVector& target);
+
+};
+
+
+
+/**
+ Class to represent recursive systematic convolutional coders/decoders of rate 1/5, memory length 4.
+*/
+class ViterbiTCH_AFS5_15 : public ViterbiBase {
+
+ private:
+ /**@name Lots of precomputed elements so the compiler can optimize like hell. */
+ //@{
+ /**@name Core values. */
+ //@{
+ static const unsigned mIRate = 5; ///< reciprocal of rate
+ static const unsigned mOrder = 4; ///< memory length of generators
+ //@}
+ /**@name Derived values. */
+ //@{
+ static const unsigned mIStates = 0x01 << mOrder; ///< number of states, number of survivors
+ static const uint32_t mSMask = mIStates-1; ///< survivor mask
+ static const uint32_t mCMask = (mSMask<<1) | 0x01; ///< candidate mask
+ static const uint32_t mOMask = (0x01<<mIRate)-1; ///< output mask, all iRate low bits set
+ static const unsigned mNumCands = mIStates*2; ///< number of candidates to generate during branching
+ static const unsigned mDeferral = 6*mOrder; ///< deferral to be used
+ //@}
+ //@}
+
+ /** Precomputed tables. */
+ //@{
+ uint32_t mCoeffs[mIRate]; ///< output polynomial for each generator
+ uint32_t mCoeffsFB[mIRate]; ///< feedback polynomial for each generator
+ uint32_t mStateTable[mIRate][2*mIStates]; ///< precomputed generator output tables
+ uint32_t mGeneratorTable[2*mIStates]; ///< precomputed coder output table
+ //@}
+
+ public:
+
+ /**
+ A candidate sequence in a Viterbi decoder.
+ The 32-bit state register can support a deferral of 6 with a 4th-order coder.
+ */
+ typedef struct candStruct {
+ uint32_t iState; ///< encoder input associated with this candidate
+ uint32_t oState; ///< encoder output associated with this candidate
+ char rState[mIRate];///< real states of encoders associated with this candidate
+ float cost; ///< cost (metric value), float to support soft inputs
+ } vCand;
+
+ /** Clear a structure. */
+ void vitClear(vCand& v)
+ {
+ v.iState=0;
+ v.oState=0;
+ v.cost=0;
+ for (unsigned i = 0; i < mIRate; i++) v.rState[i] = 0;
+ }
+
+
+ private:
+
+ /**@name Survivors and candidates. */
+ //@{
+ vCand mSurvivors[mIStates]; ///< current survivor pool
+ vCand mCandidates[2*mIStates]; ///< current candidate pool
+ //@}
+
+ public:
+
+ unsigned iRate() const { return mIRate; }
+ uint32_t cMask() const { return mCMask; }
+ uint32_t stateTable(unsigned g, unsigned i) const { return mStateTable[g][i]; }
+ unsigned deferral() const { return mDeferral; }
+
+
+ ViterbiTCH_AFS5_15();
+
+ /** Set all cost metrics to zero. */
+ void initializeStates();
+
+ /**
+ Full cycle of the Viterbi algorithm: branch, metrics, prune, select.
+ @return reference to minimum-cost candidate.
+ */
+ const vCand& step(uint32_t inSample, const float *probs, const float *iprobs);
+
+ private:
+
+ /** Branch survivors into new candidates. */
+ void branchCandidates();
+
+ /** Compute cost metrics for soft-inputs. */
+ void getSoftCostMetrics(uint32_t inSample, const float *probs, const float *iprobs);
+
+ /** Select survivors from the candidate set. */
+ void pruneCandidates();
+
+ /** Find the minimum cost survivor. */
+ const vCand& minCost() const;
+
+ /**
+ Precompute the state tables.
+ @param g Generator index 0..((1/rate)-1)
+ */
+ void computeStateTables(unsigned g);
+
+ /**
+ Precompute the generator outputs.
+ mCoeffs must be defined first.
+ */
+ void computeGeneratorTable();
+ void encode(const BitVector &in, BitVector& target) const;
+ void decode(const SoftVector &in, BitVector& target);
+
+};
+
+
+
+/**
+ Class to represent recursive systematic convolutional coders/decoders of rate 1/5, memory length 6.
+*/
+class ViterbiTCH_AFS4_75 : public ViterbiBase {
+
+ private:
+ /**@name Lots of precomputed elements so the compiler can optimize like hell. */
+ //@{
+ /**@name Core values. */
+ //@{
+ static const unsigned mIRate = 5; ///< reciprocal of rate
+ static const unsigned mOrder = 6; ///< memory length of generators
+ //@}
+ /**@name Derived values. */
+ //@{
+ static const unsigned mIStates = 0x01 << mOrder; ///< number of states, number of survivors
+ static const uint32_t mSMask = mIStates-1; ///< survivor mask
+ static const uint32_t mCMask = (mSMask<<1) | 0x01; ///< candidate mask
+ static const uint32_t mOMask = (0x01<<mIRate)-1; ///< output mask, all iRate low bits set
+ static const unsigned mNumCands = mIStates*2; ///< number of candidates to generate during branching
+ static const unsigned mDeferral = 5*mOrder; ///< deferral to be used
+ //@}
+ //@}
+
+ /** Precomputed tables. */
+ //@{
+ uint32_t mCoeffs[mIRate]; ///< output polynomial for each generator
+ uint32_t mCoeffsFB[mIRate]; ///< feedback polynomial for each generator
+ uint32_t mStateTable[mIRate][2*mIStates]; ///< precomputed generator output tables
+ uint32_t mGeneratorTable[2*mIStates]; ///< precomputed coder output table
+ //@}
+
+ public:
+
+ /**
+ A candidate sequence in a Viterbi decoder.
+ The 32-bit state register can support a deferral of 5*order with a 6th-order coder.
+ */
+ typedef struct candStruct {
+ uint32_t iState; ///< encoder input associated with this candidate
+ uint32_t oState; ///< encoder output associated with this candidate
+ char rState[mIRate];///< real states of encoders associated with this candidate
+ float cost; ///< cost (metric value), float to support soft inputs
+ } vCand;
+
+ /** Clear a structure. */
+ void vitClear(vCand& v)
+ {
+ v.iState=0;
+ v.oState=0;
+ v.cost=0;
+ for (unsigned i = 0; i < mIRate; i++) v.rState[i] = 0;
+ }
+
+
+ private:
+
+ /**@name Survivors and candidates. */
+ //@{
+ vCand mSurvivors[mIStates]; ///< current survivor pool
+ vCand mCandidates[2*mIStates]; ///< current candidate pool
+ //@}
+
+ public:
+
+ unsigned iRate() const { return mIRate; }
+ uint32_t cMask() const { return mCMask; }
+ uint32_t stateTable(unsigned g, unsigned i) const { return mStateTable[g][i]; }
+ unsigned deferral() const { return mDeferral; }
+
+
+ ViterbiTCH_AFS4_75();
+
+ /** Set all cost metrics to zero. */
+ void initializeStates();
+
+ /**
+ Full cycle of the Viterbi algorithm: branch, metrics, prune, select.
+ @return reference to minimum-cost candidate.
+ */
+ const vCand& step(uint32_t inSample, const float *probs, const float *iprobs);
+
+ private:
+
+ /** Branch survivors into new candidates. */
+ void branchCandidates();
+
+ /** Compute cost metrics for soft-inputs. */
+ void getSoftCostMetrics(uint32_t inSample, const float *probs, const float *iprobs);
+
+ /** Select survivors from the candidate set. */
+ void pruneCandidates();
+
+ /** Find the minimum cost survivor. */
+ const vCand& minCost() const;
+
+ /**
+ Precompute the state tables.
+ @param g Generator index 0..((1/rate)-1)
+ */
+ void computeStateTables(unsigned g);
+
+ /**
+ Precompute the generator outputs.
+ mCoeffs must be defined first.
+ */
+ void computeGeneratorTable();
+ void encode(const BitVector &in, BitVector& target) const;
+ void decode(const SoftVector &in, BitVector& target);
+
+};
+
+
+
+
+#endif
diff --git a/lib/decoding/BitVector.cpp b/lib/decoding/BitVector.cpp
index 89d8d19..86f326a 100644
--- a/lib/decoding/BitVector.cpp
+++ b/lib/decoding/BitVector.cpp
@@ -1,21 +1,26 @@
/*
-* Copyright 2008 Free Software Foundation, Inc.
+* Copyright 2008, 2009, 2014 Free Software Foundation, Inc.
+* Copyright 2014 Range Networks, Inc.
*
-* This software is distributed under the terms of the GNU Public License.
+*
+* This software is distributed under the terms of the GNU Affero Public License.
* See the COPYING file in the main directory for details.
+*
+* This use of this software may be subject to additional restrictions.
+* See the LEGAL file in the main directory for details.
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <http://www.gnu.org/licenses/>.
+ You should have received a copy of the GNU Affero General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
@@ -24,50 +29,38 @@
#include "BitVector.h"
#include <iostream>
+#include <stdio.h>
+#include <sstream>
+#include <string.h>
+//#include <Logger.h>
using namespace std;
-/**
- Apply a Galois polymonial to a binary seqeunce.
- @param val The input sequence.
- @param poly The polynomial.
- @param order The order of the polynomial.
- @return Single-bit result.
-*/
-unsigned applyPoly(uint64_t val, uint64_t poly, unsigned order)
-{
- uint64_t prod = val & poly;
- unsigned sum = prod;
- for (unsigned i=1; i<order; i++) sum ^= prod>>i;
- return sum & 0x01;
-}
-
-
-
-
-
BitVector::BitVector(const char *valString)
- :Vector<char>(strlen(valString))
{
- uint32_t accum = 0;
- for (size_t i=0; i<size(); i++) {
- accum <<= 1;
- if (valString[i]=='1') accum |= 0x01;
- mStart[i] = accum;
+ // 1-30-2013 pat: I don't know what this was intended to do, but it did not create a normalized BitVector,
+ // and it could even fail if the accum overflows 8 bits.
+ //uint32_t accum = 0;
+ //for (size_t i=0; i<size(); i++) {
+ // accum <<= 1;
+ // if (valString[i]=='1') accum |= 0x01;
+ // mStart[i] = accum;
+ //}
+ vInit(strlen(valString));
+ char *rp = begin();
+ for (const char *cp = valString; *cp; cp++, rp++) {
+ *rp = (*cp == '1');
}
}
-
-
-
uint64_t BitVector::peekField(size_t readIndex, unsigned length) const
{
uint64_t accum = 0;
char *dp = mStart + readIndex;
- assert(dp+length <= mEnd);
+
for (unsigned i=0; i<length; i++) {
accum = (accum<<1) | ((*dp++) & 0x01);
}
@@ -75,6 +68,22 @@ uint64_t BitVector::peekField(size_t readIndex, unsigned length) const
}
+
+
+uint64_t BitVector::peekFieldReversed(size_t readIndex, unsigned length) const
+{
+ uint64_t accum = 0;
+ char *dp = mStart + readIndex + length - 1;
+ assert(dp<mEnd);
+ for (int i=(length-1); i>=0; i--) {
+ accum = (accum<<1) | ((*dp--) & 0x01);
+ }
+ return accum;
+}
+
+
+
+
uint64_t BitVector::readField(size_t& readIndex, unsigned length) const
{
const uint64_t retVal = peekField(readIndex,length);
@@ -83,21 +92,63 @@ uint64_t BitVector::readField(size_t& readIndex, unsigned length) const
}
+uint64_t BitVector::readFieldReversed(size_t& readIndex, unsigned length) const
+{
+
+ const uint64_t retVal = peekFieldReversed(readIndex,length);
+ readIndex += length;
+ return retVal;
+
+}
+
+
+
+
void BitVector::fillField(size_t writeIndex, uint64_t value, unsigned length)
{
- char *dpBase = mStart + writeIndex;
- char *dp = dpBase + length - 1;
- assert(dp < mEnd);
- while (dp>=dpBase) {
- *dp-- = value & 0x01;
- value >>= 1;
+ if (length != 0) {
+ char *dpBase = mStart + writeIndex;
+ char *dp = dpBase + length - 1;
+ assert(dp < mEnd);
+ while (dp>=dpBase) {
+ *dp-- = value & 0x01;
+ value >>= 1;
+ }
}
}
+
+void BitVector::fillFieldReversed(size_t writeIndex, uint64_t value, unsigned length)
+{
+ if (length != 0) {
+ char *dp = mStart + writeIndex;
+ char *dpEnd = dp + length - 1;
+ assert(dpEnd < mEnd);
+ while (dp<=dpEnd) {
+ *dp++ = value & 0x01;
+ value >>= 1;
+ }
+ }
+}
+
+
+
+
void BitVector::writeField(size_t& writeIndex, uint64_t value, unsigned length)
{
- fillField(writeIndex,value,length);
- writeIndex += length;
+ if (length != 0) {
+ fillField(writeIndex,value,length);
+ writeIndex += length;
+ }
+}
+
+
+void BitVector::writeFieldReversed(size_t& writeIndex, uint64_t value, unsigned length)
+{
+ if (length != 0) {
+ fillFieldReversed(writeIndex,value,length);
+ writeIndex += length;
+ }
}
@@ -136,6 +187,7 @@ void BitVector::reverse8()
void BitVector::LSB8MSB()
{
+ if (size()<8) return;
size_t size8 = 8*(size()/8);
size_t iTop = size8 - 8;
for (size_t i=0; i<=iTop; i+=8) segment(i,8).reverse8();
@@ -161,31 +213,6 @@ uint64_t BitVector::parity(Generator& gen) const
}
-void BitVector::encode(const ViterbiR2O4& coder, BitVector& target)
-{
- size_t sz = size();
- assert(sz*coder.iRate() == target.size());
-
- // Build a "history" array where each element contains the full history.
- uint32_t history[sz];
- uint32_t accum = 0;
- for (size_t i=0; i<sz; i++) {
- accum = (accum<<1) | bit(i);
- history[i] = accum;
- }
-
- // Look up histories in the pre-generated state table.
- char *op = target.begin();
- for (size_t i=0; i<sz; i++) {
- unsigned index = coder.cMask() & history[i];
- for (unsigned g=0; g<coder.iRate(); g++) {
- *op++ = coder.stateTable(g,index);
- }
- }
-}
-
-
-
unsigned BitVector::sum() const
{
unsigned sum = 0;
@@ -219,9 +246,6 @@ void BitVector::unmap(const unsigned *map, size_t mapSize, BitVector& dest) cons
-
-
-
ostream& operator<<(ostream& os, const BitVector& hv)
{
for (size_t i=0; i<hv.size(); i++) {
@@ -234,121 +258,6 @@ ostream& operator<<(ostream& os, const BitVector& hv)
-ViterbiR2O4::ViterbiR2O4()
-{
- assert(mDeferral < 32);
- mCoeffs[0] = 0x019;
- mCoeffs[1] = 0x01b;
- computeStateTables(0);
- computeStateTables(1);
- computeGeneratorTable();
-}
-
-
-
-
-void ViterbiR2O4::initializeStates()
-{
- for (unsigned i=0; i<mIStates; i++) clear(mSurvivors[i]);
- for (unsigned i=0; i<mNumCands; i++) clear(mCandidates[i]);
-}
-
-
-
-void ViterbiR2O4::computeStateTables(unsigned g)
-{
- assert(g<mIRate);
- for (unsigned state=0; state<mIStates; state++) {
- // 0 input
- uint32_t inputVal = state<<1;
- mStateTable[g][inputVal] = applyPoly(inputVal, mCoeffs[g], mOrder+1);
- // 1 input
- inputVal |= 1;
- mStateTable[g][inputVal] = applyPoly(inputVal, mCoeffs[g], mOrder+1);
- }
-}
-
-void ViterbiR2O4::computeGeneratorTable()
-{
- for (unsigned index=0; index<mIStates*2; index++) {
- mGeneratorTable[index] = (mStateTable[0][index]<<1) | mStateTable[1][index];
- }
-}
-
-
-
-
-
-
-void ViterbiR2O4::branchCandidates()
-{
- // Branch to generate new input states.
- const vCand *sp = mSurvivors;
- for (unsigned i=0; i<mNumCands; i+=2) {
- // extend and suffix
- const uint32_t iState0 = (sp->iState) << 1; // input state for 0
- const uint32_t iState1 = iState0 | 0x01; // input state for 1
- const uint32_t oStateShifted = (sp->oState) << mIRate; // shifted output
- const float cost = sp->cost;
- sp++;
- // 0 input extension
- mCandidates[i].cost = cost;
- mCandidates[i].oState = oStateShifted | mGeneratorTable[iState0 & mCMask];
- mCandidates[i].iState = iState0;
- // 1 input extension
- mCandidates[i+1].cost = cost;
- mCandidates[i+1].oState = oStateShifted | mGeneratorTable[iState1 & mCMask];
- mCandidates[i+1].iState = iState1;
- }
-}
-
-
-void ViterbiR2O4::getSoftCostMetrics(const uint32_t inSample, const float *matchCost, const float *mismatchCost)
-{
- const float *cTab[2] = {matchCost,mismatchCost};
- for (unsigned i=0; i<mNumCands; i++) {
- vCand& thisCand = mCandidates[i];
- // We examine input bits 2 at a time for a rate 1/2 coder.
- const unsigned mismatched = inSample ^ (thisCand.oState);
- thisCand.cost += cTab[mismatched&0x01][1] + cTab[(mismatched>>1)&0x01][0];
- }
-}
-
-
-void ViterbiR2O4::pruneCandidates()
-{
- const vCand* c1 = mCandidates; // 0-prefix
- const vCand* c2 = mCandidates + mIStates; // 1-prefix
- for (unsigned i=0; i<mIStates; i++) {
- if (c1[i].cost < c2[i].cost) mSurvivors[i] = c1[i];
- else mSurvivors[i] = c2[i];
- }
-}
-
-
-const ViterbiR2O4::vCand& ViterbiR2O4::minCost() const
-{
- int minIndex = 0;
- float minCost = mSurvivors[0].cost;
- for (unsigned i=1; i<mIStates; i++) {
- const float thisCost = mSurvivors[i].cost;
- if (thisCost>=minCost) continue;
- minCost = thisCost;
- minIndex=i;
- }
- return mSurvivors[minIndex];
-}
-
-
-const ViterbiR2O4::vCand& ViterbiR2O4::step(uint32_t inSample, const float *probs, const float *iprobs)
-{
- branchCandidates();
- getSoftCostMetrics(inSample,probs,iprobs);
- pruneCandidates();
- return minCost();
-}
-
-
uint64_t Parity::syndrome(const BitVector& receivedCodeword)
{
return receivedCodeword.syndrome(*this);
@@ -358,7 +267,7 @@ uint64_t Parity::syndrome(const BitVector& receivedCodeword)
void Parity::writeParityWord(const BitVector& data, BitVector& parityTarget, bool invert)
{
uint64_t pWord = data.parity(*this);
- if (invert) pWord = ~pWord;
+ if (invert) pWord = ~pWord;
parityTarget.fillField(0,pWord,size());
}
@@ -393,84 +302,54 @@ BitVector SoftVector::sliced() const
-void SoftVector::decode(ViterbiR2O4 &decoder, BitVector& target) const
+// (pat) Added 6-22-2012
+float SoftVector::getEnergy(float *plow) const
{
- const size_t sz = size();
- const unsigned deferral = decoder.deferral();
- const size_t ctsz = sz + deferral;
- assert(sz <= decoder.iRate()*target.size());
-
- // Build a "history" array where each element contains the full history.
- uint32_t history[ctsz];
- {
- BitVector bits = sliced();
- uint32_t accum = 0;
- for (size_t i=0; i<sz; i++) {
- accum = (accum<<1) | bits.bit(i);
- history[i] = accum;
- }
- // Repeat last bit at the end.
- for (size_t i=sz; i<ctsz; i++) {
- accum = (accum<<1) | (accum & 0x01);
- history[i] = accum;
- }
+ const SoftVector &vec = *this;
+ int len = vec.size();
+ float avg = 0; float low = 1;
+ for (int i = 0; i < len; i++) {
+ float bit = vec[i];
+ float energy = 2*((bit < 0.5) ? (0.5-bit) : (bit-0.5));
+ if (energy < low) low = energy;
+ avg += energy/len;
}
-
- // Precompute metric tables.
- float matchCostTable[ctsz];
- float mismatchCostTable[ctsz];
- {
- const float *dp = mStart;
- for (size_t i=0; i<sz; i++) {
- // pVal is the probability that a bit is correct.
- // ipVal is the probability that a bit is correct.
- float pVal = dp[i];
- if (pVal>0.5F) pVal = 1.0F-pVal;
- float ipVal = 1.0F-pVal;
- // This is a cheap approximation to an ideal cost function.
- if (pVal<0.01F) pVal = 0.01;
- if (ipVal<0.01F) ipVal = 0.01;
- matchCostTable[i] = 0.25F/ipVal;
- mismatchCostTable[i] = 0.25F/pVal;
- }
-
- // pad end of table with unknowns
- for (size_t i=sz; i<ctsz; i++) {
- matchCostTable[i] = 0.5F;
- mismatchCostTable[i] = 0.5F;
- }
- }
-
- {
- decoder.initializeStates();
- // Each sample of history[] carries its history.
- // So we only have to process every iRate-th sample.
- const unsigned step = decoder.iRate();
- // input pointer
- const uint32_t *ip = history + step - 1;
- // output pointers
- char *op = target.begin();
- const char *const opt = target.end();
- // table pointers
- const float* match = matchCostTable;
- const float* mismatch = mismatchCostTable;
- size_t oCount = 0;
- while (op<opt) {
- // Viterbi algorithm
- const ViterbiR2O4::vCand &minCost = decoder.step(*ip, match, mismatch);
- ip += step;
- match += step;
- mismatch += step;
- // output
- if (oCount>=deferral) *op++ = (minCost.iState >> deferral);
- oCount++;
+ if (plow) { *plow = low; }
+ return avg;
+}
+
+// (pat) Added 1-2014. Compute the SNR of a soft vector. Very similar to above.
+// Since we don't really know what the expected signal values are, we assume the signal is 0 or 1
+// and return the SNR on that basis.
+// SNR is power(signal) / power(noise), which works out to (RMS(signal) / RMS(noise))**2 of the values.
+// Since RMS is square-rooted, ie RMS = sqrt(1/n * (x1**2 + x2**2 ...)), we just add up the squares.
+// To compute the signal term we remove the constant offset, so the signal values are either 0.5 or -0.5,
+// and the sum of squares of the signal is just 0.5**2 * len; all we still need to compute is the noise component.
+float SoftVector::getSNR() const
+{
+ float sumSquaresNoise = 0;
+ const SoftVector &vec = *this;
+ int len = vec.size();
+ if (len == 0) { return 0.0; }
+ for (int i = 0; i < len; i++) {
+ float bit = vec[i];
+ if (bit < 0.5) {
+ // Assume signal is 0.
+ sumSquaresNoise += (bit - 0.0) * (bit - 0.0);
+ } else {
+ // Assume signal is 1.
+ sumSquaresNoise += (bit - 1.0) * (bit - 1.0);
}
}
+ float sumSquaresSignal = 0.5 * 0.5 * len;
+ // I really want log10 of this to convert to dB, but log is expensive, and Harvind seems to like absolute SNR.
+ // Clamp max to 999; it shouldn't get up there, but be safe. This also avoids divide by zero.
+ if (sumSquaresNoise * 1000 < sumSquaresSignal) return 999;
+ return sumSquaresSignal / sumSquaresNoise;
}
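A worked example of the calculation above, with invented soft values: for the 4-bit vector {0.9, 0.1, 0.95, 0.6} the signal term is 0.5*0.5*4 = 1.0 and the noise term is (0.9-1)^2 + (0.1-0)^2 + (0.95-1)^2 + (0.6-1)^2 = 0.1825, so getSNR() returns 1.0/0.1825, roughly 5.5. The 999 clamp only applies when the noise term is smaller than a thousandth of the signal term.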
-
ostream& operator<<(ostream& os, const SoftVector& sv)
{
for (size_t i=0; i<sv.size(); i++) {
@@ -496,6 +375,50 @@ void BitVector::pack(unsigned char* targ) const
targ[bytes] = peekField(whole,rem) << (8-rem);
}
+void BitVector::pack2(unsigned char* targ) const
+{
+ unsigned int i;
+ unsigned char curbyte = 0;
+
+ for (i = 0; i < size(); i++)
+ {
+ uint8_t bitnum = 7 - (i % 8);
+ curbyte |= ((char)bit(i) << bitnum);
+ if(i % 8 == 7){
+ *targ++ = curbyte;
+ curbyte = 0;
+ }
+ }
+
+ // Assumes MSB-first packing.
+// unsigned bytes = size()/8;
+// for (unsigned i=0; i<bytes; i++) {
+// targ[i] = peekField(i*8,8);
+// }
+// unsigned whole = bytes*8;
+// unsigned rem = size() - whole;
+// if (rem==0) return;
+// targ[bytes] = peekField(whole,rem) << (8-rem);
+}
+
+
+
+string BitVector::packToString() const
+{
+ string result;
+ result.reserve((size()+7)/8);
+ // Tempting to call this->pack(result.c_str()) but technically c_str() is read-only.
+ unsigned bytes = size()/8;
+ for (unsigned i=0; i<bytes; i++) {
+ result.push_back(peekField(i*8,8));
+ }
+ unsigned whole = bytes*8;
+ unsigned rem = size() - whole;
+ if (rem==0) return result;
+ result.push_back(peekField(whole,rem) << (8-rem));
+ return result;
+}
+
void BitVector::unpack(const unsigned char* src)
{
@@ -507,7 +430,104 @@ void BitVector::unpack(const unsigned char* src)
unsigned whole = bytes*8;
unsigned rem = size() - whole;
if (rem==0) return;
- fillField(whole,src[bytes],rem);
+ fillField(whole,src[bytes] >> (8-rem),rem);
+}
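pack(), pack2() and packToString() all emit MSB-first bytes, and the one-line change to unpack() above makes a pack()/unpack() round trip lossless when the size is not a multiple of 8: the trailing bits are now taken from the high end of the last byte, where pack() puts them. An illustration with an invented 12-bit vector:

    BitVector v("101100101111");
    unsigned char buf[2];
    v.pack(buf);                          // buf[0] == 0xB2, buf[1] == 0xF0 (last nibble left-justified)

    BitVector w(12);
    w.unpack(buf);                        // with the fix above, w holds the same 12 bits as v
    std::string s = v.packToString();     // the same two bytes, returned as a std::string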
+
+void BitVector::hex(ostream& os) const
+{
+ os << std::hex;
+ unsigned digits = size()/4;
+ size_t wp=0;
+ for (unsigned i=0; i<digits; i++) {
+ os << readField(wp,4);
+ }
+ os << std::dec;
+}
+
+std::string BitVector::hexstr() const
+{
+ std::ostringstream ss;
+ hex(ss);
+ return ss.str();
+}
+
+
+bool BitVector::unhex(const char* src)
+{
+ // Assumes MSB-first packing.
+ unsigned int val;
+ unsigned digits = size()/4;
+ for (unsigned i=0; i<digits; i++) {
+ if (sscanf(src+i, "%1x", &val) < 1) {
+ return false;
+ }
+ fillField(i*4,val,4);
+ }
+ unsigned whole = digits*4;
+ unsigned rem = size() - whole;
+ if (rem>0) {
+ if (sscanf(src+digits, "%1x", &val) < 1) {
+ return false;
+ }
+ fillField(whole,val,rem);
+ }
+ return true;
+}
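hex()/hexstr() emit MSB-first nibbles and unhex() parses them back, returning false on a malformed digit, so the pair round-trips cleanly. A short illustration with invented values:

    BitVector v("10110010");
    std::string s = v.hexstr();           // s == "b2"
    BitVector w(8);
    bool ok = w.unhex("b2");              // ok == true; w holds 1,0,1,1,0,0,1,0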
+
+bool BitVector::operator==(const BitVector &other) const
+{
+ unsigned l = size();
+ return l == other.size() && 0==memcmp(begin(),other.begin(),l);
+}
+
+void BitVector::copyPunctured(BitVector &dst, const unsigned *puncture, const size_t plth)
+{
+ assert(size() - plth == dst.size());
+ char *srcp = mStart;
+ char *dstp = dst.mStart;
+ const unsigned *pend = puncture + plth;
+ while (srcp < mEnd) {
+ if (puncture < pend) {
+ int n = (*puncture++) - (srcp - mStart);
+ assert(n >= 0);
+ for (int i = 0; i < n; i++) {
+ assert(srcp < mEnd && dstp < dst.mEnd);
+ *dstp++ = *srcp++;
+ }
+ srcp++;
+ } else {
+ while (srcp < mEnd) {
+ assert(dstp < dst.mEnd);
+ *dstp++ = *srcp++;
+ }
+ }
+ }
+ assert(dstp == dst.mEnd && puncture == pend);
+}
+
+void SoftVector::copyUnPunctured(SoftVector &dst, const unsigned *puncture, const size_t plth)
+{
+ assert(size() + plth == dst.size());
+ float *srcp = mStart;
+ float *dstp = dst.mStart;
+ const unsigned *pend = puncture + plth;
+ while (dstp < dst.mEnd) {
+ if (puncture < pend) {
+ int n = (*puncture++) - (dstp - dst.mStart);
+ assert(n >= 0);
+ for (int i = 0; i < n; i++) {
+ assert(srcp < mEnd && dstp < dst.mEnd);
+ *dstp++ = *srcp++;
+ }
+ *dstp++ = 0.5;
+ } else {
+ while (srcp < mEnd) {
+ assert(dstp < dst.mEnd);
+ *dstp++ = *srcp++;
+ }
+ }
+ }
+ assert(dstp == dst.mEnd && puncture == pend);
}
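The two puncturing helpers are shape-inverses of each other: copyPunctured() drops the bit positions listed in the puncture table on the transmit side, and copyUnPunctured() re-inserts neutral 0.5 soft bits at those positions so the Viterbi decoder treats them as unknown. A sketch with an invented two-entry table, assuming the usual SoftVector size constructor inherited from Vector<float>:

    static const unsigned punc[2] = {2, 5};   // positions removed before transmission
    BitVector coded("101101");                // 6 coded bits
    BitVector txed(4);
    coded.copyPunctured(txed, punc, 2);       // txed holds 1,0,1,0 (bits 2 and 5 dropped)

    SoftVector rx(txed);                      // pretend perfect reception
    SoftVector depunc(6);
    rx.copyUnPunctured(depunc, punc, 2);      // 0.5 re-inserted at positions 2 and 5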
// vim: ts=4 sw=4
diff --git a/lib/decoding/BitVector.h b/lib/decoding/BitVector.h
index 3019c2c..0899817 100644
--- a/lib/decoding/BitVector.h
+++ b/lib/decoding/BitVector.h
@@ -1,30 +1,35 @@
/*
-* Copyright 2008 Free Software Foundation, Inc.
+* Copyright 2008, 2009, 2014 Free Software Foundation, Inc.
+* Copyright 2014 Range Networks, Inc.
*
-* This software is distributed under the terms of the GNU Public License.
+* This software is distributed under the terms of the GNU Affero Public License.
* See the COPYING file in the main directory for details.
+*
+* This use of this software may be subject to additional restrictions.
+* See the LEGAL file in the main directory for details.
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <http://www.gnu.org/licenses/>.
+ You should have received a copy of the GNU Affero General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#ifndef FECVECTORS_H
-#define FECVECTORS_H
+#ifndef BITVECTORS_H
+#define BITVECTORS_H
#include "Vector.h"
#include <stdint.h>
+#include <stdio.h>
class BitVector;
@@ -32,6 +37,7 @@ class SoftVector;
+
/** Shift-register (LFSR) generator. */
class Generator {
@@ -109,171 +115,83 @@ class Parity : public Generator {
};
-
-
-/**
- Class to represent convolutional coders/decoders of rate 1/2, memory length 4.
- This is the "workhorse" coder for most GSM channels.
-*/
-class ViterbiR2O4 {
-
- private:
- /**name Lots of precomputed elements so the compiler can optimize like hell. */
- //@{
- /**@name Core values. */
- //@{
- static const unsigned mIRate = 2; ///< reciprocal of rate
- static const unsigned mOrder = 4; ///< memory length of generators
- //@}
- /**@name Derived values. */
- //@{
- static const unsigned mIStates = 0x01 << mOrder; ///< number of states, number of survivors
- static const uint32_t mSMask = mIStates-1; ///< survivor mask
- static const uint32_t mCMask = (mSMask<<1) | 0x01; ///< candidate mask
- static const uint32_t mOMask = (0x01<<mIRate)-1; ///< ouput mask, all iRate low bits set
- static const unsigned mNumCands = mIStates*2; ///< number of candidates to generate during branching
- static const unsigned mDeferral = 6*mOrder; ///< deferral to be used
- //@}
- //@}
-
- /** Precomputed tables. */
- //@{
- uint32_t mCoeffs[mIRate]; ///< polynomial for each generator
- uint32_t mStateTable[mIRate][2*mIStates]; ///< precomputed generator output tables
- uint32_t mGeneratorTable[2*mIStates]; ///< precomputed coder output table
- //@}
-
+// (pat) Nov 2013. I rationalized the behavior of BitVector and added assertions so that code which
+// relied on the bad aspects of the original behavior now dumps core. See comments at VectorBase.
+class BitVector : public VectorBase<char>
+{
public:
-
- /**
- A candidate sequence in a Viterbi decoder.
- The 32-bit state register can support a deferral of 6 with a 4th-order coder.
- */
- typedef struct candStruct {
- uint32_t iState; ///< encoder input associated with this candidate
- uint32_t oState; ///< encoder output associated with this candidate
- float cost; ///< cost (metric value), float to support soft inputs
- } vCand;
-
- /** Clear a structure. */
- void clear(vCand& v)
- {
- v.iState=0;
- v.oState=0;
- v.cost=0;
- }
-
-
- private:
-
- /**@name Survivors and candidates. */
- //@{
- vCand mSurvivors[mIStates]; ///< current survivor pool
- vCand mCandidates[2*mIStates]; ///< current candidate pool
- //@}
-
- public:
-
- unsigned iRate() const { return mIRate; }
- uint32_t cMask() const { return mCMask; }
- uint32_t stateTable(unsigned g, unsigned i) const { return mStateTable[g][i]; }
- unsigned deferral() const { return mDeferral; }
-
-
- ViterbiR2O4();
-
- /** Set all cost metrics to zero. */
- void initializeStates();
-
- /**
- Full cycle of the Viterbi algorithm: branch, metrics, prune, select.
- @return reference to minimum-cost candidate.
- */
- const vCand& step(uint32_t inSample, const float *probs, const float *iprobs);
-
- private:
-
- /** Branch survivors into new candidates. */
- void branchCandidates();
-
- /** Compute cost metrics for soft-inputs. */
- void getSoftCostMetrics(uint32_t inSample, const float *probs, const float *iprobs);
-
- /** Select survivors from the candidate set. */
- void pruneCandidates();
-
- /** Find the minimum cost survivor. */
- const vCand& minCost() const;
-
- /**
- Precompute the state tables.
- @param g Generator index 0..((1/rate)-1)
- */
- void computeStateTables(unsigned g);
-
- /**
- Precompute the generator outputs.
- mCoeffs must be defined first.
- */
- void computeGeneratorTable();
-
-};
-
-
-
-
-class BitVector : public Vector<char> {
-
-
- public:
-
/**@name Constructors. */
//@{
/**@name Casts of Vector constructors. */
- //@{
- BitVector(char* wData, char* wStart, char* wEnd)
- :Vector<char>(wData,wStart,wEnd)
- { }
- BitVector(size_t len=0):Vector<char>(len) {}
- BitVector(const Vector<char>& source):Vector<char>(source) {}
- BitVector(Vector<char>& source):Vector<char>(source) {}
- BitVector(const Vector<char>& source1, const Vector<char> source2):Vector<char>(source1,source2) {}
- //@}
+ BitVector(VectorDataType wData, char* wStart, char* wEnd) : VectorBase<char>(wData, wStart, wEnd) {}
+
+ // The one and only copy-constructor.
+ BitVector(const BitVector&other) : VectorBase<char>() {
+ VECTORDEBUG("BitVector(%p)",(void*)&other);
+ if (other.getData()) {
+ this->clone(other);
+ } else {
+ this->makeAlias(other);
+ }
+ }
- /** Construct from a string of "0" and "1". */
- BitVector(const char* valString);
- //@}
+ // (pat) Removed default value for len and added 'explicit'. Please do not remove 'explicit';
+ // it prevents auto-conversion of int to BitVector in constructors.
+ // Previous code was often ambiguous, especially for L3Frame and descendent constructors, leading to latent bugs.
+ explicit BitVector(size_t len) { this->vInit(len); }
+ BitVector() { this->vInit(0); }
- /** Index a single bit. */
- bool bit(size_t index) const
+ /** Build a BitVector by concatenation. */
+ BitVector(const BitVector& other1, const BitVector& other2) : VectorBase<char>()
{
- // We put this code in .h for fast inlining.
- const char *dp = mStart+index;
- assert(dp<mEnd);
- return (*dp) & 0x01;
+ assert(this->getData() == 0);
+ this->vConcat(other1,other2);
}
+ /** Construct from a string of "0" and "1". */
+ // (pat) Characters that are not '0' or '1' map to '0'.
+ BitVector(const char* valString);
+ //@}
+
/**@name Casts and overrides of Vector operators. */
//@{
+ // (pat) Please DO NOT add a const anywhere in this method. Use cloneSegment instead.
BitVector segment(size_t start, size_t span)
{
- char* wStart = mStart + start;
+ char* wStart = this->begin() + start;
char* wEnd = wStart + span;
- assert(wEnd<=mEnd);
+ assert(wEnd<=this->end());
+#if BITVECTOR_REFCNTS
+ return BitVector(mData,wStart,wEnd);
+#else
return BitVector(NULL,wStart,wEnd);
+#endif
}
- BitVector alias()
- { return segment(0,size()); }
+ // (pat) Historically the BitVector segment method had const and non-const versions with different behavior.
+ // I changed the name of the const version to cloneSegment and replaced all uses throughout OpenBTS.
+ const BitVector cloneSegment(size_t start, size_t span) const
+ {
+ BitVector seg = const_cast<BitVector*>(this)->segment(start,span);
+ // (pat) We are depending on the Return Value Optimization not to invoke the copy-constructor on the result,
+ // which would result in its immediate destruction while we are still using it.
+ BitVector result;
+ result.clone(seg);
+ return result;
+ }
- const BitVector segment(size_t start, size_t span) const
- { return (BitVector)(Vector<char>::segment(start,span)); }
+ BitVector alias() const {
+ return const_cast<BitVector*>(this)->segment(0,size());
+ }
BitVector head(size_t span) { return segment(0,span); }
- const BitVector head(size_t span) const { return segment(0,span); }
BitVector tail(size_t start) { return segment(start,size()-start); }
- const BitVector tail(size_t start) const { return segment(start,size()-start); }
+
+ // (pat) Please do NOT put the const version of head and tail back in, because historically they were messed up.
+ // Use cloneSegment instead.
+ //const BitVector head(size_t span) const { return segment(0,span); }
+ //const BitVector tail(size_t start) const { return segment(start,size()-start); }
//@}
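The practical difference between the two flavours, with invented values: segment() and alias() return views into the same storage, while cloneSegment() returns an independent copy, so a write through a segment is visible in the original vector (settfb() is declared further down in this header):

    BitVector v("10110010");
    BitVector head = v.segment(0, 4);         // alias: shares v's storage
    head.settfb(0, 0);                        // v is now 0,0,1,1,0,0,1,0
    BitVector copy = v.cloneSegment(4, 4);    // deep copy, independent of v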
@@ -285,8 +203,6 @@ class BitVector : public Vector<char> {
uint64_t syndrome(Generator& gen) const;
/** Calculate the parity word for the vector with the given Generator. */
uint64_t parity(Generator& gen) const;
- /** Encode the signal with the GSM rate 1/2 convolutional encoder. */
- void encode(const ViterbiR2O4& encoder, BitVector& target);
//@}
@@ -304,9 +220,16 @@ class BitVector : public Vector<char> {
/**@name Serialization and deserialization. */
//@{
uint64_t peekField(size_t readIndex, unsigned length) const;
+ uint64_t peekFieldReversed(size_t readIndex, unsigned length) const;
uint64_t readField(size_t& readIndex, unsigned length) const;
+ uint64_t readFieldReversed(size_t& readIndex, unsigned length) const;
void fillField(size_t writeIndex, uint64_t value, unsigned length);
+ void fillFieldReversed(size_t writeIndex, uint64_t value, unsigned length);
void writeField(size_t& writeIndex, uint64_t value, unsigned length);
+ void writeFieldReversed(size_t& writeIndex, uint64_t value, unsigned length);
+ void write0(size_t& writeIndex) { writeField(writeIndex,0,1); }
+ void write1(size_t& writeIndex) { writeField(writeIndex,1,1); }
+
//@}
/** Sum of bits. */
@@ -321,11 +244,79 @@ class BitVector : public Vector<char> {
/** Pack into a char array. */
void pack(unsigned char*) const;
- /** Unopack from a char array. */
+ /* Roman: This is here for debugging */
+ void pack2(unsigned char*) const;
+
+ // Same as pack but return a string.
+ std::string packToString() const;
+
+ /** Unpack from a char array. */
void unpack(const unsigned char*);
+ /** Make a hexdump string. */
+ void hex(std::ostream&) const;
+ std::string hexstr() const;
+
+ /** Unpack from a hexdump string.
+ * @returns true on success, false on error. */
+ bool unhex(const char*);
+
+ // For this method, 'other' should have been run through the copy-constructor already
+ // (unless it was newly created, ie foo.dup(L2Frame(...)), in which case we are screwed anyway)
+ // so the call to makeAlias is redundant.
+ // This only works if other is already an alias.
+ void dup(BitVector other) { assert(!this->getData()); makeAlias(other); assert(this->mStart == other.mStart); }
+ void dup(BitVector &other) { makeAlias(other); assert(this->mStart == other.mStart); }
+
+#if 0
+ void operator=(const BitVector& other) {
+ printf("BitVector::operator=\n");
+ assert(0);
+ //this->dup(other);
+ }
+#endif
+
+ bool operator==(const BitVector &other) const;
+
+ /** Copy to dst, not including those indexed in puncture. */
+ void copyPunctured(BitVector &dst, const unsigned *puncture, const size_t plth);
+
+ /** Index a single bit. */
+ // (pat) Can't have too many ways to do this, I guess.
+ bool bit(size_t index) const
+ {
+ // We put this code in .h for fast inlining.
+ const char *dp = this->begin()+index;
+ assert(dp<this->end());
+ return (*dp) & 0x01;
+ }
+
+ char& operator[](size_t index)
+ {
+ assert(this->mStart+index<this->mEnd);
+ return this->mStart[index];
+ }
+
+ const char& operator[](size_t index) const
+ {
+ assert(this->mStart+index<this->mEnd);
+ return this->mStart[index];
+ }
+
+ /** Set a bit */
+ void settfb(size_t index, int value)
+ {
+ char *dp = this->mStart+index;
+ assert(dp<this->mEnd);
+ *dp = value;
+ }
+
+ typedef char* iterator;
+ typedef const char* const_iterator;
};
+// (pat) BitVector2 was an intermediate step in fixing BitVector but is no longer needed.
+#define BitVector2 BitVector
std::ostream& operator<<(std::ostream&, const BitVector&);
@@ -348,7 +339,7 @@ class SoftVector: public Vector<float> {
/** Construct a SoftVector from a C string of "0", "1", and "X". */
SoftVector(const char* valString);
-
+
/** Construct a SoftVector from a BitVector. */
SoftVector(const BitVector& source);
@@ -395,8 +386,11 @@ class SoftVector: public Vector<float> {
const SoftVector tail(size_t start) const { return segment(start,size()-start); }
//@}
- /** Decode soft symbols with the GSM rate-1/2 Viterbi decoder. */
- void decode(ViterbiR2O4 &decoder, BitVector& target) const;
+ // (pat) How good is the SoftVector in the sense of the bits being solid?
+ // Result of 1 is perfect and 0 means all the bits were 0.5
+ // If plow is non-NULL, also return the lowest energy bit.
+ float getEnergy(float *low=0) const;
+ float getSNR() const;
/** Fill with "unknown" values. */
void unknown() { fill(0.5F); }
@@ -412,13 +406,29 @@ class SoftVector: public Vector<float> {
/** Slice the whole signal into bits. */
BitVector sliced() const;
-};
+ /** Copy to dst, adding in 0.5 for those indexed in puncture. */
+ void copyUnPunctured(SoftVector &dst, const unsigned *puncture, const size_t plth);
+ /** Return a soft bit. */
+ float softbit(size_t index) const
+ {
+ const float *dp = mStart+index;
+ assert(dp<mEnd);
+ return *dp;
+ }
+ /** Set a soft bit */
+ void settfb(size_t index, float value)
+ {
+ float *dp = mStart+index;
+ assert(dp<mEnd);
+ *dp = value;
+ }
+};
-std::ostream& operator<<(std::ostream&, const SoftVector&);
+std::ostream& operator<<(std::ostream&, const SoftVector&);
diff --git a/lib/decoding/GSM503Tables.cpp b/lib/decoding/GSM503Tables.cpp
new file mode 100644
index 0000000..02150f2
--- /dev/null
+++ b/lib/decoding/GSM503Tables.cpp
@@ -0,0 +1,322 @@
+/*
+* Copyright 2012, 2014 Range Networks, Inc.
+*
+* This software is distributed under multiple licenses; see the COPYING file in the main directory for licensing information for this specific distribution.
+*
+* This use of this software may be subject to additional restrictions.
+* See the LEGAL file in the main directory for details.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+
+*/
+
+
+
+#include "GSM503Tables.h"
+
+
+/*
+ This array encodes GSM 05.03 Table 7.
+*/
+const unsigned int GSM::gAMRBitOrderTCH_AFS12_2[244] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 23, 15, 16, 17, 18,
+ 19, 20, 21, 22, 24, 25, 26, 27, 28, 38,
+ 141, 39, 142, 40, 143, 41, 144, 42, 145, 43,
+ 146, 44, 147, 45, 148, 46, 149, 47, 97, 150,
+ 200, 48, 98, 151, 201, 49, 99, 152, 202, 86,
+ 136, 189, 239, 87, 137, 190, 240, 88, 138, 191,
+ 241, 91, 194, 92, 195, 93, 196, 94, 197, 95,
+ 198, 29, 30, 31, 32, 33, 34, 35, 50, 100,
+ 153, 203, 89, 139, 192, 242, 51, 101, 154, 204,
+ 55, 105, 158, 208, 90, 140, 193, 243, 59, 109,
+ 162, 212, 63, 113, 166, 216, 67, 117, 170, 220,
+ 36, 37, 54, 53, 52, 58, 57, 56, 62, 61,
+ 60, 66, 65, 64, 70, 69, 68, 104, 103, 102,
+ 108, 107, 106, 112, 111, 110, 116, 115, 114, 120,
+ 119, 118, 157, 156, 155, 161, 160, 159, 165, 164,
+ 163, 169, 168, 167, 173, 172, 171, 207, 206, 205,
+ 211, 210, 209, 215, 214, 213, 219, 218, 217, 223,
+ 222, 221, 73, 72, 71, 76, 75, 74, 79, 78,
+ 77, 82, 81, 80, 85, 84, 83, 123, 122, 121,
+ 126, 125, 124, 129, 128, 127, 132, 131, 130, 135,
+ 134, 133, 176, 175, 174, 179, 178, 177, 182, 181,
+ 180, 185, 184, 183, 188, 187, 186, 226, 225, 224,
+ 229, 228, 227, 232, 231, 230, 235, 234, 233, 238,
+ 237, 236, 96, 199
+};
+
+
+/*
+ This array encodes GSM 05.03 Table 8.
+*/
+const unsigned int GSM::gAMRBitOrderTCH_AFS10_2[204] = {
+ 7, 6, 5, 4, 3, 2, 1, 0, 16, 15,
+ 14, 13, 12, 11, 10, 9, 8, 26, 27, 28,
+ 29, 30, 31, 115, 116, 117, 118, 119, 120, 72,
+ 73, 161, 162, 65, 68, 69, 108, 111, 112, 154,
+ 157, 158, 197, 200, 201, 32, 33, 121, 122, 74,
+ 75, 163, 164, 66, 109, 155, 198, 19, 23, 21,
+ 22, 18, 17, 20, 24, 25, 37, 36, 35, 34,
+ 80, 79, 78, 77, 126, 125, 124, 123, 169, 168,
+ 167, 166, 70, 67, 71, 113, 110, 114, 159, 156,
+ 160, 202, 199, 203, 76, 165, 81, 82, 92, 91,
+ 93, 83, 95, 85, 84, 94, 101, 102, 96, 104,
+ 86, 103, 87, 97, 127, 128, 138, 137, 139, 129,
+ 141, 131, 130, 140, 147, 148, 142, 150, 132, 149,
+ 133, 143, 170, 171, 181, 180, 182, 172, 184, 174,
+ 173, 183, 190, 191, 185, 193, 175, 192, 176, 186,
+ 38, 39, 49, 48, 50, 40, 52, 42, 41, 51,
+ 58, 59, 53, 61, 43, 60, 44, 54, 194, 179,
+ 189, 196, 177, 195, 178, 187, 188, 151, 136, 146,
+ 153, 134, 152, 135, 144, 145, 105, 90, 100, 107,
+ 88, 106, 89, 98, 99, 62, 47, 57, 64, 45,
+ 63, 46, 55, 56
+};
+
+
+/*
+ This array encodes GSM 05.03 Table 9.
+*/
+const unsigned int GSM::gAMRBitOrderTCH_AFS7_95[159] = {
+ 8, 7, 6, 5, 4, 3, 2, 14, 16, 9,
+ 10, 12, 13, 15, 11, 17, 20, 22, 24, 23,
+ 19, 18, 21, 56, 88, 122, 154, 57, 89, 123,
+ 155, 58, 90, 124, 156, 52, 84, 118, 150, 53,
+ 85, 119, 151, 27, 93, 28, 94, 29, 95, 30,
+ 96, 31, 97, 61, 127, 62, 128, 63, 129, 59,
+ 91, 125, 157, 32, 98, 64, 130, 1, 0, 25,
+ 26, 33, 99, 34, 100, 65, 131, 66, 132, 54,
+ 86, 120, 152, 60, 92, 126, 158, 55, 87, 121,
+ 153, 117, 116, 115, 46, 78, 112, 144, 43, 75,
+ 109, 141, 40, 72, 106, 138, 36, 68, 102, 134,
+ 114, 149, 148, 147, 146, 83, 82, 81, 80, 51,
+ 50, 49, 48, 47, 45, 44, 42, 39, 35, 79,
+ 77, 76, 74, 71, 67, 113, 111, 110, 108, 105,
+ 101, 145, 143, 142, 140, 137, 133, 41, 73, 107,
+ 139, 37, 69, 103, 135, 38, 70, 104, 136
+};
+
+/*
+ This array encodes GSM 05.03 Table 10.
+*/
+const unsigned int GSM::gAMRBitOrderTCH_AFS7_4[148] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 15, 16, 26, 87, 27,
+ 88, 28, 89, 29, 90, 30, 91, 51, 80, 112,
+ 141, 52, 81, 113, 142, 54, 83, 115, 144, 55,
+ 84, 116, 145, 58, 119, 59, 120, 21, 22, 23,
+ 17, 18, 19, 31, 60, 92, 121, 56, 85, 117,
+ 146, 20, 24, 25, 50, 79, 111, 140, 57, 86,
+ 118, 147, 49, 78, 110, 139, 48, 77, 53, 82,
+ 114, 143, 109, 138, 47, 76, 108, 137, 32, 33,
+ 61, 62, 93, 94, 122, 123, 41, 42, 43, 44,
+ 45, 46, 70, 71, 72, 73, 74, 75, 102, 103,
+ 104, 105, 106, 107, 131, 132, 133, 134, 135, 136,
+ 34, 63, 95, 124, 35, 64, 96, 125, 36, 65,
+ 97, 126, 37, 66, 98, 127, 38, 67, 99, 128,
+ 39, 68, 100, 129, 40, 69, 101, 130
+};
+
+/*
+ This array encodes GSM 05.03 Table 11.
+*/
+const unsigned int GSM::gAMRBitOrderTCH_AFS6_7[134] = {
+ 0, 1, 4, 3, 5, 6, 13, 7, 2, 8,
+ 9, 11, 15, 12, 14, 10, 28, 82, 29, 83,
+ 27, 81, 26, 80, 30, 84, 16, 55, 109, 56,
+ 110, 31, 85, 57, 111, 48, 73, 102, 127, 32,
+ 86, 51, 76, 105, 130, 52, 77, 106, 131, 58,
+ 112, 33, 87, 19, 23, 53, 78, 107, 132, 21,
+ 22, 18, 17, 20, 24, 25, 50, 75, 104, 129,
+ 47, 72, 101, 126, 54, 79, 108, 133, 46, 71,
+ 100, 125, 128, 103, 74, 49, 45, 70, 99, 124,
+ 42, 67, 96, 121, 39, 64, 93, 118, 38, 63,
+ 92, 117, 35, 60, 89, 114, 34, 59, 88, 113,
+ 44, 69, 98, 123, 43, 68, 97, 122, 41, 66,
+ 95, 120, 40, 65, 94, 119, 37, 62, 91, 116,
+ 36, 61, 90, 115
+};
+
+/*
+ This array encodes GSM 05.03 Table 12.
+*/
+const unsigned int GSM::gAMRBitOrderTCH_AFS5_9[118] = {
+ 0, 1, 4, 5, 3, 6, 7, 2, 13, 15,
+ 8, 9, 11, 12, 14, 10, 16, 28, 74, 29,
+ 75, 27, 73, 26, 72, 30, 76, 51, 97, 50,
+ 71, 96, 117, 31, 77, 52, 98, 49, 70, 95,
+ 116, 53, 99, 32, 78, 33, 79, 48, 69, 94,
+ 115, 47, 68, 93, 114, 46, 67, 92, 113, 19,
+ 21, 23, 22, 18, 17, 20, 24, 111, 43, 89,
+ 110, 64, 65, 44, 90, 25, 45, 66, 91, 112,
+ 54, 100, 40, 61, 86, 107, 39, 60, 85, 106,
+ 36, 57, 82, 103, 35, 56, 81, 102, 34, 55,
+ 80, 101, 42, 63, 88, 109, 41, 62, 87, 108,
+ 38, 59, 84, 105, 37, 58, 83, 104
+};
+
+/*
+ This array encodes GSM 05.03 Table 13.
+*/
+const unsigned int GSM::gAMRBitOrderTCH_AFS5_15[103] = {
+ 7, 6, 5, 4, 3, 2, 1, 0, 15, 14,
+ 13, 12, 11, 10, 9, 8, 23, 24, 25, 26,
+ 27, 46, 65, 84, 45, 44, 43, 64, 63, 62,
+ 83, 82, 81, 102, 101, 100, 42, 61, 80, 99,
+ 28, 47, 66, 85, 18, 41, 60, 79, 98, 29,
+ 48, 67, 17, 20, 22, 40, 59, 78, 97, 21,
+ 30, 49, 68, 86, 19, 16, 87, 39, 38, 58,
+ 57, 77, 35, 54, 73, 92, 76, 96, 95, 36,
+ 55, 74, 93, 32, 51, 33, 52, 70, 71, 89,
+ 90, 31, 50, 69, 88, 37, 56, 75, 94, 34,
+ 53, 72, 91
+};
+
+/*
+ This array encodes GSM 05.03 Table 14.
+*/
+const unsigned int GSM::gAMRBitOrderTCH_AFS4_75[95] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 15, 23, 24, 25, 26,
+ 27, 28, 48, 49, 61, 62, 82, 83, 47, 46,
+ 45, 44, 81, 80, 79, 78, 17, 18, 20, 22,
+ 77, 76, 75, 74, 29, 30, 43, 42, 41, 40,
+ 38, 39, 16, 19, 21, 50, 51, 59, 60, 63,
+ 64, 72, 73, 84, 85, 93, 94, 32, 33, 35,
+ 36, 53, 54, 56, 57, 66, 67, 69, 70, 87,
+ 88, 90, 91, 34, 55, 68, 89, 37, 58, 71,
+ 92, 31, 52, 65, 86
+};
+
+/* GSM 05.03 3.9.4.4 */
+const unsigned int GSM::gAMRPuncturedTCH_AFS12_2[60] = {
+ 321, 325, 329, 333, 337, 341, 345, 349, 353, 357, 361, 363, 365,
+ 369, 373, 377, 379, 381, 385, 389, 393, 395, 397, 401, 405, 409,
+ 411, 413, 417, 421, 425, 427, 429, 433, 437, 441, 443, 445, 449,
+ 453, 457, 459, 461, 465, 469, 473, 475, 477, 481, 485, 489, 491,
+ 493, 495, 497, 499, 501, 503, 505, 507
+};
+
+/* GSM 05.03 3.9.4.4 */
+const unsigned int GSM::gAMRPuncturedTCH_AFS10_2[194] = {
+ 1, 4, 7, 10, 16, 19, 22, 28, 31, 34, 40, 43, 46, 52, 55, 58,
+ 64, 67, 70, 76, 79, 82, 88, 91, 94, 100, 103, 106, 112, 115,
+ 118, 124, 127, 130, 136, 139, 142, 148, 151, 154, 160, 163, 166,
+ 172, 175, 178, 184, 187, 190, 196, 199, 202, 208, 211, 214, 220,
+ 223, 226, 232, 235, 238, 244, 247, 250, 256, 259, 262, 268, 271,
+ 274, 280, 283, 286, 292, 295, 298, 304, 307, 310, 316, 319, 322,
+ 325, 328, 331, 334, 337, 340, 343, 346, 349, 352, 355, 358, 361,
+ 364, 367, 370, 373, 376, 379, 382, 385, 388, 391, 394, 397, 400,
+ 403, 406, 409, 412, 415, 418, 421, 424, 427, 430, 433, 436, 439,
+ 442, 445, 448, 451, 454, 457, 460, 463, 466, 469, 472, 475, 478,
+ 481, 484, 487, 490, 493, 496, 499, 502, 505, 508, 511, 514, 517,
+ 520, 523, 526, 529, 532, 535, 538, 541, 544, 547, 550, 553, 556,
+ 559, 562, 565, 568, 571, 574, 577, 580, 583, 586, 589, 592, 595,
+ 598, 601, 604, 607, 609, 610, 613, 616, 619, 621, 622, 625, 627,
+ 628, 631, 633, 634, 636, 637, 639, 640
+};
+
+/* GSM 05.03 3.9.4.4 */
+const unsigned int GSM::gAMRPuncturedTCH_AFS7_95[65] = {
+ 1, 2, 4, 5, 8, 22, 70, 118, 166, 214, 262, 310, 317, 319, 325,
+ 332, 334, 341, 343, 349, 356, 358, 365, 367, 373, 380, 382, 385,
+ 389, 391, 397, 404, 406, 409, 413, 415, 421, 428, 430, 433, 437,
+ 439, 445, 452, 454, 457, 461, 463, 469, 476, 478, 481, 485, 487,
+ 490, 493, 500, 502, 503, 505, 506, 508, 509, 511, 512
+};
+
+/* GSM 05.03 3.9.4.4 */
+const unsigned int GSM::gAMRPuncturedTCH_AFS7_4[26] = {
+ 0, 355, 361, 367, 373, 379, 385, 391, 397, 403, 409, 415, 421,
+ 427, 433, 439, 445, 451, 457, 460, 463, 466, 468, 469, 471,
+ 472
+};
+
+/* GSM 05.03 3.9.4.4 */
+const unsigned int GSM::gAMRPuncturedTCH_AFS6_7[128] = {
+ 1, 3, 7, 11, 15, 27, 39, 55, 67, 79, 95, 107, 119, 135, 147,
+ 159, 175, 187, 199, 215, 227, 239, 255, 267, 279, 287, 291, 295,
+ 299, 303, 307, 311, 315, 319, 323, 327, 331, 335, 339, 343, 347,
+ 351, 355, 359, 363, 367, 369, 371, 375, 377, 379, 383, 385, 387,
+ 391, 393, 395, 399, 401, 403, 407, 409, 411, 415, 417, 419, 423,
+ 425, 427, 431, 433, 435, 439, 441, 443, 447, 449, 451, 455, 457,
+ 459, 463, 465, 467, 471, 473, 475, 479, 481, 483, 487, 489, 491,
+ 495, 497, 499, 503, 505, 507, 511, 513, 515, 519, 521, 523, 527,
+ 529, 531, 535, 537, 539, 543, 545, 547, 549, 551, 553, 555, 557,
+ 559, 561, 563, 565, 567, 569, 571, 573, 575
+};
+
+/* GSM 05.03 3.9.4.4 */
+const unsigned int GSM::gAMRPuncturedTCH_AFS5_9[72] = {
+ 0, 1, 3, 5, 7, 11, 15, 31, 47, 63, 79, 95, 111, 127, 143,
+ 159, 175, 191, 207, 223, 239, 255, 271, 287, 303, 319, 327, 331,
+ 335, 343, 347, 351, 359, 363, 367, 375, 379, 383, 391, 395, 399,
+ 407, 411, 415, 423, 427, 431, 439, 443, 447, 455, 459, 463, 467,
+ 471, 475, 479, 483, 487, 491, 495, 499, 503, 507, 509, 511, 512,
+ 513, 515, 516, 517, 519
+};
+
+/* GSM 05.03 3.9.4.4 */
+const unsigned int GSM::gAMRPuncturedTCH_AFS5_15[117] = {
+ 0, 4, 5, 9, 10, 14, 15, 20, 25, 30, 35, 40, 50, 60, 70,
+ 80, 90, 100, 110, 120, 130, 140, 150, 160, 170, 180, 190, 200,
+ 210, 220, 230, 240, 250, 260, 270, 280, 290, 300, 310, 315, 320,
+ 325, 330, 334, 335, 340, 344, 345, 350, 354, 355, 360, 364, 365,
+ 370, 374, 375, 380, 384, 385, 390, 394, 395, 400, 404, 405, 410,
+ 414, 415, 420, 424, 425, 430, 434, 435, 440, 444, 445, 450, 454,
+ 455, 460, 464, 465, 470, 474, 475, 480, 484, 485, 490, 494, 495,
+ 500, 504, 505, 510, 514, 515, 520, 524, 525, 529, 530, 534, 535,
+ 539, 540, 544, 545, 549, 550, 554, 555, 559, 560, 564
+};
+
+/* GSM 05.03 3.9.4.4 */
+const unsigned int GSM::gAMRPuncturedTCH_AFS4_75[87] = {
+ 0, 1, 2, 4, 5, 7, 9, 15, 25, 35, 45, 55, 65, 75, 85, 95,
+ 105, 115, 125, 135, 145, 155, 165, 175, 185, 195, 205, 215, 225,
+ 235, 245, 255, 265, 275, 285, 295, 305, 315, 325, 335, 345, 355,
+ 365, 375, 385, 395, 400, 405, 410, 415, 420, 425, 430, 435, 440,
+ 445, 450, 455, 459, 460, 465, 470, 475, 479, 480, 485, 490, 495,
+ 499, 500, 505, 509, 510, 515, 517, 519, 520, 522, 524, 525, 526,
+ 527, 529, 530, 531, 532, 534
+};
+
+/* GSM 05.03 Tables 7-14 */
+const unsigned int *GSM::gAMRBitOrder[8] = {
+ GSM::gAMRBitOrderTCH_AFS12_2,
+ GSM::gAMRBitOrderTCH_AFS10_2,
+ GSM::gAMRBitOrderTCH_AFS7_95,
+ GSM::gAMRBitOrderTCH_AFS7_4,
+ GSM::gAMRBitOrderTCH_AFS6_7,
+ GSM::gAMRBitOrderTCH_AFS5_9,
+ GSM::gAMRBitOrderTCH_AFS5_15,
+ GSM::gAMRBitOrderTCH_AFS4_75
+};
+
+/* GSM 05.03 3.9.4.2 */
+const unsigned int GSM::gAMRKd[9] = {244, 204, 159, 148, 134, 118, 103, 95, 260}; // The last entry is for TCH_FS (GSM mode)
+
+/* GSM 05.03 3.9.4.2 */
+const unsigned int GSM::gAMRClass1ALth[8] = {81, 65, 75, 61, 55, 55, 49, 39};
+
+/* GSM 05.03 3.9.4.4 */
+const unsigned int GSM::gAMRTCHUCLth[8] = {508, 642, 513, 474, 576, 520, 565, 535};
+
+/* GSM 05.03 3.9.4.2 */
+const unsigned int GSM::gAMRPunctureLth[8] = {60, 194, 65, 26, 128, 72, 117, 87};
+
+/* GSM 05.03 3.9.4.4 */
+const unsigned int *GSM::gAMRPuncture[8] = {
+ GSM::gAMRPuncturedTCH_AFS12_2,
+ GSM::gAMRPuncturedTCH_AFS10_2,
+ GSM::gAMRPuncturedTCH_AFS7_95,
+ GSM::gAMRPuncturedTCH_AFS7_4,
+ GSM::gAMRPuncturedTCH_AFS6_7,
+ GSM::gAMRPuncturedTCH_AFS5_9,
+ GSM::gAMRPuncturedTCH_AFS5_15,
+ GSM::gAMRPuncturedTCH_AFS4_75
+};
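+
+/*
+ How the tables above fit together (illustrative sketch; the actual wiring lives
+ in the AMR coders and BitVector::copyPunctured(), not in this file):
+ for a mode m in [TCH_AFS12_2 .. TCH_AFS4_75],
+ - the gAMRKd[m] speech bits are reordered through gAMRBitOrder[m],
+ - the first gAMRClass1ALth[m] of them get CRC protection and the block is
+   convolutionally encoded to gAMRTCHUCLth[m] bits,
+ - gAMRPunctureLth[m] of those bits, at the positions listed in gAMRPuncture[m],
+   are removed, leaving e.g. 508 - 60 = 448 coded class-1 bits for TCH_AFS12_2.
+
+ A puncturing loop equivalent to BitVector::copyPunctured() could look like:
+
+   unsigned op = 0, pidx = 0;
+   for (unsigned i = 0; i < GSM::gAMRTCHUCLth[m]; i++) {
+     if (pidx < GSM::gAMRPunctureLth[m] && i == GSM::gAMRPuncture[m][pidx]) { pidx++; continue; }
+     out.settfb(op++, encoded.bit(i));
+   }
+*/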
+
+
diff --git a/lib/decoding/GSM503Tables.h b/lib/decoding/GSM503Tables.h
new file mode 100644
index 0000000..6b6327c
--- /dev/null
+++ b/lib/decoding/GSM503Tables.h
@@ -0,0 +1,71 @@
+/*
+* Copyright 2012, 2014 Range Networks, Inc.
+*
+* This software is distributed under multiple licenses; see the COPYING file in the main directory for licensing information for this specific distribution.
+*
+* This use of this software may be subject to additional restrictions.
+* See the LEGAL file in the main directory for details.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+
+*/
+
+
+
+#ifndef GSM503TABLES_H
+#define GSM503TABLES_H
+
+
+
+namespace GSM {
+
+// don't change the positions in this enum
+// (pat) The first 8 values are used as indices into numerous tables.
+// (pat) The encoder/decoder mode includes 8 AMR modes plus TCH_FS, making 9.
+// TODO: Add AFS_SID type. And why is it not type 8?
+enum AMRMode {TCH_AFS12_2, TCH_AFS10_2, TCH_AFS7_95, TCH_AFS7_4, TCH_AFS6_7, TCH_AFS5_9, TCH_AFS5_15, TCH_AFS4_75, TCH_FS};
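+// For example, GSM::gAMRBitOrder[TCH_AFS7_4] has gAMRKd[TCH_AFS7_4] == 148 entries,
+// and gAMRPuncture[TCH_AFS7_4] lists gAMRPunctureLth[TCH_AFS7_4] == 26 punctured positions.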
+
+/** Tables #7-14 from GSM 05.03 */
+extern const unsigned int gAMRBitOrderTCH_AFS12_2[244];
+extern const unsigned int gAMRBitOrderTCH_AFS10_2[204];
+extern const unsigned int gAMRBitOrderTCH_AFS7_95[159];
+extern const unsigned int gAMRBitOrderTCH_AFS7_4[148];
+extern const unsigned int gAMRBitOrderTCH_AFS6_7[134];
+extern const unsigned int gAMRBitOrderTCH_AFS5_9[118];
+extern const unsigned int gAMRBitOrderTCH_AFS5_15[103];
+extern const unsigned int gAMRBitOrderTCH_AFS4_75[95];
+
+/** GSM 05.03 3.9.4.4 */
+extern const unsigned int gAMRPuncturedTCH_AFS12_2[60];
+extern const unsigned int gAMRPuncturedTCH_AFS10_2[194];
+extern const unsigned int gAMRPuncturedTCH_AFS7_95[65];
+extern const unsigned int gAMRPuncturedTCH_AFS7_4[26];
+extern const unsigned int gAMRPuncturedTCH_AFS6_7[128];
+extern const unsigned int gAMRPuncturedTCH_AFS5_9[72];
+extern const unsigned int gAMRPuncturedTCH_AFS5_15[117];
+extern const unsigned int gAMRPuncturedTCH_AFS4_75[87];
+
+/* GSM 05.03 Tables 7-14 */
+extern const unsigned *gAMRBitOrder[8];
+
+/* GSM 05.03 3.9.4.2 */
+extern const unsigned gAMRKd[9];
+
+/* GSM 05.03 3.9.4.2 */
+extern const unsigned gAMRClass1ALth[8];
+
+/* GSM 05.03 3.9.4.4 */
+extern const unsigned gAMRTCHUCLth[8];
+
+/* GSM 05.03 3.9.4.2 */
+extern const unsigned gAMRPunctureLth[8];
+
+/* GSM 05.03 3.9.4.4 */
+extern const unsigned *gAMRPuncture[8];
+
+}
+
+
+#endif
diff --git a/lib/decoding/GSM690Tables.cpp b/lib/decoding/GSM690Tables.cpp
deleted file mode 100644
index 7c40a9a..0000000
--- a/lib/decoding/GSM690Tables.cpp
+++ /dev/null
@@ -1,49 +0,0 @@
-/* AMR (GSM 06.90) importance bit ordering */
-
-/*
- * Copyright 2010 Sylvain Munaut <tnt@246tNt.com>
- * All Rights Reserved
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "GSM690Tables.h"
-
-unsigned int GSM::g690_12_2_BitOrder[244] = {
- 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
- 10, 11, 12, 13, 14, 23, 15, 16, 17, 18,
- 19, 20, 21, 22, 24, 25, 26, 27, 28, 38,
- 141, 39, 142, 40, 143, 41, 144, 42, 145, 43,
- 146, 44, 147, 45, 148, 46, 149, 47, 97, 150,
- 200, 48, 98, 151, 201, 49, 99, 152, 202, 86,
- 136, 189, 239, 87, 137, 190, 240, 88, 138, 191,
- 241, 91, 194, 92, 195, 93, 196, 94, 197, 95,
- 198, 29, 30, 31, 32, 33, 34, 35, 50, 100,
- 153, 203, 89, 139, 192, 242, 51, 101, 154, 204,
- 55, 105, 158, 208, 90, 140, 193, 243, 59, 109,
- 162, 212, 63, 113, 166, 216, 67, 117, 170, 220,
- 36, 37, 54, 53, 52, 58, 57, 56, 62, 61,
- 60, 66, 65, 64, 70, 69, 68, 104, 103, 102,
- 108, 107, 106, 112, 111, 110, 116, 115, 114, 120,
- 119, 118, 157, 156, 155, 161, 160, 159, 165, 164,
- 163, 169, 168, 167, 173, 172, 171, 207, 206, 205,
- 211, 210, 209, 215, 214, 213, 219, 218, 217, 223,
- 222, 221, 73, 72, 71, 76, 75, 74, 79, 78,
- 77, 82, 81, 80, 85, 84, 83, 123, 122, 121,
- 126, 125, 124, 129, 128, 127, 132, 131, 130, 135,
- 134, 133, 176, 175, 174, 179, 178, 177, 182, 181,
- 180, 185, 184, 183, 188, 187, 186, 226, 225, 224,
- 229, 228, 227, 232, 231, 230, 235, 234, 233, 238,
- 237, 236, 96, 199,
-};
diff --git a/lib/decoding/GSM690Tables.h b/lib/decoding/GSM690Tables.h
deleted file mode 100644
index 6b9bb36..0000000
--- a/lib/decoding/GSM690Tables.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/* AMR (GSM 06.90) importance bit ordering */
-
-/*
- * Copyright 2010 Sylvain Munaut <tnt@246tNt.com>
- * All Rights Reserved
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef GSM690TABLES_H
-#define GSM690TABLES_H
-
-namespace GSM {
-
-/** Table #7 from GSM 05.03 */
-extern unsigned int g690_12_2_BitOrder[244];
-
-}
-
-#endif /* GSM690TABLES_H */
diff --git a/lib/decoding/Vector.h b/lib/decoding/Vector.h
index 88ab654..23b4dd3 100644
--- a/lib/decoding/Vector.h
+++ b/lib/decoding/Vector.h
@@ -1,23 +1,23 @@
/**@file Simplified Vector template with aliases. */
/*
* Copyright 2008 Free Software Foundation, Inc.
+* Copyright 2014 Range Networks, Inc.
*
-* This software is distributed under the terms of the GNU Public License.
+* This software is distributed under the terms of the GNU Affero Public License.
* See the COPYING file in the main directory for details.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <http://www.gnu.org/licenses/>.
-
+*
+* This use of this software may be subject to additional restrictions.
+* See the LEGAL file in the main directory for details.
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+ You should have received a copy of the GNU Affero General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
@@ -29,225 +29,367 @@
#include <string.h>
#include <iostream>
#include <assert.h>
+#include <stdio.h>
+// We can't use Logger.h in this file...
+extern int gVectorDebug;
+//#define ENABLE_VECTORDEBUG
+#ifdef ENABLE_VECTORDEBUG
+#define VECTORDEBUG(...) { printf(__VA_ARGS__); printf(" this=%p [%p,%p,%p]\n",(void*)this,(void*)&mData,mStart,mEnd); }
+//#define VECTORDEBUG(msg) { std::cout<<msg<<std::endl; }
+#else
+#define VECTORDEBUG(...)
+#endif
-/**
- A simplified Vector template with aliases.
- Unlike std::vector, this class does not support dynamic resizing.
- Unlike std::vector, this class does support "aliases" and subvectors.
-*/
-template <class T> class Vector {
-
- // TODO -- Replace memcpy calls with for-loops.
-
- public:
-
- /**@name Iterator types. */
- //@{
- typedef T* iterator;
- typedef const T* const_iterator;
- //@}
-
- protected:
-
- T* mData; ///< allocated data block, if any
- T* mStart; ///< start of useful data
- T* mEnd; ///< end of useful data + 1
-
- public:
-
- /** Return the size of the Vector. */
- size_t size() const
- {
- assert(mStart>=mData);
- assert(mEnd>=mStart);
- return mEnd - mStart;
- }
-
- /** Return size in bytes. */
- size_t bytes() const { return size()*sizeof(T); }
-
- /** Change the size of the Vector, discarding content. */
- void resize(size_t newSize)
- {
- if (mData!=NULL) delete[] mData;
- if (newSize==0) mData=NULL;
- else mData = new T[newSize];
- mStart = mData;
- mEnd = mStart + newSize;
- }
-
- /** Release memory and clear pointers. */
- void clear() { resize(0); }
-
-
- /** Copy data from another vector. */
- void clone(const Vector<T>& other)
- {
- resize(other.size());
- memcpy(mData,other.mStart,other.bytes());
- }
-
-
-
-
- //@{
-
- /** Build an empty Vector of a given size. */
- Vector(size_t wSize=0):mData(NULL) { resize(wSize); }
-
- /** Build a Vector by shifting the data block. */
- Vector(Vector<T>& other)
- :mData(other.mData),mStart(other.mStart),mEnd(other.mEnd)
- { other.mData=NULL; }
-
- /** Build a Vector by copying another. */
- Vector(const Vector<T>& other):mData(NULL) { clone(other); }
-
- /** Build a Vector with explicit values. */
- Vector(T* wData, T* wStart, T* wEnd)
- :mData(wData),mStart(wStart),mEnd(wEnd)
- { }
-
- /** Build a vector from an existing block, NOT to be deleted upon destruction. */
- Vector(T* wStart, size_t span)
- :mData(NULL),mStart(wStart),mEnd(wStart+span)
- { }
-
- /** Build a Vector by concatenation. */
- Vector(const Vector<T>& other1, const Vector<T>& other2)
- :mData(NULL)
- {
- resize(other1.size()+other2.size());
- memcpy(mStart, other1.mStart, other1.bytes());
- memcpy(mStart+other1.size(), other2.mStart, other2.bytes());
- }
-
- //@}
-
- /** Destroy a Vector, deleting held memory. */
- ~Vector() { clear(); }
-
-
-
-
- //@{
-
- /** Assign from another Vector, shifting ownership. */
- void operator=(Vector<T>& other)
- {
- clear();
- mData=other.mData;
- mStart=other.mStart;
- mEnd=other.mEnd;
- other.mData=NULL;
- }
-
- /** Assign from another Vector, copying. */
- void operator=(const Vector<T>& other) { clone(other); }
-
- //@}
+#define BITVECTOR_REFCNTS 0
+#if BITVECTOR_REFCNTS
+// (pat) Started to add refcnts, decided against it for now.
+template <class T> class RCData : public RefCntBase {
+ public:
+ T* mPointer;
+};
+#endif
- //@{
-
- /** Return an alias to a segment of this Vector. */
- Vector<T> segment(size_t start, size_t span)
- {
- T* wStart = mStart + start;
- T* wEnd = wStart + span;
- assert(wEnd<=mEnd);
- return Vector<T>(NULL,wStart,wEnd);
- }
-
- /** Return an alias to a segment of this Vector. */
- const Vector<T> segment(size_t start, size_t span) const
- {
- T* wStart = mStart + start;
- T* wEnd = wStart + span;
- assert(wEnd<=mEnd);
- return Vector<T>(NULL,wStart,wEnd);
- }
-
- Vector<T> head(size_t span) { return segment(0,span); }
- const Vector<T> head(size_t span) const { return segment(0,span); }
- Vector<T> tail(size_t start) { return segment(start,size()-start); }
- const Vector<T> tail(size_t start) const { return segment(start,size()-start); }
-
- /**
- Copy part of this Vector to a segment of another Vector.
- @param other The other vector.
- @param start The start point in the other vector.
- @param span The number of elements to copy.
- */
- void copyToSegment(Vector<T>& other, size_t start, size_t span) const
- {
- T* base = other.mStart + start;
- assert(base+span<=other.mEnd);
- assert(mStart+span<=mEnd);
- memcpy(base,mStart,span*sizeof(T));
- }
- /** Copy all of this Vector to a segment of another Vector. */
- void copyToSegment(Vector<T>& other, size_t start=0) const { copyToSegment(other,start,size()); }
-
- void copyTo(Vector<T>& other) const { copyToSegment(other,0,size()); }
-
- /**
- Copy a segment of this vector into another.
- @param other The other vector (to copt into starting at 0.)
- @param start The start point in this vector.
- @param span The number of elements to copy.
- */
- void segmentCopyTo(Vector<T>& other, size_t start, size_t span)
- {
- T* base = mStart + start;
- assert(base+span<=mEnd);
- assert(other.mStart+span<=other.mEnd);
- memcpy(other.mStart,base,span*sizeof(T));
- }
-
- void fill(const T& val)
- {
- T* dp=mStart;
- while (dp<mEnd) *dp++=val;
- }
-
-
- //@}
-
-
- //@{
-
- T& operator[](size_t index)
- {
- assert(mStart+index<mEnd);
- return mStart[index];
- }
-
- const T& operator[](size_t index) const
- {
- assert(mStart+index<mEnd);
- return mStart[index];
- }
-
- const T* begin() const { return mStart; }
- T* begin() { return mStart; }
- const T* end() const { return mEnd; }
- T* end() { return mEnd; }
- //@}
+/**
+ A simplified Vector template with aliases.
+ Unlike std::vector, this class does not support dynamic resizing.
+ Unlike std::vector, this class does support "aliases" and subvectors.
+*/
+// (pat) Nov 2013: Vector and the derived classes BitVector and SoftVector were originally written with behavior
+// that differed for const and non-const cases, making them very difficult to use and resulting in many extremely
+// difficult-to-find bugs in the code base.
+// Ultimately these classes should all be converted to reference counted methodologies, but as an interim measure
+// I am rationalizing their behavior until we flush out all places in the code base that inadvertently depended
+// on the original behavior. This is done with assert statements in BitVector methods.
+// ====
+// What the behavior was probably supposed to be:
+// Vectors can 'own' the data they point to or not. Only one Vector 'owns' the memory at a time,
+// so that automatic destruction can be used. So whenever there is an operation that yields one
+// vector from another the options were: clone (allocate a new vector from memory), alias (make the
+// new vector point into the memory of the original vector) or shift (the new Vector steals the
+// memory ownership from the original vector.)
+// The const copy-constructor did a clone, the non-const copy constructor did a shiftMem, and the segment and
+// related methods (head, tail, etc) returned aliases.
+// Since a copy-constructor is inserted transparently in sometimes surprising places, this made the
+// class very difficult to use. Moreover, since the C++ standard specifies that a copy-constructor is used
+// to copy the return value from functions, it makes it literally impossible for a function to fully control
+// the return value. Our code has relied on the "Return Value Optimization" which says that the C++ compiler
+// may omit the copy-construction of the return value even if the copy-constructor has side-effects, which ours does.
+// This methodology is fundamentally incompatible with C++.
+// What the original behavior actually was:
+// class Vector:
+// The copy-constructor and assignment operators did a clone for the const case and a shift for the non-const case.
+// This is really horrible.
+// The segment methods were identical for const and non-const cases, always returning an alias.
+// This also resulted in zillions of redundant mallocs and copies throughout the code base.
+// class BitVector:
+// Copy-constructor:
+// BitVector did not have any copy-constructors, and I think the intent was that it would have the same behavior
+// as Vector, but that is not how C++ works: with no copy-constructor the default copy-constructor
+// uses only the const case, so only the const Vector copy-constructor was used. Therefore it always cloned,
+// and the code base relied heavily on the "Return Value Optimization" to work at all.
+// Assignment operator:
+// BitVector did not have one, so C++ makes a default one that calls Vector::operator=() as a side effect,
+// which did a clone; not sure if there was a non-const version and no longer care.
+// segment methods:
+// The non-const segment() returned an alias, and the const segment() returned a clone.
+// I think the intent was that the behavior should be the same as Vector, but there was a conversion
+// of the result of the const segment() method from Vector to BitVector which caused the Vector copy-constructor
+// to be (inadvertently) invoked, resulting in the const version of the segment method returning a clone.
+// What the behavior is now:
+// VectorBase:
+// There is a new VectorBase class that has only the common methods and extremely basic constructors.
+// The VectorBase class MUST NOT CONTAIN: copy constructors, non-trivial constructors called from derived classes,
+// or any method that returns a VectorBase type object. Why? Because any of the above when used in derived classes
+// can cause copy-constructor invocation, often surprisingly, obfuscating the code.
+// Each derived class must provide its own: copy-constructors and segment() and related methods, since we do not
+// want to inadvertently invoke a copy-constructor to convert the segment() result from VectorBase to the derived type.
+// BitVector:
+// The BitVector copy-constructor and assignment operator (inherited from VectorBase) paradigm is:
+// if the copied Vector owned memory, perform a clone so the new vector owns memory also,
+// otherwise just do a simple copy, which is another alias. This isn't perfect but works everywhere
+// in our code base and is easier to use than the previous paradigm.
+// The segment method always returns an alias.
+// If you want a clone of a segment, use cloneSegment(), which replaces the previous const segment(...) const method.
+// Note that the semantics of cloneSegment still rely on the Return Value Optimization. Oh well, we should use refcnts.
+// Vector:
+// I left Vector alone (except for rearrangement to separate out VectorBase.) Vector should just not be used.
+// SoftVector:
+// SoftVector and signalVector should be updated similar to BitVector, but I did not want to disturb them.
+// What the behavior should be:
+// All these should be reference-counted, similar to ByteVector.
+template <class T> class VectorBase
+{
+ // TODO -- Replace memcpy calls with for-loops. (pat) in case class T is not POD [Plain Old Data]
+ protected:
+#if BITVECTOR_REFCNTS
+ typedef RefCntPointer<RCData<T> > VectorDataType;
+#else
+ typedef T* VectorDataType;
+#endif
+ VectorDataType mData; ///< allocated data block.
+ T* mStart; ///< start of useful data
+ T* mEnd; ///< end of useful data + 1
+
+ // Init vector with specified size. Previous contents are completely discarded. This is only used for initialization.
+ void vInit(size_t elements)
+ {
+ mData = elements ? new T[elements] : NULL;
+ mStart = mData; // This is where mStart gets set to zero (mData is NULL when elements == 0)
+ mEnd = mStart + elements;
+ }
+
+ /** Assign from another Vector, shifting ownership. */
+ // (pat) This should be eliminated, but it is used by Vector and descendents.
+ void shiftMem(VectorBase<T>&other)
+ {
+ VECTORDEBUG("VectorBase::shiftMem(%p)",(void*)&other);
+ this->clear();
+ this->mData=other.mData;
+ this->mStart=other.mStart;
+ this->mEnd=other.mEnd;
+ other.mData=NULL;
+ }
+
+ // Assign from another Vector, making this an alias to other.
+ void makeAlias(const VectorBase<T> &other)
+ {
+ if (this->getData()) {
+ assert(this->getData() != other.getData()); // Not possible by the semantics of Vector.
+ this->clear();
+ }
+ this->mStart=const_cast<T*>(other.mStart);
+ this->mEnd=const_cast<T*>(other.mEnd);
+ }
+
+ public:
+
+ /** Return the size of the Vector in units, ie, the number of T elements. */
+ size_t size() const
+ {
+ assert(mStart>=mData);
+ assert(mEnd>=mStart);
+ return mEnd - mStart;
+ }
+
+ /** Return size in bytes. */
+ size_t bytes() const { return this->size()*sizeof(T); }
+
+ /** Change the size of the Vector in items (not bytes), discarding content. */
+ void resize(size_t newElements) {
+ //VECTORDEBUG("VectorBase::resize("<<(void*)this<<","<<newElements<<")");
+ VECTORDEBUG("VectorBase::resize(%p,%d) %s",this,newElements, (mData?"delete":""));
+ if (mData!=NULL) delete[] mData;
+ vInit(newElements);
+ }
+
+ /** Release memory and clear pointers. */
+ void clear() { this->resize(0); }
+
+
+ /** Copy data from another vector. */
+ void clone(const VectorBase<T>& other) {
+ this->resize(other.size());
+ memcpy(mData,other.mStart,other.bytes());
+ }
+
+ void vConcat(const VectorBase<T>&other1, const VectorBase<T>&other2) {
+ this->resize(other1.size()+other2.size());
+ memcpy(this->mStart, other1.mStart, other1.bytes());
+ memcpy(this->mStart+other1.size(), other2.mStart, other2.bytes());
+ }
+
+ protected:
+
+ VectorBase() : mData(0), mStart(0), mEnd(0) {}
+
+ /** Build a Vector with explicit values. */
+ VectorBase(VectorDataType wData, T* wStart, T* wEnd) :mData(wData),mStart(wStart),mEnd(wEnd) {
+ //VECTORDEBUG("VectorBase("<<(void*)wData);
+ VECTORDEBUG("VectorBase(%p,%p,%p)",this->getData(),wStart,wEnd);
+ }
+
+ public:
+
+ /** Destroy a Vector, deleting held memory. */
+ ~VectorBase() {
+ //VECTORDEBUG("~VectorBase("<<(void*)this<<")");
+ VECTORDEBUG("~VectorBase(%p)",this);
+ this->clear();
+ }
+
+ bool isOwner() { return !!this->mData; } // Do we own any memory ourselves?
+
+ std::string inspect() const {
+ char buf[100];
+ snprintf(buf,100," mData=%p mStart=%p mEnd=%p ",(void*)mData,mStart,mEnd);
+ return std::string(buf);
+ }
+
+
+ /**
+ Copy part of this Vector to a segment of another Vector.
+ @param other The other vector.
+ @param start The start point in the other vector.
+ @param span The number of elements to copy.
+ */
+ void copyToSegment(VectorBase<T>& other, size_t start, size_t span) const
+ {
+ T* base = other.mStart + start;
+ assert(base+span<=other.mEnd);
+ assert(mStart+span<=mEnd);
+ memcpy(base,mStart,span*sizeof(T));
+ }
+
+ /** Copy all of this Vector to a segment of another Vector. */
+ void copyToSegment(VectorBase<T>& other, size_t start=0) const { copyToSegment(other,start,size()); }
+
+ void copyTo(VectorBase<T>& other) const { copyToSegment(other,0,size()); }
+
+ /**
+ Copy a segment of this vector into another.
+ @param other The other vector (to copy into, starting at 0).
+ @param start The start point in this vector.
+ @param span The number of elements to copy.
+ WARNING: This function does NOT resize the result - you must set the result size before entering.
+ */
+ void segmentCopyTo(VectorBase<T>& other, size_t start, size_t span) const
+ {
+ const T* base = mStart + start;
+ assert(base+span<=mEnd);
+ assert(other.mStart+span<=other.mEnd);
+ memcpy(other.mStart,base,span*sizeof(T));
+ }
+
+ void fill(const T& val)
+ {
+ T* dp=mStart;
+ while (dp<mEnd) *dp++=val;
+ }
+
+ void fill(const T& val, unsigned start, unsigned length)
+ {
+ T* dp=mStart+start;
+ T* end=dp+length;
+ assert(end<=mEnd);
+ while (dp<end) *dp++=val;
+ }
+
+ /** Assign from another Vector. */
+ // (pat) This is used for both const and non-const cases.
+ // If the original vector owned memory, clone it, otherwise just copy the segment data.
+ void operator=(const VectorBase<T>& other) {
+ //std::cout << "Vector=(this="<<this->inspect()<<",other="<<other.inspect()<<")"<<endl;
+ if (other.getData()) {
+ this->clone(other);
+ } else {
+ this->makeAlias(other);
+ }
+ //std::cout << "Vector= after(this="<<this->inspect()<<")"<<endl;
+ }
+
+
+ T& operator[](size_t index)
+ {
+ assert(mStart+index<mEnd);
+ return mStart[index];
+ }
+
+ const T& operator[](size_t index) const
+ {
+ assert(mStart+index<mEnd);
+ return mStart[index];
+ }
+
+ const T* begin() const { return this->mStart; }
+ T* begin() { return this->mStart; }
+ const T* end() const { return this->mEnd; }
+ T* end() { return this->mEnd; }
+#if BITVECTOR_REFCNTS
+ const T*getData() const { return this->mData.isNULL() ? 0 : this->mData->mPointer; }
+#else
+ const T*getData() const { return this->mData; }
+#endif
+};
+// (pat) Nov 2013. This class retains the original poor behavior. See comments at VectorBase
+template <class T> class Vector : public VectorBase<T>
+{
+ public:
+
+ /** Build an empty Vector of a given size. */
+ Vector(size_t wSize=0) { this->resize(wSize); }
+
+ /** Build a Vector by shifting the data block. */
+ Vector(Vector<T>& other) : VectorBase<T>(other.mData,other.mStart,other.mEnd) { other.mData=NULL; }
+
+ /** Build a Vector by copying another. */
+ Vector(const Vector<T>& other):VectorBase<T>() { this->clone(other); }
+
+ /** Build a Vector with explicit values. */
+ Vector(T* wData, T* wStart, T* wEnd) : VectorBase<T>(wData,wStart,wEnd) { }
+
+ /** Build a vector from an existing block, NOT to be deleted upon destruction. */
+ Vector(T* wStart, size_t span) : VectorBase<T>(NULL,wStart,wStart+span) { }
+
+ /** Build a Vector by concatenation. */
+ Vector(const Vector<T>& other1, const Vector<T>& other2):VectorBase<T>() {
+ assert(this->mData == 0);
+ this->vConcat(other1,other2);
+ }
+
+ //@{
+
+ /** Assign from another Vector, shifting ownership. */
+ void operator=(Vector<T>& other) { this->shiftMem(other); }
+
+ /** Assign from another Vector, copying. */
+ void operator=(const Vector<T>& other) { this->clone(other); }
+
+ /** Return an alias to a segment of this Vector. */
+ Vector<T> segment(size_t start, size_t span)
+ {
+ T* wStart = this->mStart + start;
+ T* wEnd = wStart + span;
+ assert(wEnd<=this->mEnd);
+ return Vector<T>(NULL,wStart,wEnd);
+ }
+
+ /** Return an alias to a segment of this Vector. */
+ const Vector<T> segment(size_t start, size_t span) const
+ {
+ T* wStart = this->mStart + start;
+ T* wEnd = wStart + span;
+ assert(wEnd<=this->mEnd);
+ return Vector<T>(NULL,wStart,wEnd);
+ }
+
+ Vector<T> head(size_t span) { return segment(0,span); }
+ const Vector<T> head(size_t span) const { return segment(0,span); }
+ Vector<T> tail(size_t start) { return segment(start,this->size()-start); }
+ const Vector<T> tail(size_t start) const { return segment(start,this->size()-start); }
+
+ /**@name Iterator types. */
+ //@{
+ typedef T* iterator;
+ typedef const T* const_iterator;
+ //@}
+
+ //@}
};
+
/** Basic print operator for Vector objects. */
template <class T>
std::ostream& operator<<(std::ostream& os, const Vector<T>& v)
{
- for (unsigned i=0; i<v.size(); i++) os << v[i] << " ";
- return os;
+ for (unsigned i=0; i<v.size(); i++) os << v[i] << " ";
+ return os;
}
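+
+// Ownership semantics of the classes above, in practice (hypothetical sketch,
+// not code that ships in this header):
+//
+//   Vector<int> owner(16);                   // owner.isOwner() == true, memory allocated here
+//   Vector<int> view = owner.segment(4, 8);  // alias into owner's storage when the compiler elides
+//                                            // the copy (the RVO reliance noted above); otherwise
+//                                            // the const copy-constructor clones
+//   Vector<int> stolen(owner);               // non-const copy-constructor: shifts ownership;
+//                                            // owner still points at the data but no longer owns it
+//   const Vector<int>& cref = stolen;
+//   Vector<int> cloned(cref);                // const copy-constructor: clone with independent memory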
diff --git a/lib/decoding/Viterbi.h b/lib/decoding/Viterbi.h
new file mode 100644
index 0000000..8635bf5
--- /dev/null
+++ b/lib/decoding/Viterbi.h
@@ -0,0 +1,35 @@
+/*
+* Copyright 2013, 2014 Range Networks, Inc.
+*
+* This software is distributed under multiple licenses;
+* see the COPYING file in the main directory for licensing
+* information for this specific distribution.
+*
+* This use of this software may be subject to additional restrictions.
+* See the LEGAL file in the main directory for details.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+
+*/
+
+
+#ifndef _VITERBI_H_
+#define _VITERBI_H_ 1
+
+// (pat) Virtual base class for Viterbi and Turbo coder/decoders.
+class ViterbiBase {
+ public:
+ virtual void encode(const BitVector &in, BitVector& target) const = 0;
+ virtual void decode(const SoftVector &in, BitVector& target) = 0;
+ // (pat) Return error count from most recent decoder run.
+ // If you get -1 from this, the method is not defined in the Viterbi class.
+ virtual int getBEC() { return -1; }
+ //virtual ~ViterbiBase(); Currently None of these have destructors.
+
+ // These functions are logically part of the Viterbi functionality, even though they do not use any class variables.
+ unsigned applyPoly(uint64_t val, uint64_t poly);
+ unsigned applyPoly(uint64_t val, uint64_t poly, unsigned order);
+};
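+
+// Usage sketch (illustrative; assumes a concrete subclass such as ViterbiR2O4 from
+// ViterbiR204.h and the BitVector/SoftVector types declared before this header):
+//
+//   ViterbiBase *vit = new ViterbiR2O4();
+//   vit->decode(softBits, hardBits);   // virtual dispatch to the concrete decoder
+//   int errs = vit->getBEC();          // bit-error count of that run, or -1 if not implemented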
+#endif
diff --git a/lib/decoding/ViterbiR204.cpp b/lib/decoding/ViterbiR204.cpp
new file mode 100644
index 0000000..999b31b
--- /dev/null
+++ b/lib/decoding/ViterbiR204.cpp
@@ -0,0 +1,306 @@
+/*
+* Copyright 2008, 2009, 2014 Free Software Foundation, Inc.
+* Copyright 2014 Range Networks, Inc.
+*
+*
+* This software is distributed under the terms of the GNU Affero Public License.
+* See the COPYING file in the main directory for details.
+*
+* This use of this software may be subject to additional restrictions.
+* See the LEGAL file in the main directory for details.
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+*/
+
+
+
+
+#include "BitVector.h"
+#include "ViterbiR204.h"
+#include <iostream>
+#include <stdio.h>
+#include <sstream>
+#include <string.h>
+
+using namespace std;
+
+
+/**
+ Apply a Galois polynomial to a binary sequence.
+ @param val The input sequence.
+ @param poly The polynomial.
+ @param order The order of the polynomial.
+ @return Single-bit result.
+*/
+unsigned ViterbiBase::applyPoly(uint64_t val, uint64_t poly, unsigned order)
+{
+ uint64_t prod = val & poly;
+ unsigned sum = prod;
+ for (unsigned i=1; i<order; i++) sum ^= prod>>i;
+ return sum & 0x01;
+}
+
+unsigned ViterbiBase::applyPoly(uint64_t val, uint64_t poly)
+{
+ uint64_t prod = val & poly;
+ prod = (prod ^ (prod >> 32));
+ prod = (prod ^ (prod >> 16));
+ prod = (prod ^ (prod >> 8));
+ prod = (prod ^ (prod >> 4));
+ prod = (prod ^ (prod >> 2));
+ prod = (prod ^ (prod >> 1));
+ return prod & 0x01;
+}
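+
+// In other words, applyPoly() returns the XOR (parity) of the bits of val selected by poly.
+// For example, applyPoly(0x1b, 0x19, 5) masks 0b11011 with 0b11001, leaving three set bits,
+// so the result is 1.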
+
+
+
+//void BitVector::encode(const ViterbiR2O4& coder, BitVector& target)
+void ViterbiR2O4::encode(const BitVector& in, BitVector& target) const
+{
+ const ViterbiR2O4& coder = *this;
+ size_t sz = in.size();
+
+ assert(sz*coder.iRate() == target.size());
+
+ // Build a "history" array where each element contains the full history.
+ uint32_t history[sz];
+ uint32_t accum = 0;
+ for (size_t i=0; i<sz; i++) {
+ accum = (accum<<1) | in.bit(i);
+ history[i] = accum;
+ }
+
+ // Look up histories in the pre-generated state table.
+ char *op = target.begin();
+ for (size_t i=0; i<sz; i++) {
+ unsigned index = coder.cMask() & history[i];
+ for (unsigned g=0; g<coder.iRate(); g++) {
+ *op++ = coder.stateTable(g,index);
+ }
+ }
+}
+
+
+ViterbiR2O4::ViterbiR2O4()
+{
+ assert(mDeferral < 32);
+ // (pat) The generator polynomials are: G0 = 1 + D**3 + D**4; and G1 = 1 + D + D**3 + D**4
+ mCoeffs[0] = 0x019; // G0 = D**4 + D**3 + 1; represented as binary 11001,
+ mCoeffs[1] = 0x01b; // G1 = + D**4 + D**3 + D + 1; represented as binary 11011
+ computeStateTables(0);
+ computeStateTables(1);
+ computeGeneratorTable();
+}
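+
+// Equivalently, for input bits u(k) the encoder defined by these polynomials produces
+// (GSM 05.03 rate-1/2 code, constraint length 5):
+//   c(2k)   = u(k) + u(k-3) + u(k-4)           (G0, mod 2)
+//   c(2k+1) = u(k) + u(k-1) + u(k-3) + u(k-4)  (G1, mod 2)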
+
+
+void ViterbiR2O4::initializeStates()
+{
+ for (unsigned i=0; i<mIStates; i++) vitClear(mSurvivors[i]);
+ for (unsigned i=0; i<mNumCands; i++) vitClear(mCandidates[i]);
+}
+
+
+
+// (pat) The state machine has 16 states.
+// Each state has two possible next states, corresponding to a 0 or 1 input to the original encoder,
+// which are saved in mStateTable in consecutive locations.
+// In other words, the second mStateTable index is ((current_state << 1) + encoder_bit).
+// g is 0 or 1 for the first or second bit of the encoded stream, i.e., the one we are decoding.
+void ViterbiR2O4::computeStateTables(unsigned g)
+{
+ assert(g<mIRate);
+ for (unsigned state=0; state<mIStates; state++) {
+ // 0 input
+ uint32_t inputVal = state<<1;
+ mStateTable[g][inputVal] = applyPoly(inputVal, mCoeffs[g], mOrder+1);
+ // 1 input
+ inputVal |= 1;
+ mStateTable[g][inputVal] = applyPoly(inputVal, mCoeffs[g], mOrder+1);
+ }
+}
+
+void ViterbiR2O4::computeGeneratorTable()
+{
+ for (unsigned index=0; index<mIStates*2; index++) {
+ mGeneratorTable[index] = (mStateTable[0][index]<<1) | mStateTable[1][index];
+ }
+}
+
+
+void ViterbiR2O4::branchCandidates()
+{
+ // Branch to generate new input states.
+ const vCand *sp = mSurvivors;
+ for (unsigned i=0; i<mNumCands; i+=2) {
+ // extend and suffix
+ const uint32_t iState0 = (sp->iState) << 1; // input state for 0
+ const uint32_t iState1 = iState0 | 0x01; // input state for 1
+ const uint32_t oStateShifted = (sp->oState) << mIRate; // shifted output (by 2)
+ const float cost = sp->cost;
+ int bec = sp->bitErrorCnt;
+ sp++;
+ // 0 input extension
+ mCandidates[i].cost = cost;
+ // mCMask is the low 5 bits, i.e., the full width of mGeneratorTable.
+ mCandidates[i].oState = oStateShifted | mGeneratorTable[iState0 & mCMask];
+ mCandidates[i].iState = iState0;
+ mCandidates[i].bitErrorCnt = bec;
+ // 1 input extension
+ mCandidates[i+1].cost = cost;
+ mCandidates[i+1].oState = oStateShifted | mGeneratorTable[iState1 & mCMask];
+ mCandidates[i+1].iState = iState1;
+ mCandidates[i+1].bitErrorCnt = bec;
+ }
+}
+
+
+void ViterbiR2O4::getSoftCostMetrics(const uint32_t inSample, const float *matchCost, const float *mismatchCost)
+{
+ const float *cTab[2] = {matchCost,mismatchCost};
+ for (unsigned i=0; i<mNumCands; i++) {
+ vCand& thisCand = mCandidates[i];
+ // We examine input bits 2 at a time for a rate 1/2 coder.
+ // (pat) mismatched will end up with bits in it from previous transitions,
+ // but we only use the bottom two bits of mismatched, so it is ok.
+ const unsigned mismatched = inSample ^ (thisCand.oState);
+ // (pat) TODO: Are these two tests swapped?
+ thisCand.cost += cTab[mismatched&0x01][1] + cTab[(mismatched>>1)&0x01][0];
+ if (mismatched & 1) { thisCand.bitErrorCnt++; }
+ if (mismatched & 2) { thisCand.bitErrorCnt++; }
+ }
+}
+
+
+void ViterbiR2O4::pruneCandidates()
+{
+ const vCand* c1 = mCandidates; // 0-prefix
+ const vCand* c2 = mCandidates + mIStates; // 1-prefix
+ for (unsigned i=0; i<mIStates; i++) {
+ if (c1[i].cost < c2[i].cost) mSurvivors[i] = c1[i];
+ else mSurvivors[i] = c2[i];
+ }
+}
+
+
+const ViterbiR2O4::vCand& ViterbiR2O4::minCost() const
+{
+ int minIndex = 0;
+ float minCost = mSurvivors[0].cost;
+ for (unsigned i=1; i<mIStates; i++) {
+ const float thisCost = mSurvivors[i].cost;
+ if (thisCost>=minCost) continue;
+ minCost = thisCost;
+ minIndex=i;
+ }
+ return mSurvivors[minIndex];
+}
+
+
+const ViterbiR2O4::vCand* ViterbiR2O4::vstep(uint32_t inSample, const float *probs, const float *iprobs, bool isNotTailBits)
+{
+ branchCandidates();
+ // (pat) tail bits do not affect cost or error bit count of any branch.
+ if (isNotTailBits) getSoftCostMetrics(inSample,probs,iprobs);
+ pruneCandidates();
+ return &minCost();
+}
+
+
+void ViterbiR2O4::decode(const SoftVector &in, BitVector& target)
+{
+ ViterbiR2O4& decoder = *this;
+ const size_t sz = in.size();
+ const unsigned oSize = in.size() / decoder.iRate();
+ const unsigned deferral = decoder.deferral();
+ const size_t ctsz = sz + deferral*decoder.iRate();
+ assert(sz <= decoder.iRate()*target.size());
+
+ // Build a "history" array where each element contains the full history.
+ // (pat) We only use every other history element, so why are we setting them?
+ uint32_t history[ctsz];
+ {
+ BitVector bits = in.sliced();
+ uint32_t accum = 0;
+ for (size_t i=0; i<sz; i++) {
+ accum = (accum<<1) | bits.bit(i);
+ history[i] = accum;
+ }
+ // Repeat last bit at the end.
+ // (pat) TODO: really? Does this matter?
+ for (size_t i=sz; i<ctsz; i++) {
+ accum = (accum<<1) | (accum & 0x01);
+ history[i] = accum;
+ }
+ }
+
+ // Precompute metric tables.
+ float matchCostTable[ctsz];
+ float mismatchCostTable[ctsz];
+ {
+ const float *dp = in.begin();
+ for (size_t i=0; i<sz; i++) {
+ // pVal is the probability that a bit is correct.
+ // ipVal is the probability that a bit is incorrect.
+ float pVal = dp[i];
+ if (pVal>0.5F) pVal = 1.0F-pVal;
+ float ipVal = 1.0F-pVal;
+ // This is a cheap approximation to an ideal cost function.
+ if (pVal<0.01F) pVal = 0.01;
+ if (ipVal<0.01F) ipVal = 0.01;
+ matchCostTable[i] = 0.25F/ipVal;
+ mismatchCostTable[i] = 0.25F/pVal;
+ }
+
+ // pad end of table with unknowns
+ // Note that these bits should not contribute to Bit Error Count.
+ for (size_t i=sz; i<ctsz; i++) {
+ matchCostTable[i] = 0.5F;
+ mismatchCostTable[i] = 0.5F;
+ }
+ }
+
+ {
+ decoder.initializeStates();
+ // Each sample of history[] carries its history.
+ // So we only have to process every iRate-th sample.
+ const unsigned step = decoder.iRate();
+ // input pointer
+ const uint32_t *ip = history + step - 1;
+ // output pointers
+ char *op = target.begin();
+ const char *const opt = target.end(); // (pat) Not right if target is larger than needed; should be: op + sz/2;
+ // table pointers
+ const float* match = matchCostTable;
+ const float* mismatch = mismatchCostTable;
+ size_t oCount = 0;
+ const ViterbiR2O4::vCand *minCost = NULL;
+ while (op<opt) {
+ // Viterbi algorithm
+ assert(match-matchCostTable<(float)sizeof(matchCostTable)/sizeof(matchCostTable[0])-1);
+ assert(mismatch-mismatchCostTable<(float)sizeof(mismatchCostTable)/sizeof(mismatchCostTable[0])-1);
+ minCost = decoder.vstep(*ip, match, mismatch, oCount < oSize);
+ ip += step;
+ match += step;
+ mismatch += step;
+ // output
+ if (oCount>=deferral) *op++ = (minCost->iState >> deferral)&0x01;
+ oCount++;
+ }
+ // Don't think minCost == NULL can happen.
+ mBitErrorCnt = minCost ? minCost->bitErrorCnt : 0;
+ }
+}
+
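+// A minimal round-trip sketch (hypothetical test code; assumes the BitVector(size_t)
+// and SoftVector(const BitVector&) constructors from BitVector.h):
+//
+//   ViterbiR2O4 coder;
+//   BitVector u(228);         // payload + parity + 4 tail bits, e.g. a FACCH/F block
+//   u.fill(0);                // any known content will do
+//   BitVector c(456);         // 228 * iRate() encoded bits
+//   BitVector out(228);
+//   coder.encode(u, c);
+//   SoftVector soft(c);       // hard 0/1 bits become 0.0/1.0 soft values
+//   coder.decode(soft, out);  // out should equal u for a noiseless input
+//   int errors = coder.getBEC();  // expected to be 0 here
+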
+// vim: ts=4 sw=4
diff --git a/lib/decoding/ViterbiR204.h b/lib/decoding/ViterbiR204.h
new file mode 100644
index 0000000..4fd053f
--- /dev/null
+++ b/lib/decoding/ViterbiR204.h
@@ -0,0 +1,149 @@
+/*
+* Copyright 2008, 2009, 2014 Free Software Foundation, Inc.
+* Copyright 2014 Range Networks, Inc.
+*
+* This software is distributed under the terms of the GNU Affero Public License.
+* See the COPYING file in the main directory for details.
+*
+* This use of this software may be subject to additional restrictions.
+* See the LEGAL file in the main directory for details.
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+*/
+
+#ifndef _VITERBIR204_H_
+#define _VITERBIR204_H_ 1
+
+#include "Viterbi.h"
+
+
+/**
+ Class to represent convolutional coders/decoders of rate 1/2, memory length 4.
+ This is the "workhorse" coder for most GSM channels.
+*/
+class ViterbiR2O4 : public ViterbiBase {
+
+ private:
+ /**@name Lots of precomputed elements so the compiler can optimize like hell. */
+ //@{
+ /**@name Core values. */
+ //@{
+ static const unsigned mIRate = 2; ///< reciprocal of rate
+ static const unsigned mOrder = 4; ///< memory length of generators
+ //@}
+ /**@name Derived values. */
+ //@{
+ static const unsigned mIStates = 0x01 << mOrder; ///< (16) number of states, number of survivors
+ static const uint32_t mSMask = mIStates-1; ///< survivor mask
+ static const uint32_t mCMask = (mSMask<<1) | 0x01; ///< candidate mask
+ static const uint32_t mOMask = (0x01<<mIRate)-1; ///< output mask, all iRate low bits set
+ static const unsigned mNumCands = mIStates*2; ///< number of candidates to generate during branching
+ static const unsigned mDeferral = 6*mOrder; ///< deferral to be used
+ //@}
+ //@}
+
+ /** Precomputed tables. */
+ //@{
+ uint32_t mCoeffs[mIRate]; ///< polynomial for each generator
+ // (pat) There are 16 states, each of which has two possible output states.
+ // These are stored in these two tables in consecutive locations.
+ uint32_t mStateTable[mIRate][2*mIStates]; ///< precomputed generator output tables
+ // mGeneratorTable is the encoder output state for a given input state and encoder input bit.
+ uint32_t mGeneratorTable[2*mIStates]; ///< precomputed coder output table
+ //@}
+ int mBitErrorCnt;
+
+ public:
+
+ /**
+ A candidate sequence in a Viterbi decoder.
+ The 32-bit state register can support a deferral of 6 with a 4th-order coder.
+ */
+ typedef struct candStruct {
+ uint32_t iState; ///< encoder input associated with this candidate
+ uint32_t oState; ///< encoder output associated with this candidate
+ float cost; ///< cost (metric value), float to support soft inputs
+ int bitErrorCnt; ///< number of bit errors in the encoded vector being decoded.
+ } vCand;
+
+ /** Clear a structure. */
+ void vitClear(vCand& v)
+ {
+ v.iState=0;
+ v.oState=0;
+ v.cost=0;
+ v.bitErrorCnt = 0;
+ }
+
+
+ private:
+
+ /**@name Survivors and candidates. */
+ //@{
+ vCand mSurvivors[mIStates]; ///< current survivor pool
+ vCand mCandidates[2*mIStates]; ///< current candidate pool
+ //@}
+
+ public:
+
+ unsigned iRate() const { return mIRate; }
+ uint32_t cMask() const { return mCMask; }
+ uint32_t stateTable(unsigned g, unsigned i) const { return mStateTable[g][i]; }
+ unsigned deferral() const { return mDeferral; }
+
+
+ ViterbiR2O4();
+
+ /** Set all cost metrics to zero. */
+ void initializeStates();
+
+ /**
+ Full cycle of the Viterbi algorithm: branch, metrics, prune, select.
+ @return reference to minimum-cost candidate.
+ */
+ const vCand* vstep(uint32_t inSample, const float *probs, const float *iprobs, bool isNotTailBits);
+
+ private:
+
+ /** Branch survivors into new candidates. */
+ void branchCandidates();
+
+ /** Compute cost metrics for soft-inputs. */
+ void getSoftCostMetrics(uint32_t inSample, const float *probs, const float *iprobs);
+
+ /** Select survivors from the candidate set. */
+ void pruneCandidates();
+
+ /** Find the minimum cost survivor. */
+ const vCand& minCost() const;
+
+ /**
+ Precompute the state tables.
+ @param g Generator index 0..((1/rate)-1)
+ */
+ void computeStateTables(unsigned g);
+
+ /**
+ Precompute the generator outputs.
+ mCoeffs must be defined first.
+ */
+ void computeGeneratorTable();
+
+ public:
+ void encode(const BitVector &in, BitVector& target) const;
+ void decode(const SoftVector &in, BitVector& target);
+ int getBEC() { return mBitErrorCnt; }
+};
+#endif
diff --git a/lib/decoding/VocoderFrame.h b/lib/decoding/VocoderFrame.h
deleted file mode 100644
index 0c80973..0000000
--- a/lib/decoding/VocoderFrame.h
+++ /dev/null
@@ -1,43 +0,0 @@
-#ifndef _VOCODERFRAME_H
-#define _VOCODERFRAME_H
-
-#include "BitVector.h"
-//#include "GSMCommon.h"
-
-class VocoderFrame : public BitVector {
-
- public:
-
- VocoderFrame()
- :BitVector(264)
- { fillField(0,0x0d,4); }
-
- /** Construct by unpacking a char[33]. */
- VocoderFrame(const unsigned char *src)
- :BitVector(264)
- { unpack(src); }
-
- BitVector payload() { return tail(4); }
- const BitVector payload() const { return tail(4); }
-
-};
-
-class VocoderAMRFrame : public BitVector {
-
- public:
-
- VocoderAMRFrame()
- :BitVector(244+8)
- { fillField(0,0x3c,8); /* AMR-NB 12.2 */ }
-
- /** Construct by unpacking a char[32]. */
- VocoderAMRFrame(const unsigned char *src)
- :BitVector(244+8)
- { unpack(src); }
-
- BitVector payload() { return tail(8); }
- const BitVector payload() const { return tail(8); }
-
-};
-
-#endif
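
The two wrappers deleted above only prepended a small frame header to the decoded speech bits and packed the result into a byte buffer (GSM-FR: 4-bit header 0x0d + 260 bits = 264 bits = 33 bytes; EFR/AMR 12.2: 8-bit header 0x3c + 244 bits, padded to 32 bytes); tch_f_decoder_impl.cc now does the same inline with plain BitVectors. Below is a minimal sketch of that packing, assuming MSB-first bit packing; packBits() and packFrFrame() are hypothetical helpers, not BitVector methods.

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <vector>

// Pack bits (one per element, MSB-first within each byte); unused tail bits stay zero.
static void packBits(const std::vector<uint8_t> &bits, uint8_t *out, size_t outLen)
{
    std::memset(out, 0, outLen);
    for (size_t i = 0; i < bits.size() && i / 8 < outLen; ++i)
        if (bits[i] & 1)
            out[i / 8] |= uint8_t(0x80u >> (i % 8));
}

// Build a 33-byte GSM-FR file frame: the 4-bit signature 0x0d followed by 260 speech bits.
// (The EFR/AMR path is analogous: 8-bit header 0x3c + 244 payload bits, padded to 32 bytes.)
static void packFrFrame(const std::vector<uint8_t> &speech260, uint8_t out[33])
{
    assert(speech260.size() == 260);
    std::vector<uint8_t> bits(264, 0);                 // 4 header bits + 260 payload bits
    for (int i = 0; i < 4; ++i)
        bits[i] = (0x0d >> (3 - i)) & 1;               // write the header MSB first
    std::copy(speech260.begin(), speech260.end(), bits.begin() + 4);
    packBits(bits, out, 33);
}
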
diff --git a/lib/decoding/tch_f_decoder_impl.cc b/lib/decoding/tch_f_decoder_impl.cc
index a775391..89b8520 100644
--- a/lib/decoding/tch_f_decoder_impl.cc
+++ b/lib/decoding/tch_f_decoder_impl.cc
@@ -49,7 +49,19 @@ namespace gr {
gr::io_signature::make(0, 0, 0),
gr::io_signature::make(0, 0, 0)),
d_tch_mode(mode),
- d_collected_bursts_num(0)
+ d_collected_bursts_num(0),
+ mBlockCoder(0x10004820009ULL, 40, 224),
+ mU(228),
+ mP(mU.segment(184,40)),
+ mD(mU.head(184)),
+ mDP(mU.head(224)),
+ mC(CONV_SIZE),
+ mClass1_c(mC.head(378)),
+ mClass2_c(mC.segment(378, 78)),
+ mTCHU(189),
+ mTCHD(260),
+ mClass1A_d(mTCHD.head(50)),
+ mTCHParity(0x0b, 3, 50)
{
d_speech_file = fopen( file.c_str(), "wb" );
if (d_speech_file == NULL)
@@ -57,9 +69,7 @@ namespace gr {
throw std::runtime_error("TCH/F Decoder: can't open file\n");
}
- const unsigned char amr_nb_magic[6] = { 0x23, 0x21, 0x41, 0x4d, 0x52, 0x0a };
-
- if (d_tch_mode == MODE_SPEECH_EFR)
+ if (d_tch_mode != TCH_FS)
{
fwrite(amr_nb_magic, 1, 6, d_speech_file);
}
@@ -76,6 +86,8 @@ namespace gr {
message_port_register_in(pmt::mp("bursts"));
set_msg_handler(pmt::mp("bursts"), boost::bind(&tch_f_decoder_impl::decode, this, _1));
message_port_register_out(pmt::mp("msgs"));
+
+ setCodingMode(mode);
}
tch_f_decoder_impl::~tch_f_decoder_impl()
@@ -91,15 +103,6 @@ namespace gr {
if (d_collected_bursts_num == 8)
{
- unsigned char iBLOCK[2*BLOCKS*iBLOCK_SIZE];
- SoftVector mC(CONV_SIZE);
- SoftVector mClass1_c(mC.head(378));
- SoftVector mClass2_c(mC.segment(378, 78));
- BitVector mTCHU(189);
- BitVector mTCHD(260);
- BitVector mClass1A_d(mTCHD.head(50));
- ViterbiR2O4 mVCoder;
-
d_collected_bursts_num = 0;
// reorganize data
@@ -129,13 +132,7 @@ namespace gr {
// Decode stolen frames as FACCH/F
if (stolen)
{
- BitVector mU(228);
- BitVector mP(mU.segment(184,40));
- BitVector mD(mU.head(184));
- BitVector mDP(mU.head(224));
- Parity mBlockCoder(0x10004820009ULL, 40, 224);
-
- mC.decode(mVCoder, mU);
+ mVR204Coder.decode(mC, mU);
mP.invert();
unsigned syndrome = mBlockCoder.syndrome(mDP);
@@ -167,80 +164,203 @@ namespace gr {
}
}
- mClass1_c.decode(mVCoder, mTCHU);
- mClass2_c.sliced().copyToSegment(mTCHD, 182);
+ // Decode voice frames and write to file
+ if (d_tch_mode == TCH_FS || d_tch_mode == TCH_EFR)
+ {
+ mVR204Coder.decode(mClass1_c, mTCHU);
+ mClass2_c.sliced().copyToSegment(mTCHD, 182);
+
+ // 3.1.2.1
+ // copy class 1 bits u[] to d[]
+ for (unsigned k = 0; k <= 90; k++) {
+ mTCHD[2*k] = mTCHU[k];
+ mTCHD[2*k+1] = mTCHU[184-k];
+ }
- // 3.1.2.1
- // copy class 1 bits u[] to d[]
- for (unsigned k = 0; k <= 90; k++) {
- mTCHD[2*k] = mTCHU[k];
- mTCHD[2*k+1] = mTCHU[184-k];
- }
+ // 3.1.2.1
+ // check parity of class 1A
+ unsigned sentParity = (~mTCHU.peekField(91, 3)) & 0x07;
+ unsigned calcParity = mClass1A_d.parity(mTCHParity) & 0x07;
- Parity mTCHParity(0x0b, 3, 50);
+ bool good = (sentParity == calcParity);
- // 3.1.2.1
- // check parity of class 1A
- unsigned sentParity = (~mTCHU.peekField(91, 3)) & 0x07;
- //unsigned calcParity = mTCHD.head(50).parity(mTCHParity) & 0x07;
- unsigned calcParity = mClass1A_d.parity(mTCHParity) & 0x07;
+ if (good)
+ {
+ unsigned char frameBuffer[33];
+ unsigned int mTCHFrameLength;
- // 3.1.2.2
- // Check the tail bits, too.
- unsigned tail = mTCHU.peekField(185, 4);
+ if (d_tch_mode == TCH_FS) // GSM-FR
+ {
+ unsigned char mFrameHeader = 0x0d;
+ mTCHFrameLength = 33;
- bool good = (sentParity == calcParity) && (tail == 0);
+ // Undo Um's importance-sorted bit ordering.
+ // See GSM 05.03 3.1 and Table 2.
+                            BitVector frFrame(260 + 4); // FR has a frame header of only 4 bits
+ BitVector payload = frFrame.tail(4);
- if (good)
- {
- unsigned char mTCHFrame[33];
- unsigned int mTCHFrameLength;
+ mTCHD.unmap(GSM::g610BitOrder, 260, payload);
+ frFrame.pack(frameBuffer);
- if (d_tch_mode == MODE_SPEECH_FR) // GSM-FR
- {
- // Undo Um's importance-sorted bit ordering.
- // See GSM 05.03 3.1 and Tablee 2.
- VocoderFrame mVFrame;
-
- BitVector payload = mVFrame.payload();
- mTCHD.unmap(GSM::g610BitOrder, 260, payload);
- mVFrame.pack(mTCHFrame);
- mTCHFrameLength = 33;
- }
- else if (d_tch_mode == MODE_SPEECH_EFR) // GSM-EFR / AMR 12.2
- {
- VocoderAMRFrame mVFrameAMR;
+ }
+ else if (d_tch_mode == TCH_EFR) // GSM-EFR
+ {
+ unsigned char mFrameHeader = 0x3c;
+
+                            // AMR frame, consisting of an 8-bit frame header plus the payload from decoding
+ BitVector amrFrame(244 + 8); // Same output length as AMR 12.2
+ BitVector payload = amrFrame.tail(8);
+
+ BitVector TCHW(260), EFRBits(244);
+
+ // write frame header
+ amrFrame.fillField(0, mFrameHeader, 8);
+
+ // Undo Um's EFR bit ordering.
+ mTCHD.unmap(GSM::g660BitOrder, 260, TCHW);
- BitVector payload = mVFrameAMR.payload();
- BitVector TCHW(260), EFRBits(244);
+ // Remove repeating bits and CRC to get raw EFR frame (244 bits)
+ for (unsigned k=0; k<71; k++)
+ EFRBits[k] = TCHW[k] & 1;
- // Undo Um's EFR bit ordering.
- mTCHD.unmap(GSM::g660BitOrder, 260, TCHW);
+ for (unsigned k=73; k<123; k++)
+ EFRBits[k-2] = TCHW[k] & 1;
- // Remove repeating bits and CRC to get raw EFR frame (244 bits)
- for (unsigned k=0; k<71; k++)
- EFRBits[k] = TCHW[k] & 1;
+ for (unsigned k=125; k<178; k++)
+ EFRBits[k-4] = TCHW[k] & 1;
- for (unsigned k=73; k<123; k++)
- EFRBits[k-2] = TCHW[k] & 1;
+ for (unsigned k=180; k<230; k++)
+ EFRBits[k-6] = TCHW[k] & 1;
- for (unsigned k=125; k<178; k++)
- EFRBits[k-4] = TCHW[k] & 1;
+ for (unsigned k=232; k<252; k++)
+ EFRBits[k-8] = TCHW[k] & 1;
- for (unsigned k=180; k<230; k++)
- EFRBits[k-6] = TCHW[k] & 1;
+ // Map bits as AMR 12.2k
+ EFRBits.map(GSM::gAMRBitOrderTCH_AFS12_2, 244, payload);
- for (unsigned k=232; k<252; k++)
- EFRBits[k-8] = TCHW[k] & 1;
+ // Put the whole frame (hdr + payload)
+ mTCHFrameLength = 32;
+ amrFrame.pack(frameBuffer);
- // Map bits as AMR 12.2k
- EFRBits.map(GSM::g690_12_2_BitOrder, 244, payload);
+ }
+ fwrite(frameBuffer, 1 , mTCHFrameLength, d_speech_file);
+ }
+ }
+ else
+ {
+ // Handle inband bits, see 3.9.4.1
+                        // The OpenBTS source takes the last 8 bits as inband bits for some reason. This may be a
+                        // divergence from the GSM specification (which still works because both their encoder
+                        // and decoder do it the same way), or they may handle the issue somewhere else.
+ // SoftVector cMinus8 = mC.segment(0, mC.size() - 8);
+                        SoftVector cMinus8 = mC.segment(8, mC.size() - 8);
+ cMinus8.copyUnPunctured(mTCHUC, mPuncture, mPunctureLth);
+
+ // 3.9.4.4
+ // decode from uc[] to u[]
+ mViterbi->decode(mTCHUC, mTCHU);
+
+ // 3.9.4.3 -- class 1a bits in u[] to d[]
+ for (unsigned k=0; k < mClass1ALth; k++) {
+ mTCHD[k] = mTCHU[k];
+ }
- // Put the whole frame (hdr + payload)
- mVFrameAMR.pack(mTCHFrame);
- mTCHFrameLength = 32;
+ // 3.9.4.3 -- class 1b bits in u[] to d[]
+ for (unsigned k=0; k < mClass1BLth; k++) {
+ mTCHD[k+mClass1ALth] = mTCHU[k+mClass1ALth+6];
}
- fwrite(mTCHFrame, 1 , mTCHFrameLength, d_speech_file);
+
+ // Check parity
+ unsigned sentParity = (~mTCHU.peekField(mClass1ALth,6)) & 0x3f;
+ BitVector class1A = mTCHU.segment(0, mClass1ALth);
+ unsigned calcParity = class1A.parity(mTCHParity) & 0x3f;
+
+ bool good = (sentParity == calcParity);
+
+ if (good)
+ {
+ unsigned char frameBuffer[mAMRFrameLth];
+                            // AMR frame, consisting of an 8-bit frame header plus the payload from decoding
+ BitVector amrFrame(mKd + 8);
+ BitVector payload = amrFrame.tail(8);
+
+ // write frame header
+ amrFrame.fillField(0, mAMRFrameHeader, 8);
+
+                            // No unmap needed here: the decoder already delivers the bits in the
+                            // correct order, so copy them to the payload directly.
+ // mTCHD.unmap(mAMRBitOrder, payload.size(), payload);
+ mTCHD.copyTo(payload);
+ amrFrame.pack(frameBuffer);
+ fwrite(frameBuffer, 1 , mAMRFrameLth, d_speech_file);
+ }
+ }
+ }
+ }
+
+ void tch_f_decoder_impl::setCodingMode(tch_mode mode)
+ {
+ if (d_tch_mode != TCH_FS && d_tch_mode != TCH_EFR)
+ {
+ mKd = GSM::gAMRKd[d_tch_mode];
+ mTCHD.resize(mKd);
+ mTCHU.resize(mKd+6);
+ mTCHParity = Parity(0x06f,6, GSM::gAMRClass1ALth[d_tch_mode]);
+ mAMRBitOrder = GSM::gAMRBitOrder[d_tch_mode];
+ mClass1ALth = GSM::gAMRClass1ALth[d_tch_mode];
+ mClass1BLth = GSM::gAMRKd[d_tch_mode] - GSM::gAMRClass1ALth[d_tch_mode];
+ mTCHUC.resize(GSM::gAMRTCHUCLth[d_tch_mode]);
+ mPuncture = GSM::gAMRPuncture[d_tch_mode];
+ mPunctureLth = GSM::gAMRPunctureLth[d_tch_mode];
+ mClass1A_d.dup(mTCHD.head(mClass1ALth));
+
+ switch (d_tch_mode)
+ {
+ case TCH_AFS12_2:
+ mViterbi = new ViterbiTCH_AFS12_2();
+ mAMRFrameLth = 32;
+ mAMRFrameHeader = 0x3c;
+ break;
+ case TCH_AFS10_2:
+ mViterbi = new ViterbiTCH_AFS10_2();
+ mAMRFrameLth = 27;
+ mAMRFrameHeader = 0x3c;
+ break;
+ case TCH_AFS7_95:
+ mViterbi = new ViterbiTCH_AFS7_95();
+ mAMRFrameLth = 21;
+ mAMRFrameHeader = 0x3c;
+ break;
+ case TCH_AFS7_4:
+ mViterbi = new ViterbiTCH_AFS7_4();
+ mAMRFrameLth = 20;
+ mAMRFrameHeader = 0x3c;
+ break;
+ case TCH_AFS6_7:
+ mViterbi = new ViterbiTCH_AFS6_7();
+ mAMRFrameLth = 18;
+ mAMRFrameHeader = 0x3c;
+ break;
+ case TCH_AFS5_9:
+ mViterbi = new ViterbiTCH_AFS5_9();
+ mAMRFrameLth = 16;
+ mAMRFrameHeader = 0x14;
+ break;
+ case TCH_AFS5_15:
+ mViterbi = new ViterbiTCH_AFS5_15();
+ mAMRFrameLth = 14;
+ mAMRFrameHeader = 0x3c;
+ break;
+ case TCH_AFS4_75:
+ mViterbi = new ViterbiTCH_AFS4_75();
+ mAMRFrameLth = 13;
+ mAMRFrameHeader = 0x3c;
+ break;
+ default:
+ mViterbi = new ViterbiTCH_AFS12_2();
+ mAMRFrameLth = 32;
+ mAMRFrameHeader = 0x3c;
+ break;
}
}
}
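
One step in the TCH/EFR branch above deserves a closer look: after unmapping with g660BitOrder, the 260-bit word TCHW still carries the repetition bits and the CRC that EFR's preliminary coding added, and the five copy loops strip them to recover the raw 244-bit EFR frame before it is re-mapped with gAMRBitOrderTCH_AFS12_2. The sketch below restates those loops as a range table so the skipped positions (71-72, 123-124, 178-179, 230-231 and 252-259) are explicit; the vector types and names are illustrative, not the decoder's.

#include <cstdint>
#include <vector>

// Drop the redundancy that EFR preliminary coding inserted into the 260-bit word,
// keeping only the 244 payload bits (ranges copied from the decoder loops above).
static std::vector<uint8_t> stripEfrRedundancy(const std::vector<uint8_t> &tchw /* 260 bits */)
{
    // [first, last) source ranges that carry real EFR bits; the gaps in between
    // are the repeated bits and the final 8-bit CRC.
    static const unsigned ranges[][2] = {
        {0, 71}, {73, 123}, {125, 178}, {180, 230}, {232, 252}
    };
    std::vector<uint8_t> efr;
    efr.reserve(244);
    for (const auto &r : ranges)
        for (unsigned k = r[0]; k < r[1]; ++k)
            efr.push_back(tchw[k] & 1);
    return efr;                                // 71 + 50 + 53 + 50 + 20 = 244 bits
}

The AMR modes skip this step entirely: setCodingMode() sizes mTCHU/mTCHD from gAMRKd, and the decoded bits are written out directly, with only the per-mode frame header and frame length changing.
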
diff --git a/lib/decoding/tch_f_decoder_impl.h b/lib/decoding/tch_f_decoder_impl.h
index 4ceabd6..4c8a4b9 100644
--- a/lib/decoding/tch_f_decoder_impl.h
+++ b/lib/decoding/tch_f_decoder_impl.h
@@ -23,10 +23,12 @@
#ifndef INCLUDED_GSM_TCH_F_DECODER_IMPL_H
#define INCLUDED_GSM_TCH_F_DECODER_IMPL_H
-#include "VocoderFrame.h"
+#include "AmrCoder.h"
+#include "BitVector.h"
+#include "GSM503Tables.h"
#include "GSM610Tables.h"
#include "GSM660Tables.h"
-#include "GSM690Tables.h"
+#include "ViterbiR204.h"
#include <grgsm/decoding/tch_f_decoder.h>
@@ -52,7 +54,39 @@ namespace gr {
pmt::pmt_t d_bursts[8];
FILE * d_speech_file;
enum tch_mode d_tch_mode;
+
+ BitVector mU;
+ BitVector mP;
+ BitVector mD;
+ BitVector mDP;
+ BitVector mTCHU;
+ BitVector mTCHD;
+ BitVector mClass1A_d;
+ SoftVector mC;
+ SoftVector mClass1_c;
+ SoftVector mClass2_c;
+ SoftVector mTCHUC;
+
+ Parity mBlockCoder;
+ Parity mTCHParity;
+
+ ViterbiR2O4 mVR204Coder;
+ ViterbiBase *mViterbi;
+
+ const unsigned char amr_nb_magic[6] = { 0x23, 0x21, 0x41, 0x4d, 0x52, 0x0a };
+ unsigned char iBLOCK[2*BLOCKS*iBLOCK_SIZE];
+ unsigned char mAMRFrameHeader;
+
+ const unsigned *mAMRBitOrder;
+ const unsigned *mPuncture;
+ unsigned mClass1ALth;
+ unsigned mClass1BLth;
+ unsigned mPunctureLth;
+ uint8_t mAMRFrameLth;
+ uint8_t mKd;
+
void decode(pmt::pmt_t msg);
+ void setCodingMode(tch_mode mode);
public:
tch_f_decoder_impl(tch_mode mode, const std::string &file);
~tch_f_decoder_impl();