author    Andreas Eversberg <jolly@eversberg.eu>    2010-09-23 21:05:18 +0200
committer Andreas Eversberg <jolly@eversberg.eu>    2016-09-25 08:11:41 +0200
commit    620366a951572d9660583f76f99d78a72c6ecdaf (patch)
tree      4a15dc138d0d483ae6bfbe6751ca0129bbea4a98 /src
parent    ce3c88e075ee5d5e97f8497c0143aeedf3ad0f8c (diff)
[WIP] Safely change TPU offset on TS change or sync change
The TPU offset may vary by some qbits, but on a significant change, like a change to a new time slot, the TPU offset is always incremented. Changing backwards is performed by changing forwards and incrementing the frame number. (This is because we wrap around into the next frame.) The maximum amount of incrementation is half of a TDMA frame duration. This way we can be sure that the TPU always generates the next IRQ later. If an incrementation of more than half of a TDMA frame is required, the incrementation is performed in two steps, so we can be sure that each IRQ will always happen later.

Example:
TS 1 -> TS 2: increment by 625 qbits, keep same FN
TS 1 -> TS 7: increment by 2500 qbits, then by 1250 qbits, keep same FN
TS 7 -> TS 1: increment by 1250 qbits, increment FN
TS 2 -> TS 1: increment by 2500 qbits, then by 1875 qbits, increment FN

TODO: Prevent scheduling/transmission before and during change of TPU/FN.
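For illustration only (not part of the patch): a minimal, self-contained C sketch of the stepping rule above. The names QBITS_PER_TN, MAX_STEP_QBITS and show_tn_change() are invented for this example; the firmware itself implements the rule in tpu_correction() in tpu_window.c below, driven by l1s.tpu_offset_correction and L1_TDMA_LENGTH_Q.

#include <stdio.h>

#define QBITS_PER_TN	625			/* one timeslot is 625 qbits */
#define QBITS_PER_TDMA	(8 * QBITS_PER_TN)	/* one TDMA frame is 5000 qbits */
#define MAX_STEP_QBITS	(QBITS_PER_TDMA / 2)	/* never jump more than half a frame at once */

/* Print the forward correction steps for a timeslot change. A backward
 * shift is turned into a forward shift plus a frame number increment,
 * as described above. */
static void show_tn_change(int old_tn, int new_tn)
{
	int shift = (new_tn - old_tn) * QBITS_PER_TN;
	int inc_fn = 0;

	if (shift < 0) {
		shift += QBITS_PER_TDMA;	/* wrap into the next frame */
		inc_fn = 1;
	}

	printf("TS %d -> TS %d: increment", old_tn, new_tn);
	while (shift > MAX_STEP_QBITS) {
		printf(" by %d qbits, then", MAX_STEP_QBITS);
		shift -= MAX_STEP_QBITS;
	}
	printf(" by %d qbits, %s\n", shift,
	       inc_fn ? "increment FN" : "keep same FN");
}

int main(void)
{
	show_tn_change(1, 2);	/* 625 qbits, same FN */
	show_tn_change(1, 7);	/* 2500 + 1250 qbits, same FN */
	show_tn_change(7, 1);	/* 1250 qbits, FN + 1 */
	show_tn_change(2, 1);	/* 2500 + 1875 qbits, FN + 1 */
	return 0;
}

Running it reproduces the four example lines listed above.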
Diffstat (limited to 'src')
-rw-r--r--  src/target/firmware/layer1/l23_api.c     14
-rw-r--r--  src/target/firmware/layer1/prim_rach.c    4
-rw-r--r--  src/target/firmware/layer1/sync.c         1
-rw-r--r--  src/target/firmware/layer1/toa.c          2
-rw-r--r--  src/target/firmware/layer1/tpu_window.c  52
5 files changed, 59 insertions, 14 deletions
diff --git a/src/target/firmware/layer1/l23_api.c b/src/target/firmware/layer1/l23_api.c
index 311ca6a2..2331d744 100644
--- a/src/target/firmware/layer1/l23_api.c
+++ b/src/target/firmware/layer1/l23_api.c
@@ -225,9 +225,11 @@ static void l1ctl_rx_dm_est_req(struct msgb *msg)
struct l1ctl_hdr *l1h = (struct l1ctl_hdr *) msg->data;
struct l1ctl_info_ul *ul = (struct l1ctl_info_ul *) l1h->data;
struct l1ctl_dm_est_req *est_req = (struct l1ctl_dm_est_req *) ul->payload;
+ uint8_t old_tn;
- printd("L1CTL_DM_EST_REQ (arfcn=%u, chan_nr=0x%02x, tsc=%u)\n",
- ntohs(est_req->h0.band_arfcn), ul->chan_nr, est_req->tsc);
+ printd("L1CTL_DM_EST_REQ (arfcn=%u, chan_nr=0x%02x, tsc=%u tn=%u)\n",
+ ntohs(est_req->h0.band_arfcn), ul->chan_nr, est_req->tsc,
+ ul->chan_nr & 0x7);
/* disable neighbour cell measurement of C0 TS 0 */
mframe_disable(MF_TASK_NEIGH_PM51_C0T0);
@@ -235,6 +237,7 @@ static void l1ctl_rx_dm_est_req(struct msgb *msg)
/* configure dedicated channel state */
l1s.dedicated.type = chan_nr2dchan_type(ul->chan_nr);
l1s.dedicated.tsc = est_req->tsc;
+ old_tn = l1s.dedicated.tn;
l1s.dedicated.tn = ul->chan_nr & 0x7;
l1s.dedicated.h = est_req->h;
@@ -264,6 +267,13 @@ static void l1ctl_rx_dm_est_req(struct msgb *msg)
/* figure out which MF tasks to enable */
l1a_mftask_set(chan_nr2mf_task_mask(ul->chan_nr, NEIGH_MODE_PM));
+
+ /* shift TPU according to change in TN */
+ if (l1s.dedicated.tn != old_tn) {
+ l1s.tpu_offset_correction += (l1s.dedicated.tn - old_tn) * 625;
+ printf("Shift TPU by %d TN (%d qbits)\n",
+ l1s.dedicated.tn - old_tn, l1s.tpu_offset_correction);
+ }
}
/* receive a L1CTL_DM_FREQ_REQ from L23 */
diff --git a/src/target/firmware/layer1/prim_rach.c b/src/target/firmware/layer1/prim_rach.c
index e6ea6568..9cbf5fae 100644
--- a/src/target/firmware/layer1/prim_rach.c
+++ b/src/target/firmware/layer1/prim_rach.c
@@ -138,6 +138,10 @@ void l1a_rach_req(uint16_t offset, uint8_t combined, uint8_t ra)
unsigned long flags;
offset += 3;
+ if (l1s.tpu_offset_correction) {
+ puts("RACH while TPU is not correct, delaying!\n");
+ offset += 2;
+ }
local_firq_save(flags);
if (combined) {
diff --git a/src/target/firmware/layer1/sync.c b/src/target/firmware/layer1/sync.c
index 36f42975..e12a65f1 100644
--- a/src/target/firmware/layer1/sync.c
+++ b/src/target/firmware/layer1/sync.c
@@ -367,6 +367,7 @@ void l1s_reset(void)
/* Leave dedicated mode */
l1s.dedicated.type = GSM_DCHAN_NONE;
+ l1s.dedicated.tn = 0;
/* reset scheduler and hardware */
sched_gsmtime_reset();
diff --git a/src/target/firmware/layer1/toa.c b/src/target/firmware/layer1/toa.c
index 7d80d952..16f99d5e 100644
--- a/src/target/firmware/layer1/toa.c
+++ b/src/target/firmware/layer1/toa.c
@@ -75,6 +75,6 @@ static void toa_ravg_output(struct running_avg *ravg, int32_t avg)
{
if (avg != 16) {
printf("TOA AVG is not 16 qbits, correcting (got %ld)\n", avg);
- l1s.tpu_offset_correction = avg - 16;
+ l1s.tpu_offset_correction += avg - 16;
}
}
diff --git a/src/target/firmware/layer1/tpu_window.c b/src/target/firmware/layer1/tpu_window.c
index f4e76c16..23d3244b 100644
--- a/src/target/firmware/layer1/tpu_window.c
+++ b/src/target/firmware/layer1/tpu_window.c
@@ -67,30 +67,60 @@ static const uint16_t tx_burst_duration[_NUM_L1_TXWIN] = {
[L1_TXWIN_AB] = L1_TX_AB_DURATION_Q,
};
+/* correct TPU window and compensate frame number */
+static int16_t tpu_correction(void)
+{
+ int16_t correction = l1s.tpu_offset_correction;
+
+ /* allow minimal corrections, like TOA */
+ if (correction >= -32 && correction <= 32) {
+ l1s.tpu_offset_correction = 0;
+ return correction;
+ }
+
+ /* except for minimal changes, always advance TPU offset */
+ if (correction < 0) {
+ printf("Negative TPU Chg (%u qbits), correcting forward...\n",
+ correction);
+ correction += L1_TDMA_LENGTH_Q;
+ l1s.tpu_offset_correction = correction;
+ l1s.current_time = l1s.next_time;
+ l1s_time_inc(&l1s.next_time, 1);
+ if (l1s.mframe_sched.safe_fn < GSM_MAX_FN)
+ ADD_MODULO(l1s.mframe_sched.safe_fn, 1, GSM_MAX_FN);
+ }
+ if (correction <= (L1_TDMA_LENGTH_Q >> 1)) {
+ printf("TPU Chg forth by %u qbits! (final step)\n",
+ correction);
+ l1s.tpu_offset_correction = 0;
+ } else {
+ /* no correction by more than half of the TDMA length */
+ correction = (L1_TDMA_LENGTH_Q >> 1);
+ printf("TPU Chg forth by %u qbits! (intermediate step)\n",
+ correction);
+ l1s.tpu_offset_correction -= (L1_TDMA_LENGTH_Q >> 1);
+ }
+
+ return correction;
+}
static int _win_setup(__unused uint8_t p1, __unused uint8_t p2, __unused uint16_t p3)
{
- uint8_t tn;
-
- rfch_get_params(&l1s.next_time, NULL, NULL, &tn);
+ int16_t correction = tpu_correction();
- l1s.tpu_offset = (5000 + l1s.tpu_offset + l1s.tpu_offset_correction) % 5000;
- l1s.tpu_offset_correction = 0;
+ l1s.tpu_offset = (l1s.tpu_offset + correction + L1_TDMA_LENGTH_Q)
+ % L1_TDMA_LENGTH_Q;
tpu_enq_at(4740);
- tpu_enq_sync((5000 + l1s.tpu_offset + (L1_BURST_LENGTH_Q * tn)) % 5000);
+ tpu_enq_sync(l1s.tpu_offset);
return 0;
}
static int _win_cleanup(__unused uint8_t p1, __unused uint8_t p2, __unused uint16_t p3)
{
- uint8_t tn;
-
- rfch_get_params(&l1s.next_time, NULL, NULL, &tn);
-
/* restore offset */
- tpu_enq_offset((5000 + l1s.tpu_offset + (L1_BURST_LENGTH_Q * tn)) % 5000);
+ tpu_enq_offset(l1s.tpu_offset);
return 0;
}