Baseline at 2 samples/symbol
#!/usr/bin/env python
# coding: utf-8

import numpy as np

from sionna.utils import QAMSource
from sionna.signal import Upsampling, Downsampling, RootRaisedCosineFilter, empirical_psd, empirical_aclr

# Root-raised-cosine pulse-shaping filter and matched filter
beta = 0.1
span_in_symbols = 32
samples_per_symbol = 2

rrcf = RootRaisedCosineFilter(span_in_symbols, samples_per_symbol, beta, normalize=False)
mf = RootRaisedCosineFilter(span_in_symbols, samples_per_symbol, beta, normalize=False)
# Scale the matched filter by 1/samples_per_symbol so that the gain of the
# pulse-shaping + matched-filter cascade at the symbol instants is close to one
# (see the optional sanity check below)
mf._coefficients = mf._coefficients / samples_per_symbol
import tensorflow as tf

gpus = tf.config.list_physical_devices('GPU')
print('Number of GPUs available :', len(gpus))
if gpus:
    gpu_num = 0  # Index of the GPU to use
    try:
        tf.config.set_visible_devices(gpus[gpu_num], 'GPU')
        print('Only GPU number', gpu_num, 'used.')
        tf.config.experimental.set_memory_growth(gpus[gpu_num], True)
    except RuntimeError as e:
        print(e)
from tensorflow.keras import Model
from tensorflow.keras.layers import Layer, Dense

import sionna
from sionna.channel import AWGN
from sionna.utils import BinarySource, ebnodb2no, log10, expand_to_rank, insert_dims
from sionna.fec.ldpc.encoding import LDPC5GEncoder
from sionna.fec.ldpc.decoding import LDPC5GDecoder
from sionna.mapping import Mapper, Demapper, Constellation
from sionna.utils import sim_ber
def power(u):
    # Average power (mean squared magnitude) of a tensor
    return tf.norm(u)**2 / tf.cast(tf.size(u), u.dtype)
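
# Optional sanity check (a sketch; `delta` and `cascade_peak` exist only for this
# check). The Filter layers are applied to [batch, time] complex tensors exactly as
# in the model below, here on a unit impulse: with normalize=False and the matched
# filter scaled by 1/samples_per_symbol, the pulse-shaping + matched-filter cascade
# is expected to have roughly unit gain at the symbol instant, which is why the gain
# estimate `signal` computed in the model stays close to one.
delta = tf.concat([tf.ones([1, 1], tf.complex64),
                   tf.zeros([1, 4*rrcf.length], tf.complex64)], axis=-1)
cascade_peak = tf.reduce_max(tf.abs(mf(rrcf(delta))))
print('Pulse-shaping/matched-filter cascade peak gain (expected ~1):', cascade_peak.numpy())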
ebno_db_min = 7.0
ebno_db_max = 10.0

###############################################
# Modulation and coding configuration
###############################################
num_bits_per_symbol = 6  # Baseline is 64-QAM
modulation_order = 2**num_bits_per_symbol
coderate = 0.5  # Coderate for the outer code
n = 1500  # Codeword length [bit]. Must be a multiple of num_bits_per_symbol
num_symbols_per_codeword = n//num_bits_per_symbol  # Number of modulated baseband symbols per codeword
k = int(n*coderate)  # Number of information bits per codeword
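# With these settings, each codeword carries k = 750 information bits mapped onto
# num_symbols_per_codeword = 250 64-QAM symbols. ebnodb2no accounts for the coderate
# and bits per symbol, so for a unit-energy constellation
# Es/No = Eb/No + 10*log10(num_bits_per_symbol*coderate) ≈ Eb/No + 4.77 dB.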
class Baseline(Model):

    def __init__(self):
        super().__init__()

        ################
        ## Transmitter
        ################
        self._binary_source = BinarySource()
        self._encoder = LDPC5GEncoder(k, n)
        constellation = Constellation("qam", num_bits_per_symbol, trainable=False)
        self.constellation = constellation
        self._mapper = Mapper(constellation=constellation)

        ################
        ## Channel
        ################
        self._channel = AWGN()
        self.us = Upsampling(samples_per_symbol)
        self.ds = Downsampling(samples_per_symbol, rrcf.length-1)

        ################
        ## Receiver
        ################
        self._demapper = Demapper("app", constellation=constellation)
        self._decoder = LDPC5GDecoder(self._encoder, hard_out=True)

    @tf.function(jit_compile=True)
    def call(self, batch_size, ebno_db):
        # If `ebno_db` is a scalar, a tensor with shape [batch size] is created,
        # as this is what some layers expect
        if len(ebno_db.shape) == 0:
            ebno_db = tf.fill([batch_size], ebno_db)
        no = ebnodb2no(ebno_db, num_bits_per_symbol, coderate)
        no = expand_to_rank(no, 2)

        ################
        ## Transmitter
        ################
        b = self._binary_source([batch_size, k])
        c = self._encoder(b)
        # Modulation
        x = self._mapper(c)  # x: [batch size, num_symbols_per_codeword]
        # Upsample the QAM symbol sequence and apply the pulse-shaping filter
        x_us = self.us(x)
        xfilt_out = rrcf(x_us)

        ################
        ## Channel
        ################
        # The noise power is scaled by samples_per_symbol because the AWGN is added
        # at the sample rate rather than the symbol rate
        y = self._channel([xfilt_out, no*tf.cast(samples_per_symbol, no.dtype)])

        # Matched filtering
        mf_out = mf(y)
        # Downsample to recover the transmitted symbol sequence
        x_hat = self.ds(mf_out)[:, :x.shape[1]]

        # Estimate the residual complex gain of the filter chain by correlating
        # x_hat with the transmitted symbols, and remove it before demapping
        xconst_sqnorm = power(x)
        signal = tf.reduce_sum(tf.multiply(x_hat, tf.math.conj(x))) / tf.cast(tf.size(x), x.dtype) / xconst_sqnorm
        y = x_hat / signal

        ################
        ## Receiver
        ################
        print('trace demapper')  # Runs only when the function is (re)traced
        llr = self._demapper([y, no])
        # Outer decoding
        b_hat = self._decoder(llr)
        tf.print('exec')  # Runs on every execution
        # Ground truth and reconstructed information bits returned for BER/BLER computation
        return b, b_hat
ebno_dbs = np.arange(ebno_db_min,  # Min SNR for evaluation
                     ebno_db_max,  # Max SNR for evaluation
                     0.5)          # Step

model_baseline = Baseline()
_, bler = sim_ber(model_baseline, ebno_dbs, batch_size=128, num_target_block_errors=1000, max_mc_iter=1000)
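
# Plotting sketch (not part of the original script; assumes matplotlib is available).
# sim_ber returns the per-SNR BER and BLER; only the BLER is kept above.
import matplotlib.pyplot as plt

plt.figure()
plt.semilogy(ebno_dbs, bler, 'o-', label='baseline, 2 samples/symbol')
plt.xlabel('$E_b/N_0$ [dB]')
plt.ylabel('BLER')
plt.grid(which='both')
plt.legend()
plt.show()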