diff --git a/.github/workflows/pytest.yml b/.github/workflows/pytest.yml
index 041f75f..240f38b 100644
--- a/.github/workflows/pytest.yml
+++ b/.github/workflows/pytest.yml
@@ -32,4 +32,5 @@ jobs:
     - name: Test with pytest
       run: |
         pip install pytest
-        pytest --ignore-glob=*_figures.py
\ No newline at end of file
+        pytest --ignore-glob=*_figures.py --ignore-glob=*_benchmark.py
+
diff --git a/.gitignore b/.gitignore
index 34ef6f6..4e0f4eb 100644
--- a/.gitignore
+++ b/.gitignore
@@ -10,3 +10,4 @@ checkpoints/
 lightning_logs/
 *.pt
 *.jpg
+*.benchmarks/
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000..46586ba
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,15 @@
+FROM pytorch/pytorch:1.13.1-cuda11.6-cudnn8-runtime
+
+ENV DEBIAN_FRONTEND=noninteractive
+
+ADD torchsig/ /build/torchsig
+
+ADD pyproject.toml /build/pyproject.toml
+
+RUN pip3 install /build
+
+RUN pip3 install notebook
+
+WORKDIR /workspace/code
+
+ADD examples/ /workspace/code/examples
\ No newline at end of file
diff --git a/README.md b/README.md
index f85ddd8..5ad4c45 100644
--- a/README.md
+++ b/README.md
@@ -1,5 +1,8 @@
 <div align="center">

-<img src="…" alt="drawing"/>
+<picture>
+  <source media="(prefers-color-scheme: dark)" srcset="docs/torchsig_logo_white_dodgerblue.png">
+  <img src="…" alt="drawing"/>
+</picture>

 -----
@@ -40,6 +43,17 @@
 cd torchsig
 pip install .
 ```
+## Using the Dockerfile
+If you have Docker installed along with compatible GPUs and drivers, you can try:
+
+```
+docker build -t torchsig -f Dockerfile .
+docker run -d --rm --network=host --shm-size=32g --gpus all --name torchsig_workspace torchsig tail -f /dev/null
+docker exec torchsig_workspace jupyter notebook --allow-root --ip=0.0.0.0 --no-browser
+```
+
+Then open the URL printed by the notebook server in your browser to run the examples and notebooks.
+
 ## License
 ---
 TorchSig is released under the MIT License. The MIT license is a popular open-source software license enabling free use, redistribution, and modifications, even for commercial purposes, provided the license is included in all copies or substantial portions of the software. TorchSig has no connection to MIT, other than through the use of this license.
diff --git a/docs/torchsig_logo_white_dodgerblue.png b/docs/torchsig_logo_white_dodgerblue.png
new file mode 100644
index 0000000..d11e6be
Binary files /dev/null and b/docs/torchsig_logo_white_dodgerblue.png differ
diff --git a/pyproject.toml b/pyproject.toml
index 80f8178..4fca010 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -12,7 +12,7 @@ readme = "README.md"
 requires-python = ">=3.7"
 license = {text = "MIT"}
 dependencies = [
-    "torch==1.13.0",
+    "torch==1.13.1",
     "torchvision",
     "tqdm",
     "numpy",
diff --git a/tests/test_modulation_benchmark.py b/tests/test_modulation_benchmark.py
new file mode 100644
index 0000000..d928a3e
--- /dev/null
+++ b/tests/test_modulation_benchmark.py
@@ -0,0 +1,69 @@
+from torchsig.datasets.synthetic import (
+    ConstellationDataset,
+    FSKDataset,
+    OFDMDataset,
+    default_const_map,
+    freq_map,
+)
+from matplotlib import pyplot as plt
+import numpy as np
+import pytest
+
+
+def iterate_one_epoch(dataset):
+    for _ in dataset:
+        pass
+
+
+@pytest.mark.benchmark(group="constellation")
+@pytest.mark.parametrize("modulation_name", default_const_map.keys())
+def test_generate_constellation_benchmark(benchmark, modulation_name):
+    dataset = ConstellationDataset(
+        [modulation_name],
+        num_iq_samples=4096,
+        num_samples_per_class=100,
+        iq_samples_per_symbol=2,
+        pulse_shape_filter=None,
+        random_pulse_shaping=False,
+        random_data=False,
+        use_gpu=False,
+    )
+    benchmark(iterate_one_epoch, dataset)
+
+
+@pytest.mark.benchmark(group="fsk")
+@pytest.mark.parametrize("modulation_name", freq_map.keys())
+def test_generate_fsk_benchmark(benchmark, modulation_name):
+    dataset = FSKDataset(
+        [modulation_name],
+        num_iq_samples=4096,
+        num_samples_per_class=100,
+        iq_samples_per_symbol=2,
+        random_pulse_shaping=False,
+        random_data=False,
+        use_gpu=False,
+    )
+    benchmark(iterate_one_epoch, dataset)
+
+
+num_subcarriers = (64, 72, 128, 180, 256, 300, 512, 600, 900, 1024, 1200, 2048)
+
+
+@pytest.mark.benchmark(group="ofdm")
+@pytest.mark.parametrize("num_subcarriers", num_subcarriers)
+def test_generate_ofdm_benchmark(benchmark, num_subcarriers):
+    constellations = ("bpsk", "qpsk", "16qam", "64qam", "256qam", "1024qam")
+    sidelobe_suppression_methods = ("lpf", "win_start")
+    dataset = OFDMDataset(
+        constellations,
+        num_subcarriers=(num_subcarriers,),
+        num_iq_samples=4096,
+        num_samples_per_class=100,
+        sidelobe_suppression_methods=sidelobe_suppression_methods,
+        use_gpu=False,
+    )
+    benchmark(iterate_one_epoch, dataset)
+
+
+if __name__ == "__main__":
+    pytest.main()
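The `benchmark` fixture used throughout this new file comes from the pytest-benchmark plugin, which is also why the CI job above now passes `--ignore-glob=*_benchmark.py` and why a benchmarks directory joins `.gitignore`. A minimal sketch of driving the suite directly, assuming pytest-benchmark is installed (the flags below are the plugin's standard ones):

```python
import pytest

# Run only the benchmark tests; --benchmark-autosave persists results under
# .benchmarks/ so later runs can be compared against them.
pytest.main([
    "tests/test_modulation_benchmark.py",
    "--benchmark-only",
    "--benchmark-autosave",
])
```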
diff --git a/tests/test_transforms.py b/tests/test_transforms.py
new file mode 100644
index 0000000..2e9c821
--- /dev/null
+++ b/tests/test_transforms.py
@@ -0,0 +1,121 @@
+from unittest import TestCase
+from torchsig.transforms.system_impairment.si import RandomTimeShift, TimeCrop
+import numpy as np
+
+
+class RandomTimeShiftTestCase(TestCase):
+    def test_random_time_shift_right(self):
+        rng = np.random.RandomState(0)
+        data = (
+            rng.rand(
+                16,
+            )
+            - 0.5
+        ) + 1j * (
+            rng.rand(
+                16,
+            )
+            - 0.5
+        )
+        shift = 5
+        t = RandomTimeShift(
+            shift=shift,
+        )
+        new_data = t(data)
+        self.assertTrue(np.allclose(data[:-shift], new_data[shift:]))
+        self.assertTrue(np.allclose(new_data[:shift], np.zeros(shift)))
+
+    def test_random_time_shift_left(self):
+        rng = np.random.RandomState(0)
+        data = (
+            rng.rand(
+                16,
+            )
+            - 0.5
+        ) + 1j * (
+            rng.rand(
+                16,
+            )
+            - 0.5
+        )
+        shift = -5
+        t = RandomTimeShift(
+            shift=shift,
+        )
+        new_data = t(data)
+        self.assertTrue(np.allclose(data[-shift:], new_data[:shift]))
+        self.assertTrue(np.allclose(new_data[shift:], np.zeros(np.abs(shift))))
+
+
+class TimeCropTestCase(TestCase):
+    def test_time_crop_start(self):
+        rng = np.random.RandomState(0)
+        num_iq_samples = 16
+        data = (
+            rng.rand(
+                num_iq_samples,
+            )
+            - 0.5
+        ) + 1j * (
+            rng.rand(
+                num_iq_samples,
+            )
+            - 0.5
+        )
+        length = 4
+        t = TimeCrop(
+            crop_type="start",
+            length=length,
+        )
+        new_data: np.ndarray = t(data)
+        self.assertTrue(np.allclose(data[:length], new_data))
+        self.assertTrue(new_data.shape[0] == length)
+
+    def test_time_crop_center(self):
+        rng = np.random.RandomState(0)
+        num_iq_samples = 16
+        data = (
+            rng.rand(
+                num_iq_samples,
+            )
+            - 0.5
+        ) + 1j * (
+            rng.rand(
+                num_iq_samples,
+            )
+            - 0.5
+        )
+        length = 4
+        t = TimeCrop(
+            crop_type="center",
+            length=length,
+        )
+        new_data: np.ndarray = t(data)
+        extra_samples = num_iq_samples - length
+        self.assertTrue(
+            np.allclose(data[extra_samples // 2 : -extra_samples // 2], new_data)
+        )
+        self.assertTrue(new_data.shape[0] == length)
+
+    def test_time_crop_end(self):
+        rng = np.random.RandomState(0)
+        num_iq_samples = 16
+        data = (
+            rng.rand(
+                num_iq_samples,
+            )
+            - 0.5
+        ) + 1j * (
+            rng.rand(
+                num_iq_samples,
+            )
+            - 0.5
+        )
+        length = 4
+        t = TimeCrop(
+            crop_type="end",
+            length=length,
+        )
+        new_data: np.ndarray = t(data)
+        self.assertTrue(np.allclose(data[-length:], new_data))
+        self.assertTrue(new_data.shape[0] == length)
diff --git a/torchsig/__init__.py b/torchsig/__init__.py
index 493f741..260c070 100644
--- a/torchsig/__init__.py
+++ b/torchsig/__init__.py
@@ -1 +1 @@
-__version__ = "0.3.0"
+__version__ = "0.3.1"
diff --git a/torchsig/datasets/synthetic.py b/torchsig/datasets/synthetic.py
index 3035744..5b2cb44 100644
--- a/torchsig/datasets/synthetic.py
+++ b/torchsig/datasets/synthetic.py
@@ -16,21 +16,22 @@ def torchsig_convolve(
     signal: np.ndarray, taps: np.ndarray, gpu: bool = False
 ) -> np.ndarray:
+    return sp.convolve(signal, taps, "same")  # This will run into issues if signal is smaller than taps
-    torch_signal = torch.from_numpy(signal.astype(np.complex128)).reshape(1, -1)
-    torch_taps = torch.flip(
-        torch.from_numpy(taps.astype(np.complex128)).reshape(1, 1, -1), dims=(2,)
-    )
-    if gpu:
-        result = torch.nn.functional.conv1d(
-            torch_signal.cuda(), torch_taps.cuda(), padding=torch_signal.shape[0] - 1
-        )
-        return result.cpu().numpy()[0]
-
-    result = torch.nn.functional.conv1d(
-        torch_signal, torch_taps, padding=torch_signal.shape[0] - 1
-    )
-    return result.numpy()[0]
+    # torch_signal = torch.from_numpy(signal.astype(np.complex128)).reshape(1, -1)
+    # torch_taps = torch.flip(
+    #     torch.from_numpy(taps.astype(np.complex128)).reshape(1, 1, -1), dims=(2,)
+    # )
+    # if gpu:
+    #     result = torch.nn.functional.conv1d(
+    #         torch_signal.cuda(), torch_taps.cuda(), padding=torch_signal.shape[0] - 1
+    #     )
+    #     return result.cpu().numpy()[0]
+
+    # result = torch.nn.functional.conv1d(
+    #     torch_signal, torch_taps, padding=torch_signal.shape[0] - 1
+    # )
+    # return result.numpy()[0]
 
 
 def remove_corners(const):
@@ -359,15 +360,17 @@ def _generate_samples(self, item: Tuple) -> np.ndarray:
         const = self.const_map[class_name] / np.mean(np.abs(self.const_map[class_name]))
         symbol_nums = np.random.randint(
-            0, len(const), 2 * int(self.num_iq_samples / self.iq_samples_per_symbol)
+            0, len(const), int(self.num_iq_samples / self.iq_samples_per_symbol)
         )
         symbols = const[symbol_nums]
         zero_padded = np.zeros(
             (self.iq_samples_per_symbol * len(symbols),), dtype=np.complex64
         )
         zero_padded[:: self.iq_samples_per_symbol] = symbols
+        # excess bandwidth is defined in proportion to signal bandwidth, not sampling rate,
+        # thus needs to be scaled by the samples per symbol
         pulse_shape_filter_length = estimate_filter_length(
-            signal_description.excess_bandwidth
+            signal_description.excess_bandwidth / self.iq_samples_per_symbol
         )
         pulse_shape_filter_span = int(
             (pulse_shape_filter_length - 1) / 2
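The scaling fix above matters because the filter designer works on a transition width normalized to the sampling rate, while excess bandwidth is specified relative to the symbol rate. A rough worked sketch, assuming `estimate_filter_length` behaves like the common harris rule of thumb N ≈ attenuation_dB / (22 · Δf) (its actual definition is not part of this diff):

```python
# Hypothetical stand-in for estimate_filter_length, which is not shown in this
# diff; the harris rule of thumb N ~ attenuation_dB / (22 * df) is assumed here.
def approx_filter_length(transition_bandwidth: float, attenuation_db: float = 72.0) -> int:
    return int(attenuation_db / (22 * transition_bandwidth))

excess_bandwidth = 0.35    # defined relative to the symbol rate
iq_samples_per_symbol = 2  # so the symbol rate is half the sampling rate

# Unscaled, the transition band looks twice as wide as it really is -> filter too short
print(approx_filter_length(excess_bandwidth))                          # 9
# Scaled by samples per symbol, as in the corrected call above
print(approx_filter_length(excess_bandwidth / iq_samples_per_symbol))  # 18
```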
@@ -501,14 +504,16 @@ def __init__(
         self.index = []
         if "lpf" in sidelobe_suppression_methods:
             # Precompute LPF
-            num_taps = 50
-            cutoff = 0.6
+            cutoff = 0.3
+            transition_bandwidth = (0.5 - cutoff) / 4
+            num_taps = estimate_filter_length(transition_bandwidth)
             self.taps = sp.firwin(
                 num_taps,
                 cutoff,
-                width=cutoff * 0.02,
+                width=transition_bandwidth,
                 window=sp.get_window("blackman", num_taps),
                 scale=True,
+                fs=1,
             )
 
         # Precompute all possible random symbols for speed at sample generation
@@ -580,50 +585,29 @@ def _generate_samples(self, item: Tuple) -> np.ndarray:
         orig_state = np.random.get_state()
         if not self.random_data:
             np.random.seed(index)
-
-        # Symbol multiplier: we want to be able to randomly index into
-        # generated IQ samples such that we can see symbol transitions.
-        # This multiplier ensures enough OFDM symbols are generated for
-        # this randomness.
-        # Check against max possible requirements
-        # 2x for symbol length
-        # 2x for number of symbols for at least 1 transition
-        # 4x for largest burst duration option
-        sym_mult = 1
-
-        if self.num_iq_samples <= 4 * 2 * 2 * num_subcarriers:
-            sym_mult = self.num_iq_samples / (2 * 2 * num_subcarriers) + 1e-6
-            sym_mult = (
-                int(np.ceil(sym_mult**-1))
-                if sym_mult < 1.0
-                else int(np.ceil(sym_mult))
-            )
-
-        if self.num_iq_samples > 32768:
-            # assume wideband task and reduce data for speed
-            sym_mult = 0.3
-
+
         if mod_type == "random":
-            # Randomized subcarrier modulations
-            symbols = []
-            for subcarrier_idx in range(num_subcarriers):
-                curr_const = np.random.randint(len(self.random_symbols))
-                symbols.extend(
-                    np.random.choice(
-                        self.random_symbols[curr_const],
-                        size=int(2 * sym_mult * self.num_iq_samples / num_subcarriers),
-                    )
-                )
-            symbols = np.asarray(symbols)
+            symbols_idxs = np.random.randint(0, 1024, size=self.num_iq_samples)
+            const_idxes = np.random.choice(
+                range(len(self.random_symbols)), size=num_subcarriers
+            )
+            symbols = np.zeros(self.num_iq_samples, dtype=np.complex128)
+            for subcarrier_idx, const_idx in enumerate(const_idxes):
+                begin_idx = (self.num_iq_samples // num_subcarriers) * subcarrier_idx
+                end_idx = (self.num_iq_samples // num_subcarriers) * (subcarrier_idx + 1)
+                symbols[begin_idx:end_idx] = self.random_symbols[const_idx][
+                    np.mod(
+                        symbols_idxs[begin_idx:end_idx],
+                        len(self.random_symbols[const_idx]),
+                    )
+                ]
         else:
             # Fixed modulation across all subcarriers
             const_name = np.random.choice(self.constellations)
             const = default_const_map[const_name] / np.mean(
                 np.abs(default_const_map[const_name])
             )
-            symbol_nums = np.random.randint(
-                0, len(const), int(2 * sym_mult * self.num_iq_samples)
-            )
+            symbol_nums = np.random.randint(0, len(const), int(self.num_iq_samples))
             symbols = const[symbol_nums]
         divisible_index = -(len(symbols) % num_subcarriers)
         if divisible_index != 0:
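The rewritten randomized branch fills one contiguous chunk of `symbols` per subcarrier instead of growing a Python list, which is where most of the speedup in this hunk comes from. A small sketch of the chunk arithmetic, using the 4096-sample, 64-subcarrier case exercised by the new benchmarks:

```python
num_iq_samples = 4096
num_subcarriers = 64

# Each subcarrier owns num_iq_samples // num_subcarriers consecutive symbols
chunk = num_iq_samples // num_subcarriers  # 64
for subcarrier_idx in (0, 1, 63):
    begin_idx = chunk * subcarrier_idx
    end_idx = chunk * (subcarrier_idx + 1)
    print(subcarrier_idx, begin_idx, end_idx)  # (0, 0, 64), (1, 64, 128), (63, 4032, 4096)
```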
@@ -737,14 +721,16 @@ def _generate_samples(self, item: Tuple) -> np.ndarray:
         elif sidelobe_suppression_method == "rand_lpf":
             flattened = cyclic_prefixed.T.flatten()
             # Generate randomized LPF
-            cutoff = np.random.uniform(0.95, 0.95)
-            num_taps = estimate_filter_length(cutoff)
+            cutoff = np.random.uniform(0.25, 0.475)
+            transition_bandwidth = (0.5 - cutoff) / 4
+            num_taps = estimate_filter_length(transition_bandwidth)
             taps = sp.firwin(
                 num_taps,
                 cutoff,
-                width=cutoff * 0.02,
+                width=transition_bandwidth,
                 window=sp.get_window("blackman", num_taps),
                 scale=True,
+                fs=1,
             )
             # Apply random LPF
             output = torchsig_convolve(flattened, taps, gpu=self.use_gpu)[:-num_taps]
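Both `firwin` hunks above move from a fixed design with a very narrow transition (`width=cutoff * 0.02`) to taps sized from an explicit transition bandwidth. A sketch of the resulting design for the fixed `lpf` case, again assuming the harris-style estimate from earlier (the real `estimate_filter_length` is not shown in this diff):

```python
import scipy.signal as sp

cutoff = 0.3                               # fs=1, so Nyquist is 0.5
transition_bandwidth = (0.5 - cutoff) / 4  # 0.05
num_taps = int(72 / (22 * transition_bandwidth))  # ~65 taps under the assumed harris rule

# Note: when `width` is given, firwin derives a Kaiser window from it and
# ignores any `window` argument, so this is effectively a Kaiser design.
taps = sp.firwin(num_taps, cutoff, width=transition_bandwidth, scale=True, fs=1)
print(taps.shape)  # (65,)
```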
@@ -818,7 +804,7 @@ def _generate_samples(self, item: Tuple) -> np.ndarray:
             ]
 
         # Randomize the start index (while bypassing the initial windowing if present)
-        if sym_mult == 1 and num_subcarriers * 4 * burst_dur < self.num_iq_samples:
+        if num_subcarriers * 4 * burst_dur < self.num_iq_samples:
             start_idx = np.random.randint(0, output.shape[0] - self.num_iq_samples)
         else:
             if original_on:
@@ -945,7 +931,7 @@ def _generate_samples(self, item: Tuple) -> np.ndarray:
         symbol_nums = np.random.randint(
             0,
             len(const_oversampled),
-            int(2 * self.num_iq_samples / samples_per_symbol_recalculated),
+            int(self.num_iq_samples / samples_per_symbol_recalculated),
         )
         symbols = const_oversampled[symbol_nums]
diff --git a/torchsig/transforms/system_impairment/si.py b/torchsig/transforms/system_impairment/si.py
index 97e33c6..a6fa370 100644
--- a/torchsig/transforms/system_impairment/si.py
+++ b/torchsig/transforms/system_impairment/si.py
@@ -67,12 +67,13 @@ def __call__(self, data: Any) -> Any:
             )
 
             # Apply data transformation
-            new_data.iq_data = functional.fractional_shift(
-                data.iq_data,
-                self.taps,
-                self.interp_rate,
-                -decimal_part  # this needed to be negated to be consistent with the previous implementation
-            )
+            if decimal_part != 0:
+                new_data.iq_data = functional.fractional_shift(
+                    data.iq_data,
+                    self.taps,
+                    self.interp_rate,
+                    -decimal_part  # this needed to be negated to be consistent with the previous implementation
+                )
             new_data.iq_data = functional.time_shift(new_data.iq_data, int(integer_part))
 
             # Update SignalDescription
@@ -91,12 +92,14 @@ def __call__(self, data: Any) -> Any:
             new_data.signal_description = new_signal_description
 
         else:
-            new_data = functional.fractional_shift(
-                data,
-                self.taps,
-                self.interp_rate,
-                -decimal_part  # this needed to be negated to be consistent with the previous implementation
-            )
+            new_data = data.copy()
+            if decimal_part != 0:
+                new_data = functional.fractional_shift(
+                    new_data,
+                    self.taps,
+                    self.interp_rate,
+                    -decimal_part  # this needed to be negated to be consistent with the previous implementation
+                )
             new_data = functional.time_shift(new_data, int(integer_part))
 
         return new_data
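A minimal usage sketch of the changed transform, mirroring the new tests in `tests/test_transforms.py`: with a fixed integer `shift`, `decimal_part` is zero, so only the plain `time_shift` path runs and the filtering stage is skipped entirely.

```python
import numpy as np
from torchsig.transforms.system_impairment.si import RandomTimeShift

data = (np.arange(16) + 1j * np.arange(16)).astype(np.complex64)

t = RandomTimeShift(shift=3)  # fixed integer shift -> decimal_part == 0
shifted = t(data)

assert np.allclose(shifted[3:], data[:-3])    # samples delayed by 3
assert np.allclose(shifted[:3], np.zeros(3))  # leading samples zero-filled
```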