Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
import numpy as np
from matplotlib import pyplot as plt

# Module-wide random generator (unseeded, so runs are not reproducible).
rng = np.random.default_rng()
def get_noise_power(signal_power: float, snr_dB: float) -> float:
    """Returns the noise power given a signal power and a signal-to-noise ratio (SNR).

    Args:
        signal_power (float): power of the signal
        snr_dB (float): SNR in dB

    Returns:
        float: the noise power, i.e. signal_power / 10**(snr_dB / 10)
    """
    return signal_power * 10**(-snr_dB / 10)
def get_signal(frequency: float, sample_count: int, sampling_frequency: float, snr_dB: float) -> np.ndarray:
    """Returns a complex exponential of the given frequency with additive white Gaussian noise.

    Args:
        frequency (float): frequency of the complex exponential
        sample_count (int): number of samples to generate
        sampling_frequency (float): the sampling frequency
        snr_dB (float): desired signal-to-noise ratio in dB

    Returns:
        np.ndarray: complex noisy signal of length sample_count
    """
    k = np.arange(sample_count)
    signal = np.exp(2j * np.pi * k * frequency / sampling_frequency)
    signal_power = np.var(signal)
    # Per-component (real/imaginary) noise variance: half the total noise power.
    noise_variance = get_noise_power(signal_power, snr_dB) / 2
    # BUG FIX: standard_normal() has unit variance, so scale by the standard
    # deviation sqrt(noise_variance), not the variance itself — otherwise the
    # realized SNR does not match snr_dB.
    noise = np.sqrt(noise_variance) * (rng.standard_normal(sample_count) + rng.standard_normal(sample_count) * 1j)
    return signal + noise
def get_log_likelihood(y: np.ndarray, frequencies: np.ndarray, sampling_frequency: float) -> np.ndarray:
    """Returns the log likelihood of frequencies f given samples of the signal

        y[k] = exp(j 2 pi f k / fs),

    with sampling frequency fs:

        L(f) = Re{ sum_k=0^(K-1) y[k] exp(-j 2 pi f k / fs) }

    Note:
        This is the log likelihood for the case that the amplitude (here: 1), the phase offset
        (here: 0) and the time offset (here: 0) are known.

    Args:
        y (np.ndarray): measurement vector
        frequencies (np.ndarray): vector of frequencies for which the likelihood shall be computed
        sampling_frequency (float): the sampling frequency

    Returns:
        np.ndarray: a vector of likelihoods
    """
    sample_indices = np.arange(y.size)
    # One row of complex exponentials per candidate frequency; correlating each
    # row with y is a matched-filter evaluation of the likelihood.
    basis = np.exp(-2j * np.pi / sampling_frequency * np.outer(frequencies, sample_indices))
    correlations = basis @ y
    return correlations.real
def get_coarse_frequency_estimates(y: np.ndarray, peak_count: int, sampling_frequency: float, min_frequency: float, max_frequency: float, grid_size: int) -> np.ndarray:
    """Given a grid of frequencies, this functions returns 'peak_count' frequencies on the grid that
    have the largest likelihood.
    """
    grid = np.linspace(min_frequency, max_frequency, grid_size)
    likelihoods = get_log_likelihood(y=y, frequencies=grid, sampling_frequency=sampling_frequency)
    # Sort by descending likelihood and keep the top peak_count grid points.
    descending_order = np.argsort(-likelihoods)
    return grid[descending_order[:peak_count]]
def get_refined_frequency_estimate(y: np.ndarray, start_frequency: float, sampling_frequency: float, stop_frequency_delta: float) -> float:
    """Tries to search the local maximum next to start_frequency using the Newton method.

    Args:
        y (np.ndarray): measurement vector
        start_frequency (float): initial frequency guess (the initial frequency for the Newton method)
        sampling_frequency (float): the sampling frequency
        stop_frequency_delta (float): If the frequency difference between two iterations is less than this, the algorithm stops.

    Returns:
        float: the refined frequency estimate
    """
    exp_arg = -2j * np.pi / sampling_frequency
    cur_freq = start_frequency
    freq_delta = np.inf
    k = np.arange(y.size)
    iteration_count = 0
    # BUG FIX: this was np.inf, which made the iteration guard dead code and
    # risked an infinite loop if the Newton iteration diverges. A large finite
    # cap preserves normal convergence behavior while bounding the runtime.
    max_iter_count = 100000
    c = sampling_frequency / (2 * np.pi)
    while (np.abs(freq_delta) > stop_frequency_delta) and (iteration_count < max_iter_count):
        iteration_count += 1
        # a @ y ∝ dL/df and b @ y ∝ -d²L/df² for L(f) = Re{ sum y[k] e^{-j2πfk/fs} }.
        a = np.exp(exp_arg * cur_freq * k) * k
        b = a * k
        # Damped Newton step (factor 0.1 for stability): Δf = -L'(f)/L''(f).
        freq_delta = 0.1 * c * np.imag(a @ y) / np.real(b @ y)
        cur_freq = cur_freq + freq_delta
    return cur_freq
def get_crlb_for_known_phase(noise_variance: np.ndarray, sampling_frequency: float, sample_count: int) -> np.ndarray:
    """Returns the Cramér-Rao Lower Bound (CRLB) for the case that the phase offset is known.

    Args:
        noise_variance (np.ndarray): vector of noise variances for which the CRLB shall be computed
        sampling_frequency (float): the sampling frequency
        sample_count (int): the number of samples

    Returns:
        np.ndarray: vector of CRLB values
    """
    n = sample_count
    # n*(n-1)*(2n-1) = 6 * sum_{k=0}^{n-1} k^2
    denominator = np.pi**2 * n * (n - 1) * (2 * n - 1)
    return 3.0 * noise_variance * sampling_frequency**2 / denominator
def get_crlb_for_unknown_phase(noise_variance: np.ndarray, sampling_frequency: float, sample_count: int) -> np.ndarray:
    """Returns the Cramér-Rao Lower Bound (CRLB) for the case that the phase offset is NOT known.

    Args:
        noise_variance (np.ndarray): vector of noise variances for which the CRLB shall be computed
        sampling_frequency (float): the sampling frequency
        sample_count (int): the number of samples

    Returns:
        np.ndarray: vector of CRLB values
    """
    N = sample_count
    # BUG FIX: the original used `N^2`, which is bitwise XOR in Python, not
    # exponentiation — for N=512 it evaluated to 514 instead of 262144,
    # inflating the bound by orders of magnitude. The correct term is N**2.
    return 12.0 * noise_variance * sampling_frequency**2 / (np.pi**2 * N * (N**2 - 1))
def main():
    """Monte-Carlo simulation: estimate a single complex-exponential frequency via
    grid search + Newton refinement, and compare the empirical MSE against the
    CRLB for known and unknown phase across a range of SNRs."""
    # NOTE(review): frequency and sampling_frequency are both divided by 2*pi,
    # so the true frequency sits at exactly half the sampling frequency.
    frequency = 2000 / (2 * np.pi)
    sampling_frequency = 4000 / (2 * np.pi)
    sample_count = 512
    min_frequency = 0
    max_frequency = sampling_frequency
    stop_frequency_delta = 1e-10
    grid_size = 1000
    peak_count = 2
    trial_count = 500
    snrs_dB = np.arange(-15, 31, 5)
    # Per-component noise variances for each SNR (signal power fixed at 1.0).
    noise_vars = np.array([get_noise_power(signal_power=1.0, snr_dB=snr) / 2 for snr in snrs_dB])
    crlb_known_phase = get_crlb_for_known_phase(
        noise_variance=noise_vars,
        sampling_frequency=sampling_frequency,
        sample_count=sample_count)
    crlb_unknown_phase = get_crlb_for_unknown_phase(
        noise_variance=noise_vars,
        sampling_frequency=sampling_frequency,
        sample_count=sample_count)
    mse = np.zeros(len(snrs_dB))
    estimated_frequencies = np.zeros((len(snrs_dB), trial_count))
    for snr_idx, snr_dB in enumerate(snrs_dB):
        print(f"SNR={snr_dB}")
        cur_mse = 0.0
        for trial_idx in range(trial_count):
            # construct signal with noise:
            y = get_signal(
                frequency=frequency,
                sample_count=sample_count,
                sampling_frequency=sampling_frequency,
                snr_dB=snr_dB)
            # Do grid search to find the largest peaks in the likelihood function
            coarse_frequencies = get_coarse_frequency_estimates(
                y=y,
                sampling_frequency=sampling_frequency,
                grid_size=grid_size,
                min_frequency=min_frequency,
                max_frequency=max_frequency,
                peak_count=peak_count)
            # Do a Gauß-Newton search to find the actual peak for each peak
            refined_frequencies = np.array([
                get_refined_frequency_estimate(
                    y=y,
                    sampling_frequency=sampling_frequency,
                    start_frequency=f,
                    stop_frequency_delta=stop_frequency_delta)
                for f in coarse_frequencies
            ])
            # Find the peak that has the largest likelihood
            likelihoods = get_log_likelihood(
                y=y,
                frequencies=refined_frequencies,
                sampling_frequency=sampling_frequency)
            max_likelihood_idx = np.argmax(likelihoods)
            estimated_frequency = refined_frequencies[max_likelihood_idx]
            estimated_frequencies[snr_idx, trial_idx] = estimated_frequency
            # update mean squared error (MSE):
            cur_mse += (frequency - estimated_frequency)**2
        cur_mse /= trial_count
        mse[snr_idx] = cur_mse
    # Empirical bias of the estimator at each SNR.
    bias = np.mean(estimated_frequencies, axis=1) - frequency
    print(f"Bias: {bias}")
    # Plot MSE against both CRLBs on a logarithmic y-axis.
    fig, ax = plt.subplots()
    ax.plot(snrs_dB, mse, label="MSE")
    ax.plot(snrs_dB, crlb_known_phase, label="CRLB known phase")
    ax.plot(snrs_dB, crlb_unknown_phase, label="CRLB unknown phase")
    ax.grid()
    ax.set_yscale('log')
    ax.legend()
    plt.show()


if __name__ == '__main__':
    main()
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement