Advertisement
Guest User

Untitled

a guest
Oct 3rd, 2024
64
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
Python 8.51 KB | None | 0 0
  1. import numpy as np
  2. from matplotlib import pyplot as plt
  3.  
  4.  
# Module-level random generator; unseeded, so simulation results differ run-to-run.
rng = np.random.default_rng()
  6.  
  7.  
  8. def get_noise_power(signal_power: float, snr_dB: float) -> float:
  9.     """Returns the noise power given a signal power and a signal-to-noise ratio (SNR).
  10.  
  11.    Args:
  12.        signal_power (float): power of the signal
  13.        snr_dB (float): SNR in dB
  14.  
  15.    Returns:
  16.        float: _description_
  17.    """
  18.     return signal_power * 10**(-snr_dB / 10)
  19.    
  20.  
  21. def get_signal(frequency: float, sample_count: int, sampling_frequency: float, snr_dB: float) -> np.ndarray:
  22.     "Returns the signal of a certain frequency with additive noise"
  23.    
  24.     k = np.arange(sample_count)
  25.     signal = np.exp(2j * np.pi * k * frequency / sampling_frequency)
  26.  
  27.     signal_power = np.var(signal)
  28.     noise_variance = get_noise_power(signal_power, snr_dB) / 2
  29.     noise = noise_variance * (rng.standard_normal(sample_count) + rng.standard_normal(sample_count) * 1j)
  30.    
  31.     return signal + noise
  32.  
  33.  
  34. def get_log_likelihood(y: np.ndarray, frequencies: np.ndarray, sampling_frequency: float) -> np.ndarray:
  35.     """Returns the log likelihood of frequencies f given samples of the signal
  36.          y[k] = exp(j 2 pi f k / fs),
  37.       with sampling frequency fs:
  38.      
  39.         L(f) = Re{ sum_k=0^(K-1) y[k] exp(-j 2 pi f k / fs) }
  40.    
  41.       Note:
  42.       This is the log likelihood for the case that the amplitude (here: 1), the phase offset
  43.       (here: 0) and the time offset (here: 0) are known.
  44.  
  45.    Args:
  46.        y (np.ndarray): measurement vector
  47.        frequencies (np.ndarray): vector of frequencies for which the likelihood shall be computed
  48.        sampling_frequency (float): the sampling frequency
  49.  
  50.    Returns:
  51.        np.ndarray: a vector of likelihoods
  52.    """    
  53.    
  54.     k = np.arange(y.size)
  55.     return np.real(np.exp(-2j * np.pi / sampling_frequency * np.outer(frequencies, k)) @ y)
  56.  
  57.    
  58. def get_coarse_frequency_estimates(y: np.ndarray, peak_count: int, sampling_frequency: float, min_frequency: float, max_frequency: float, grid_size: int) -> np.ndarray:
  59.     """Given a grid of frequencies, this functions returns 'peak_count' frequencies on the grid that
  60.       have the largest likelihood.
  61.    """
  62.    
  63.     frequencies = np.linspace(min_frequency, max_frequency, grid_size)
  64.     values = get_log_likelihood(y=y, frequencies=frequencies, sampling_frequency=sampling_frequency)
  65.     idx = np.argsort(-values) # find peaks
  66.     return frequencies[idx[:peak_count]]
  67.  
  68.  
  69. def get_refined_frequency_estimate(y: np.ndarray, start_frequency: float, sampling_frequency: float, stop_frequency_delta: float) -> float:
  70.     """Tries to search the local maximum next to start_frequency using the Newton method.
  71.    
  72.    Args:
  73.        y (np.ndarray): measurement vector
  74.        start_frequency (float): initial frequency guess (the initial frequency for the Newton method)
  75.        sampling_frequency (float): the sampling frequency
  76.        stop_frequency_delta (float): If the frequency difference between two iterations is less than this, the algorithm stops.
  77.  
  78.    Returns:
  79.        float: the refined frequency estimate
  80.    """
  81.  
  82.     exp_arg = -2j * np.pi / sampling_frequency
  83.     cur_freq = start_frequency
  84.     freq_delta = np.inf
  85.     k = np.arange(y.size)
  86.     iteration_count = 0
  87.     max_iter_count = np.inf # 100000
  88.     c = sampling_frequency / (2 * np.pi)
  89.  
  90.     #print(f"\nRefining from {cur_freq}...")
  91.     while (np.abs(freq_delta) > stop_frequency_delta) and (iteration_count < max_iter_count):
  92.         iteration_count += 1
  93.         a = np.exp(exp_arg * cur_freq * k) * k
  94.         b = a * k
  95.         freq_delta = 0.1 * c * np.imag(a @ y) / np.real(b @ y)
  96.         cur_freq = cur_freq + freq_delta      
  97.        
  98.         #print(f"  EST: {cur_freq}  -  {freq_delta=}, {stop_frequency_delta=}")
  99.        
  100.     return cur_freq
  101.  
  102.  
  103. def get_crlb_for_known_phase(noise_variance: np.ndarray, sampling_frequency: float, sample_count: int) -> np.ndarray:
  104.     """Returns the Cramér-Rao Lower Bound (CRLB) for the case that the phase offset is known.
  105.  
  106.    Args:
  107.        noise_variance (np.ndarray): vector of noise variances for which the CRLB shall be computed
  108.        sampling_frequency (_type_): the sampling frequency
  109.        sample_count (_type_): the number of samples
  110.  
  111.    Returns:
  112.        np.ndarray: vector of CRLB values
  113.    """
  114.    
  115.     N = sample_count
  116.     return 3.0 * noise_variance * sampling_frequency**2 / (np.pi**2 * N * (N-1)  * (2*N - 1))
  117.  
  118.  
  119. def get_crlb_for_unknown_phase(noise_variance: np.ndarray, sampling_frequency: float, sample_count: int) -> np.ndarray:
  120.     """Returns the Cramér-Rao Lower Bound (CRLB) for the case that the phase offset is NOT known.
  121.  
  122.    Args:
  123.        noise_variance (np.ndarray): vector of noise variances for which the CRLB shall be computed
  124.        sampling_frequency (_type_): the sampling frequency
  125.        sample_count (_type_): the number of samples
  126.  
  127.    Returns:
  128.        np.ndarray: vector of CRLB values
  129.    """
  130.    
  131.     N = sample_count
  132.     return 12.0 * noise_variance * sampling_frequency**2 / (np.pi**2 * N * (N^2-1))
  133.  
  134.  
  135.  
def main():
    # Monte-Carlo simulation: estimate the frequency of a noisy complex tone
    # via maximum likelihood (coarse grid search + Newton refinement) and
    # compare the resulting MSE against the CRLB over a range of SNRs.
    frequency = 2000 / (2 * np.pi)
    sampling_frequency = 4000 / (2 * np.pi)
    sample_count = 512

    # Search/refinement parameters.
    min_frequency = 0
    max_frequency = sampling_frequency
    stop_frequency_delta = 1e-10
    grid_size = 1000
    peak_count = 2

    trial_count = 500
    snrs_dB = np.arange(-15, 31, 5)


    # Per-component noise variance (half the total noise power at unit signal power).
    noise_vars = np.array([get_noise_power(signal_power=1.0, snr_dB=snr) / 2 for snr in snrs_dB])

    crlb_known_phase = get_crlb_for_known_phase(
                        noise_variance=noise_vars,
                        sampling_frequency=sampling_frequency,
                        sample_count=sample_count)

    crlb_unknown_phase = get_crlb_for_unknown_phase(
                        noise_variance=noise_vars,
                        sampling_frequency=sampling_frequency,
                        sample_count=sample_count)

    mse = np.zeros(len(snrs_dB))
    estimated_frequencies = np.zeros((len(snrs_dB), trial_count))
    for snr_idx, snr_dB in enumerate(snrs_dB):
        print(f"SNR={snr_dB}")

        cur_mse = 0.0
        for trial_idx in range(trial_count):
            # construct signal with noise:
            y = get_signal(
                    frequency=frequency,
                    sample_count=sample_count,
                    sampling_frequency=sampling_frequency,
                    snr_dB=snr_dB)

            # Do grid search to find the largest peaks in the likelihood function
            coarse_frequencies = get_coarse_frequency_estimates(
                                    y=y,
                                    sampling_frequency=sampling_frequency,
                                    grid_size=grid_size,
                                    min_frequency=min_frequency,
                                    max_frequency=max_frequency,
                                    peak_count=peak_count)

            # Do a Gauß-Newton search to find the actual peak for each peak
            refined_frequencies = np.array([
                get_refined_frequency_estimate(
                    y=y,
                    sampling_frequency=sampling_frequency,
                    start_frequency=f,
                    stop_frequency_delta=stop_frequency_delta)
                for f in coarse_frequencies
            ])

            # Find  the peak that has the largest likelihood
            likelihoods = get_log_likelihood(
                            y=y,
                            frequencies=refined_frequencies,
                            sampling_frequency=sampling_frequency)
            max_likelihood_idx = np.argmax(likelihoods)
            estimated_frequency = refined_frequencies[max_likelihood_idx]
            estimated_frequencies[snr_idx, trial_idx] = estimated_frequency

            # update mean squared error (MSE):
            cur_mse += (frequency - estimated_frequency)**2

        cur_mse /= trial_count
        mse[snr_idx] = cur_mse

    # Empirical bias of the estimator per SNR (mean estimate minus true frequency).
    bias = np.mean(estimated_frequencies, axis=1) - frequency
    print(f"Bias: {bias}")

    # Plot MSE against both CRLB variants on a log scale.
    fig, ax = plt.subplots()
    ax.plot(snrs_dB, mse, label="MSE")
    ax.plot(snrs_dB, crlb_known_phase, label="CRLB known phase")
    ax.plot(snrs_dB, crlb_unknown_phase, label="CRLB unknown phase")
    ax.grid()
    ax.set_yscale('log')
    ax.legend()

    plt.show()

if __name__ == '__main__':
    main()
  226.  
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement