How can one determine the period of a single element of a dataset?

88 Views Asked by At

I have dummy data and actual data that I suspect modulates over 24 hours

The dummy data is in this Desmos Graph.

I want to find the period of the periodic component of the data that's input. There are other components as well: constant offsets, falling trends, and noise.

The dummy data is in the file createData.py:

import numpy as np
import csv
import random as rdm
from math import exp as e



# Create sample data to test the plotting functions' Lomb-Scargle analysis

# Accumulators for the generated dataset: sample index, timestamp in
# seconds, and the signal value at that sample.
num, time, data = [], [], []

def dataMaker(x):
    """Return a noisy sample: an inverted 24-hour cosine plus a slow
    power-law fall, a constant offset, and random noise in (-10, 10)."""
    amplitude = 45
    phase = (2 * np.pi * x) / 24
    oscillation = amplitude * (-1 * np.cos(phase))
    trend = 250 * x ** (1 / 7)
    offset = 875
    # Re-seed from the system clock so every call is freshly random.
    rdm.seed()
    noise = 10 * rdm.randint(-1, 1) * rdm.random()
    return oscillation - trend + offset + noise

def dummy(x):
    """Dummy signal: a constant minus a damped 24-hour cosine, minus a
    slow exponential trend, plus random noise in (-10, 10)."""
    baseline = 900
    if x == 0:
        # Guard the 1/x term at the origin.
        amplitude = 0
    else:
        amplitude = 35 - ((300 * np.sqrt(x)) * (1 / x))
    angle = (2 * np.pi * x) / 24
    wave = amplitude * np.cos(angle)
    drift = e(x / 50)
    rdm.seed()
    jitter = 10 * rdm.randint(-1, 1) * rdm.random()
    return baseline - wave - drift + jitter

def fakeData(x):
    """Second dummy signal: weaker damped 24-hour modulation and a
    slower exponential trend than dummy(), plus noise in (-10, 10)."""
    base = 850
    # Guard the 1/x term at the origin.
    amp = 0 if x == 0 else 5 - ((10 * np.sqrt(x)) * (1 / x))
    theta = (2 * np.pi * x) / 24
    ripple = amp * np.cos(theta)
    growth = e(x / 80)
    rdm.seed()
    wobble = 10 * rdm.randint(-1, 1) * rdm.random()
    return base - ripple - growth + wobble

def noTrend(x):
    """Trendless test signal: three cosines (periods 24, 48 and 96 in
    units of x) summed onto a constant baseline of 750."""
    terms = (
        10 * np.cos((1 / 12) * np.pi * x),
        5 * np.cos((1 / 24) * np.pi * x),
        np.cos((1 / 48) * np.pi * x),
    )
    return terms[0] + terms[1] + terms[2] + 750

def hiFreq(x):
    """Signal with one slow and two fast cosine components on a
    constant baseline of 450."""
    # (amplitude, multiplier of pi*x/240) for each component.
    components = ((3, 2), (1, 40), (0.5, 20))
    total = 450
    for amp, k in components:
        total = total + amp * np.cos(k * np.pi * x / 240)
    return total

def testData(x):
    constant = 100
    a = 3 * np.cos(2*np.pi*x)
    b = np.cos(40*np.pi*x)
    c = 0.5 * np.cos(20*np.pi*x)
    data = constant + a + b + c
    return data

# Generate 350 hourly samples: index, timestamp in seconds (hour
# boundaries), and the test signal evaluated at x = i/100.
for sample in range(350):
    num.append(sample)
    time.append(float((sample + 1) * 3600))
    data.append(float(testData(sample / 100)))

filePath = 'peaks_Run5.csv'

# Write the generated samples as "index,timestamp,value" rows.
# A context manager guarantees the file is closed even if a write
# fails (the original left the handle open on any exception).
print("\nWriting Data to CSV File for Gain Analysis...")
with open(filePath, "w") as listFile:
    for i in range(len(num)):
        # One formatted write per row instead of six partial writes;
        # the emitted bytes are identical.
        listFile.write("{},{},{}\n".format(num[i], time[i], data[i]))

I used Python, specifically scipy.fft.rfft, to try to find the period. Here's the code, gainAnalysis.py:

# This is a program to analyze and determine periodicity in the Rn Emanation 
# data, in order to hopefully fix at least part of the gain modulation issue
# with the Warm Emanation System
# Written 09-19-2023 by Ryott Glayzer

import numpy as np
import matplotlib.pyplot as plt
import csv, yaml, sys, os
import pandas as pd
import pytz
import matplotlib as mpl

from Config import configFileManagment as cnfg
from datetime import datetime, timedelta
from tqdm.auto import tqdm
from numpy.polynomial import polynomial as poly
from scipy.signal import lombscargle
from scipy.fft import rfft, rfftfreq

class OrganizeData:
    """Load the peaks data and environmental data for one emanation run
    and align them onto a common time axis.

    The constructor eagerly runs the whole pipeline and stores each
    intermediate result as an instance attribute so subclasses can use
    them directly.
    """

    def __init__(self, systemName, runNum, useYaml=True, debug=False):
        # Set Debug Condition
        self.debug = debug
        self.invalid = "\t Invalid Input."
        self.runNum = runNum
        self.systemName = systemName
        self.plotDetrendChoice = True

        # Specify setting Dictionary: either read the per-run YAML file
        # through the project config module, or fall back to a minimal
        # in-memory configuration.
        if useYaml is True:
            yamlFilename = "analysisSettings_{}.yaml".format(runNum)
            tempDictionary = cnfg.setSettings(
                analysisConfigFilename=yamlFilename,
                systemName=systemName
            )
            self.settingDictionary = dict(tempDictionary)
        elif useYaml is False:
            self.settingDictionary = {}
            self.settingDictionary['displayPlots'] = True

        # set return values as instances
        self.peaksDF, self.unixStartTime = self.fetchPeaksData()
        self.envDF = self.fetchEnvironmentalData()
        self.envBinned = self.rebinEnvData()
        # NOTE(review): this assignment shadows the cutEnvData() method
        # with its own return value; the method is unreachable through
        # the instance afterwards. Kept as-is so the attribute name seen
        # by subclasses does not change.
        self.cutEnvData, self.unixEndTime = self.cutEnvData()
        self.plotStartTime, self.plotEndTime, self.plotUnixStart, self.plotUnixEnd = self.plotDates()
        self.envData = self.meanTemp()


    def fetchPeaksData(self):
        """Read the per-run peaks CSV into a DataFrame.

        Returns:
            (peaksDF, unixStartTime): DataFrame with columns BIN,
            TIMESTAMP (absolute unix seconds), PEAK and a derived
            DATETIME column, plus the run's unix start time.

        The csv files under the listOfPeaks directory have the unwritten
        header: "peakCounter","timeRelativeToStartOfSampleList","x_Select"
        from the correctGain.correctGain() module and are named
        'peaks_Run{}.csv' rather than 'listOfPeaks_Run{}.csv'.
        """
        # Specify the CSV Filepath
        peaksCSVFilePath = os.path.join(
            cnfg.scriptDirectory(),
            "listOfPeaks",
            self.systemName,
            "peaks_Run{}.csv".format(self.runNum)
        )

        # Read the CSV File into a Pandas DataFrame
        csvCols = [
            'BIN',
            'TIMESTAMP',
            'PEAK'
        ]
        peaksDF = pd.read_csv(
            peaksCSVFilePath,
            names=csvCols,
            header=None
        )
        if self.debug:
            print(peaksDF)

        # Check Run Settings File for dataStartTime; fall back to a
        # fixed epoch when the run settings do not provide one.
        if "dataStartTime" not in self.settingDictionary:
            print("dataStartTime not in Run Settings.\n")
            self.settingDictionary["dataStartTime"] = '2000-01-01 00:00'
            """
            self.settingDictionary["dataStartTime"] = input(
                "Please input the dataStartTime for Run {}".format(self.runNum) +
                "(example: '2020-10-02 16:05:00'): "
            )
            """
        else:
            if self.debug:
                print("dataStartTime is {}".format(self.settingDictionary["dataStartTime"]))

        # Convert dataStartTime to unix time, localized to US/Mountain.
        dST_Xtz = datetime.strptime(
            self.settingDictionary["dataStartTime"],
            '%Y-%m-%d %H:%M'
        )
        Mtz = pytz.timezone("US/Mountain")
        dST_Mtz = Mtz.localize(dST_Xtz)
        unixStartTime = dST_Mtz.timestamp()
        if self.debug:
            print("unixStartTime is {}".format(unixStartTime))

        # Add the unixStartTime to each of the TIMESTAMP values in peaksDF
        # so TIMESTAMP becomes absolute unix time.
        peaksDF['TIMESTAMP'] = peaksDF['TIMESTAMP'] + unixStartTime
        if self.debug:
            print(peaksDF)

        # Add a DateTime Object to the DataFrame
        peaksDF['DATETIME'] = pd.to_datetime(peaksDF['TIMESTAMP'], unit='s')
        if self.debug:
            print(peaksDF)


        return peaksDF, unixStartTime


    def fetchEnvironmentalData(self):
        """Read the environmental data CSV and drop unusable columns.

        Returns a DataFrame with timestamps tied to environmental
        readings, for comparing runs against environmental data.
        In order to know which file we're fetching, we must know the
        applicable dataStartTime.
        """
        # Determine dateFactor based on dates between environmental Data files
        #
        # THIS WILL BE DONE AFTER AN INVENTORY IS TAKEN OF TT_ENV_DATA FILE HEADERS
        # IS DONE IN manipulateTabletopdata.py
        #
        # For now, a dateFactor of 0 will work as long as our emanation run is
        # Run No. 652 or later.
        dateFactor = 0

        # Specify the Environmental Data file Path
        envDataFilePath = os.path.join(
            cnfg.scriptDirectory(),
            'environmentalData',
            'ttEnvData{}.csv'.format(dateFactor)
        )

        # Extract the envData into a Pandas DataFrame
        iEnvDF = pd.read_csv(
            envDataFilePath,
            header=0
        )
        if self.debug:
            print("envDF Before Cutting Unusable Data: ")
            print(iEnvDF)

        # Humidity and pressure columns are measured in a closed system
        # and are not representative of lab conditions; drop them.
        envDF = iEnvDF.drop(['R0_RH','R1_RH','P0_P','P1_P'],axis=1)

        if self.debug:
            print("envDF after cutting Unusable Data: ")
            print(envDF)

        return envDF


    def rebinEnvData(self):
        """Sort the environmental data by time and resample it into
        5-minute mean bins.

        Returns the binned DataFrame (indexed by bin time, with a
        duplicate 'dateTime' column for convenience).

        NOTE(review): this mutates self.envDF in place (it adds the
        'dateTime'/'timeDiff' columns to the same object).
        """
        # call the envData DataFrame
        if self.debug:
            print("Fetching the DataFrame from Environmental Data...")
        envData = self.envDF
        if self.debug:
            print("envData: \n")
            print(envData)

        # Now I need to be able detect skips in the time series, and then bin the
        # data between those skips

        # Sort envData into ascending order
        envData['dateTime'] = pd.to_datetime(envData['dateTime'])
        if self.debug:
            print(envData)
        envData.sort_values(by='dateTime',inplace=True)
        if self.debug:
            print("envData: \n")
            print(envData)

        # Measure Gaps in the Datetime Objects and set a gap threshold;
        # gapsDF/zerosDF are currently only inspected in debug output.
        envData['timeDiff'] = envData['dateTime'].diff()
        gapThreshold = pd.Timedelta(minutes=3)
        gapsDF = envData[envData['timeDiff'] > gapThreshold]
        if self.debug:
            print("gapsDF: \n")
            print(gapsDF)

        # Create a DataFrame of all rows where 'num' == 0
        zerosDF = envData[envData['num'] == 0]
        if self.debug:
            print("zerosDF: \n")
            print(zerosDF)

        # Resample to 5 minute bins (mean of each bin).
        envBinned = envData.resample("5Min", on="dateTime").mean()
        envBinned['dateTime'] = envBinned.index
        if self.debug:
            print("envBinned: \n")
            print(envBinned)

        return envBinned


    def cutEnvData(self):
        """Cut the rebinned environmental data down to this run's window.

        Returns:
            (cutEnvData, unixEndTime): the sliced DataFrame (or an empty
            list when slicing fails) and the run's last peak timestamp.
        """
        # The run end time is the highest timestamp in the peaks data.
        unixEndTime = self.peaksDF['TIMESTAMP'].max()
        if self.debug:
            print("unixEndTime: ")
            print(unixEndTime)

        # This will cut the data between the unixStartTime and unixEndTime
        try:
            cutEnvData = self.envBinned[
                (self.unixStartTime < self.envBinned['timeStamp']) & (self.envBinned['timeStamp'] < unixEndTime)
            ]
            if self.debug:
                print("cutEnvData: \n")
                print(cutEnvData)
        except Exception:
            # Narrowed from a bare `except:` so SystemExit and
            # KeyboardInterrupt still propagate. A missing 'timeStamp'
            # column raises KeyError and lands here.
            cutEnvData = []
            # Fixed NameError: the original referenced an undefined
            # module-level name `runNum` in this message.
            print("Environmental Data does not exist for the length of Run {}".format(self.runNum))

        return cutEnvData, unixEndTime


    def plotDates(self):
        """Compute the plotting window: run start/end padded by one day
        on each side, both as timezone-aware datetimes (America/Denver)
        and as raw unix seconds.

        Returns (plotStartTime, plotEndTime, plotUnixStart, plotUnixEnd).
        """
        # Convert Unix Times to DateTime Objects
        plotStartTime = datetime.fromtimestamp(
            self.unixStartTime,
            tz=pytz.timezone('America/Denver')
        ) - timedelta(days=1)
        plotEndTime = datetime.fromtimestamp(
            self.unixEndTime,
            tz=pytz.timezone('America/Denver')
        ) + timedelta(days=1)

        # 86400 s = one day of padding in unix time.
        plotUnixStart = self.unixStartTime - 86400
        plotUnixEnd = self.unixEndTime + 86400

        if self.debug:
            print(plotStartTime)
            print(plotEndTime)

        return plotStartTime, plotEndTime, plotUnixStart, plotUnixEnd


    def meanTemp(self):
        """Add a 'meanTemp' column averaging the four temperature
        sensors row-wise, and return the resulting DataFrame.

        NOTE(review): mutates self.cutEnvData in place.
        """
        tempCols = ['R0_T','R1_T','P0_T','P1_T']
        self.cutEnvData['meanTemp'] = self.cutEnvData[tempCols].mean(axis=1)
        envData = self.cutEnvData
        if self.debug:
            print("New Env Data with Mean Temperature: ")
            print(envData)
        return envData
        
   

class AnalyzeGain(OrganizeData):
    """Fourier analysis of the peaks time series prepared by OrganizeData."""

    def __init__(self, systemName, runNum, useYaml=True, debug=False):
        # Build the underlying data pipeline first.
        super().__init__(systemName, runNum, useYaml, debug)

        # Peak values as a plain numpy array for the FFT routines.
        self.peakArray = np.array(self.peaksDF['PEAK'])
        self.peaksFourier, self.normalizeFFT, self.peaksFFTFreq = self.peaksFFT()

    def peaksFFT(self):
        """Run a real FFT over the hourly peak values.

        Returns:
            (spectrum, normalizeFFT, frequency_axis): the complex rFFT
            spectrum, the amplitude normalisation factor N/2, and the
            frequency axis in Hz (sample spacing d=3600 s).
        """
        sampleCount = len(self.peakArray)
        spectrum = rfft(self.peakArray)
        freqAxis = rfftfreq(sampleCount, d=3600)
        return spectrum, sampleCount / 2, freqAxis






class PlotAnalysis(AnalyzeGain):
    """Plotting front-end for the peaks data, the environmental data and
    the FFT spectra produced by AnalyzeGain."""

    def __init__(self, systemName, runNum, useYaml=True, debug=False):
        # This calls the OrganizeData Object (via AnalyzeGain).
        super().__init__(systemName,runNum,useYaml,debug)



    def plotPeaks(self):
        """Plot the peaks series; in debug mode also plot it against bin
        hours and unix time."""
        if self.debug:
            # This will plot the bin Hours vs Peaks
            plt.figure("Peaks vs Hours (Bad Intervals Removed) Run {}".format(self.runNum))
            plt.title("Peaks vs Hours (Bad Intervals Removed) Run {}".format(self.runNum))
            plt.plot(
                self.peaksDF['BIN'],
                self.peaksDF['PEAK']
            )
            plt.xlabel("Time (Hours)")
            plt.ylabel("Energy Channel")
            plt.grid()

            # This will plot the unix time vs peaks
            plt.figure("Peaks vs Unix Time Run {}".format(self.runNum))
            plt.title("Peaks vs Unix Time Run {}".format(self.runNum))
            plt.plot(
                self.peaksDF['TIMESTAMP'],
                self.peaksDF['PEAK']
            )
            plt.xlabel("Date")
            plt.ylabel("Energy Channel")
            plt.xlim(self.plotStartTime.timestamp(),self.plotEndTime.timestamp())
            plt.grid()

        # This will plot the datetime vs peaks
        plt.figure("Peaks vs Date Run {}".format(self.runNum))
        plt.title("Peaks vs Date Run {}".format(self.runNum))
        plt.plot(
            self.peaksDF['DATETIME'],
            self.peaksDF['PEAK']
        )
        plt.xlabel("Date")
        plt.ylabel("Energy Channel")
        plt.xlim(self.plotStartTime,self.plotEndTime)
        plt.grid()
        plt.show(block=False)
        #input("Press Enter to close all plots.")


    def plotEnvData(self):
        """Plot the mean lab temperature over the run window; in debug
        mode also plot all four raw sensors.

        It seems that, at least with the Tabletop data, the Relative
        Humidity and Pressure measurements are not likely to be
        representative of the environmental conditions in the laboratory,
        as they are measuring the environmental conditions in a closed
        system separate from the Radon Emanation System.

        The tabletop Temperature measurement likely has an offset of
        temperature value, though that is okay as the variation in
        temperature is likely representative of that across the entire
        lab.
        """
        plt.figure("Mean Temperature vs Time Run {}".format(self.runNum))
        plt.title("Mean Temperature vs Time Run {}".format(self.runNum))
        plt.plot(
            self.envData['dateTime'],
            self.envData['P1_T'],
            color='red',
            label='Temperature'
        )
        plt.xlabel('Date')
        plt.ylabel('Temperature (C)')
        plt.xlim(self.plotStartTime,self.plotEndTime)
        plt.legend()
        plt.grid()

        if self.debug:
            plt.figure("Temperature vs Time Run {}".format(self.runNum))
            plt.title("Temperature vs Time Run {}".format(self.runNum))
            plt.plot(
                self.envData['dateTime'],
                self.envData['R1_T'],
                color='red',
                label='R1_T Sensor'
            )
            plt.plot(
                self.envData['dateTime'],
                self.envData['R0_T'],
                color='blue',
                label='R0_T Sensor'
            )
            plt.plot(
                self.envData['dateTime'],
                self.envData['P0_T'],
                color='green',
                label='P0_T Sensor'
            )
            plt.plot(
                self.envData['dateTime'],
                self.envData['P1_T'],
                color='purple',
                label='P1_T Sensor'
            )
            plt.xlabel('Date')
            plt.ylabel('Temperature (C)')
            plt.xlim(self.plotStartTime,self.plotEndTime)
            plt.legend()
            plt.grid()

        plt.show(block=False)
        input("Press Enter to Close Plots and End")


    def plotPeaksVsEnv(self):
        """Overlay temperature and peaks on a shared time axis using a
        twin y-axis."""
        plt.figure("Temperature vs Peaks vs Time Run {}".format(self.runNum))
        plt.title("Temperature vs Peaks vs Time Run {}".format(self.runNum))
        plt.plot(
            self.envData['dateTime'],
            self.envData['P1_T'],
            color='purple',
            label='Temperature'
        )
        plt.xlabel('Date')
        plt.ylabel('Temperature (C)')
        plt.xlim(self.plotStartTime,self.plotEndTime)
        plt.legend()
        # Fixed: the original called plt.gca().twinx() twice, which
        # creates TWO separate twin axes — the ylabel ended up on an
        # empty axis. Create the twin once and reuse it.
        twinAxis = plt.gca().twinx()
        twinAxis.plot(
            self.peaksDF['DATETIME'],
            self.peaksDF['PEAK'],
            color='black',
            label='Peaks'
        )
        twinAxis.set_ylabel(
            'Energy Channel',
            color='black'
        )
        plt.grid()



        plt.show(block=False)
        input("Done. Press Enter to Close Plots and End ")


    def plotPeaksFFT(self):
        """Plot the raw rFFT magnitude |Xk| from AnalyzeGain.peaksFFT()."""
        plt.figure("Peaks rFFT for Run {}".format(self.runNum))
        plt.title("Peaks rFFT for Run {}".format(self.runNum))
        plt.plot(np.abs(self.peaksFourier))
        plt.show(block=False)
        #input("Press Enter to Close Plots")


    def plotPeaksFFTNorm(self):
        """Plot the rFFT magnitude normalised by N/2 against sample index."""
        plt.figure("Peaks rFFT for Run {}".format(self.runNum))
        plt.title("Peaks rFFT for Run {}".format(self.runNum))
        plt.plot(np.abs(self.peaksFourier)/self.normalizeFFT)
        plt.xlabel('Samples')
        plt.ylabel('Amplitude')
        plt.show(block=False)
        #input("Press Enter to Close Plots")


    def plotPeaksFFTFreq(self):
        """Plot normalised amplitude against frequency in Hz."""
        plt.figure("Peaks rFFT Frequency Spectrum for Run {}".format(self.runNum))
        plt.title("Peaks rFFT Frequency Spectrum for Run {}".format(self.runNum))
        plt.plot(
            self.peaksFFTFreq,
            np.abs(self.peaksFourier)/self.normalizeFFT
        )
        plt.xlabel('Frequency (Hz)')
        plt.ylabel('Amplitude')
        plt.show(block=False)
        #input("Press Enter to Close Plots")


    def plotPeaksFFTPeriodicity(self):
        """Scatter normalised amplitude against period in hours.

        The DC bin (frequency 0) is skipped — the original divided by
        zero there, producing an infinite period.
        """
        plt.figure("Peaks rFFT Periodicity Spectrum for Run {}".format(self.runNum))
        plt.title("Peaks rFFT Periodicity Spectrum for Run {}".format(self.runNum))
        plt.scatter(
            1/(3600*self.peaksFFTFreq[1:]),
            np.abs(self.peaksFourier[1:])/self.normalizeFFT
        )
        plt.xlabel('Period (Hr)')
        plt.ylabel('Value')
        plt.show(block=False)
        input("Press Enter to Close Plots")




    def plotRawTemps(self):
        """Quick-look plot of the raw R1_T sensor over the full dataset."""
        plt.figure('Raw Temps Run')
        plt.plot(
            self.envDF['dateTime'],
            self.envDF['R1_T']
        )
        plt.grid()
        plt.show(block=False)
        input("Enter!!!")



"""
PYTHON PROGRAM PLANNING:

This program will fetch two sets of peaks data:

The first set consists of the peaks of the raw data with the bad intervals 
removed, which could cause issues with the periodicity measurements as there is
data missing that could affect the apparent period of the modulation. This data
will likely use a fourier analysis to analyze the periodicity/frequency

The second set consists of the peaks of the raw data, but with the bad intervals
included as blank space in the data (it will appear as if no data was taken 
during the bad interval periods). This dataset will likely use a Lomb-Scargle
Periodogram as that is better for broken-up datasets. 

This program can only be used with a guess function gain correction type.

It will use the scikit-learn machine learning modules to do a polynomial 
regression and detrend the peaks data interactively to find the best fit to 
detrend the data and analyze the possible periodicity of the data

It will then detrend and smooth using a Savitzky-Golay Filter in order to make
easier for the numerical methods to fit a composition of sinusoidal functions to
the data.

It will also calculate the extrema of the periodic data and measure the periods
of the modulation, as well as normalize the peaks and troughs of the data for
another FFT analysis of the data, though this may be overfitting the data 
(check with schnee)

It will also measure and analyze environmental data and plot it against the
raw peaks data in order to determine what may be causing the gain shift.

The environmental data from Joseph will be added to the git repository probably 
in a new pythonrnemanationanalysis/environmentalData directory and will be 
managed by a separate program within that directory and organized into a CSV
file within the emanationAnalysis directory, like the peaksData CSV files are.

This program will call upon that CSV data and analyze/plot it.

It will interact/interface with main.py and will read from runSettings directory
"""








""" 
DESCRIPTION OF PROGRAM:








"""

# Area to test methods


def main():
    """Run the full plotting pipeline for run 0 of the WES system."""
    plot = PlotAnalysis('WES',0, useYaml=False)

    plot.plotPeaks()
    #plot.plotEnvData()
    #plot.plotPeaksVsEnv()
    #plot.plotPeaksFFT()
    plot.plotPeaksFFTNorm()
    plot.plotPeaksFFTFreq()
    plot.plotPeaksFFTPeriodicity()


# Guard the script entry point so importing this module no longer
# triggers the analysis as a side effect; running it directly behaves
# exactly as before.
if __name__ == '__main__':
    main()

and heres peaks_Run0.csv:

0,3600.0,827
1,7200.0,581
2,10800.0,552
3,14400.0,550
4,18000.0,554
5,21600.0,549
6,25200.0,552
7,28800.0,550
8,32400.0,567
9,36000.0,558
10,39600.0,566
11,43200.0,573
12,46800.0,561
13,50400.0,557
14,54000.0,543
15,57600.0,539
16,61200.0,526
17,64800.0,511
18,68400.0,494
19,72000.0,482
20,75600.0,470
21,79200.0,456
22,82800.0,447
23,86400.0,440
24,90000.0,430
25,93600.0,427
26,97200.0,437
27,100800.0,449
28,104400.0,440
29,108000.0,457
30,111600.0,468
31,115200.0,474
32,118800.0,482
33,122400.0,494
34,126000.0,499
35,129600.0,512
36,133200.0,502
37,136800.0,503
38,140400.0,493
39,144000.0,477
40,147600.0,466
41,151200.0,461
42,154800.0,448
43,158400.0,435
44,162000.0,423
45,165600.0,405
46,169200.0,404
47,172800.0,398
48,176400.0,404
49,180000.0,401
50,183600.0,391
51,187200.0,413
52,190800.0,403
53,194400.0,429
54,198000.0,426
55,201600.0,443
56,205200.0,453
57,208800.0,464
58,212400.0,466
59,216000.0,470
60,219600.0,471
61,223200.0,464
62,226800.0,461
63,230400.0,457
64,234000.0,452
65,237600.0,441
66,241200.0,414
67,244800.0,407
68,248400.0,395
69,252000.0,385
70,255600.0,373
71,259200.0,376
72,262800.0,378
73,266400.0,370
74,270000.0,373
75,273600.0,379
76,277200.0,378
77,280800.0,392
78,284400.0,405
79,288000.0,419
80,291600.0,429
81,295200.0,439
82,298800.0,444
83,302400.0,445
84,306000.0,441
85,309600.0,443
86,313200.0,441
87,316800.0,429
88,320400.0,417
89,324000.0,407
90,327600.0,396
91,331200.0,387
92,334800.0,381
93,338400.0,365
94,342000.0,355
95,345600.0,348
96,349200.0,350
97,352800.0,355
98,356400.0,361
99,360000.0,366
100,363600.0,377
101,367200.0,379
102,370800.0,382
103,374400.0,403
104,378000.0,414
105,381600.0,420
106,385200.0,435
107,388800.0,431
108,392400.0,431
109,396000.0,423
110,399600.0,421
111,403200.0,416
112,406800.0,398
113,410400.0,393
114,414000.0,375
115,417600.0,370
116,421200.0,359
117,424800.0,349
118,428400.0,347
119,432000.0,337
120,435600.0,340
121,439200.0,344
122,442800.0,344
123,446400.0,338
124,450000.0,348
125,453600.0,365
126,457200.0,376
127,460800.0,394
128,464400.0,389
129,468000.0,406
130,471600.0,412
131,475200.0,416
132,478800.0,427
133,482400.0,421
134,486000.0,410
135,489600.0,402
136,493200.0,386
137,496800.0,387
138,500400.0,362
139,504000.0,357
140,507600.0,346
141,511200.0,336
142,514800.0,328
143,518400.0,329
144,522000.0,321
145,525600.0,318
146,529200.0,326
147,532800.0,326
148,536400.0,341
149,540000.0,352
150,543600.0,356
151,547200.0,374
152,550800.0,392
153,554400.0,402
154,558000.0,400
155,561600.0,399
156,565200.0,415
157,568800.0,399
158,572400.0,405
159,576000.0,397
160,579600.0,372
161,583200.0,369
162,586800.0,354
163,590400.0,352
164,594000.0,332
165,597600.0,332
166,601200.0,326
167,604800.0,317
168,608400.0,310
169,612000.0,301
170,615600.0,320
171,619200.0,317
172,622800.0,336
173,626400.0,341
174,630000.0,347
175,633600.0,363
176,637200.0,378
177,640800.0,383
178,644400.0,395
179,648000.0,393
180,651600.0,399
181,655200.0,393
182,658800.0,388
183,662400.0,375
184,666000.0,376
185,669600.0,366
186,673200.0,347
187,676800.0,343
188,680400.0,324
189,684000.0,314
190,687600.0,311
191,691200.0,302
192,694800.0,303
193,698400.0,301
194,702000.0,310
195,705600.0,312
196,709200.0,330
197,712800.0,334
198,716400.0,342
199,720000.0,351
200,723600.0,366
201,727200.0,373
202,730800.0,380
203,734400.0,387
204,738000.0,385
205,741600.0,375
206,745200.0,387
207,748800.0,371
208,752400.0,360
209,756000.0,350
210,759600.0,340
211,763200.0,320
212,766800.0,313
213,770400.0,313
214,774000.0,302
215,777600.0,293
216,781200.0,291
217,784800.0,292
218,788400.0,296
219,792000.0,306
220,795600.0,304
221,799200.0,322
222,802800.0,327
223,806400.0,339
224,810000.0,349
225,813600.0,364
226,817200.0,374
227,820800.0,376
228,824400.0,369
229,828000.0,375
230,831600.0,376
231,835200.0,369
232,838800.0,354
233,842400.0,337
234,846000.0,329
235,849600.0,317
236,853200.0,314
237,856800.0,300
238,860400.0,289
239,864000.0,288
240,867600.0,280
241,871200.0,284
242,874800.0,288
243,878400.0,295
244,882000.0,310
245,885600.0,313
246,889200.0,319
247,892800.0,337
248,896400.0,347
249,900000.0,356
250,903600.0,358
251,907200.0,367
252,910800.0,361
253,914400.0,360
254,918000.0,362
255,921600.0,355
256,925200.0,345
257,928800.0,332
258,932400.0,322
259,936000.0,306
260,939600.0,305
261,943200.0,298
262,946800.0,275
263,950400.0,282
264,954000.0,276
265,957600.0,281
266,961200.0,281
267,964800.0,287
268,968400.0,296
269,972000.0,313
270,975600.0,309
271,979200.0,337
272,982800.0,340
273,986400.0,352
274,990000.0,356
275,993600.0,365
276,997200.0,361
277,1000800.0,368
278,1004400.0,355
279,1008000.0,347
280,1011600.0,338
281,1015200.0,329
282,1018800.0,316
283,1022400.0,297
284,1026000.0,301
285,1029600.0,278
286,1033200.0,271
287,1036800.0,270
288,1040400.0,265
289,1044000.0,269
290,1047600.0,274
291,1051200.0,276
292,1054800.0,289
293,1058400.0,308
294,1062000.0,311
295,1065600.0,323
296,1069200.0,330
297,1072800.0,352
298,1076400.0,345
299,1080000.0,345
300,1083600.0,346
301,1087200.0,353
302,1090800.0,346
303,1094400.0,339
304,1098000.0,331
305,1101600.0,320
306,1105200.0,318
307,1108800.0,298
308,1112400.0,277
309,1116000.0,271
310,1119600.0,268
311,1123200.0,263
312,1126800.0,262
313,1130400.0,256
314,1134000.0,267
315,1137600.0,282
316,1141200.0,278
317,1144800.0,295
318,1148400.0,295
319,1152000.0,316
320,1155600.0,336
321,1159200.0,341
322,1162800.0,343
323,1166400.0,355
324,1170000.0,345
325,1173600.0,349
326,1177200.0,342
327,1180800.0,335
328,1184400.0,326
329,1188000.0,320
330,1191600.0,298
331,1195200.0,290
332,1198800.0,276
333,1202400.0,264
334,1206000.0,262
335,1209600.0,262
336,1213200.0,257
337,1216800.0,261
338,1220400.0,256
339,1224000.0,268
340,1227600.0,277
341,1231200.0,292
342,1234800.0,303
343,1238400.0,311
344,1242000.0,329
345,1245600.0,337
346,1249200.0,330
347,1252800.0,341
348,1256400.0,343
349,1260000.0,341

and I got values of either ~23.5 or 25 for the period of the data, which I know isn't the period because the dummy data has a period of 24 hours.

I expected to get a value of 24 hours. Not sure what else to do here.

In order to reproduce this, the csv file needs to be in a directory ./Config/listOfPeaks/WES/

1

There is 1 best solution below

0
Reinderien On

You should trim your time series so that it's a multiple of your predicted period. Significantly simplifying your code,

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pytz
from datetime import datetime, timedelta
from scipy.fft import rfft, rfftfreq


class OrganizeData:
    """Load the peaks CSV for one run and derive the plotting window."""

    def __init__(self, runNum: int) -> None:
        self.runNum = runNum
        self.settingDictionary = {'displayPlots': True}
        self.peaksDF, self.unixStartTime = self.fetchPeaksData()
        self.unixEndTime = self.cutEnvData()
        self.plotStartTime, self.plotEndTime, self.plotUnixStart, self.plotUnixEnd = self.plotDates()

    def fetchPeaksData(self) -> tuple[pd.DataFrame, float]:
        """Read peaks_Run<N>.csv and attach absolute unix timestamps."""
        frame = pd.read_csv(
            "peaks_Run{}.csv".format(self.runNum),
            names=['BIN', 'TIMESTAMP', 'PEAK'],
            header=None
        )

        # Fall back to a fixed epoch when no start time is configured.
        if "dataStartTime" not in self.settingDictionary:
            print("dataStartTime not in Run Settings.\n")
            self.settingDictionary["dataStartTime"] = '2000-01-01 00:00'

        # Localize the configured start time to US/Mountain and convert
        # it to unix seconds.
        naiveStart = datetime.strptime(
            self.settingDictionary["dataStartTime"],
            '%Y-%m-%d %H:%M'
        )
        mountain = pytz.timezone("US/Mountain")
        unixStartTime = mountain.localize(naiveStart).timestamp()

        # Shift the relative timestamps to absolute unix time and add a
        # datetime column for plotting.
        frame['TIMESTAMP'] = frame['TIMESTAMP'] + unixStartTime
        frame['DATETIME'] = pd.to_datetime(frame['TIMESTAMP'], unit='s')

        return frame, unixStartTime

    def cutEnvData(self) -> float:
        """Return the last peak timestamp, used as the run's end time."""
        return self.peaksDF['TIMESTAMP'].max()

    def plotDates(self) -> tuple[datetime, datetime, float, float]:
        """Pad the run window by one day on each side; return it both as
        aware datetimes (America/Denver) and as raw unix seconds."""
        denver = pytz.timezone('America/Denver')
        padding = timedelta(days=1)
        plotStartTime = datetime.fromtimestamp(self.unixStartTime, tz=denver) - padding
        plotEndTime = datetime.fromtimestamp(self.unixEndTime, tz=denver) + padding

        plotUnixStart = self.unixStartTime - 86400
        plotUnixEnd = self.unixEndTime + 86400

        return plotStartTime, plotEndTime, plotUnixStart, plotUnixEnd


class AnalyzeGain(OrganizeData):
    """FFT analysis of the peaks series, trimmed to whole 24 h days."""

    def __init__(self, runNum: int) -> None:
        # This calls the OrganizeData object
        super().__init__(runNum)

        peaks = np.array(self.peaksDF['PEAK'])
        # peakArray is hourly: keep only complete 24-sample days so the
        # daily component falls exactly on an FFT bin.
        wholeDays = (peaks.size // 24) * 24
        self.peakArray = peaks[:wholeDays]

        self.peaksFourier, self.normalizeFFT, self.peaksFFTFreq = self.peaksFFT()

    def peaksFFT(self) -> tuple[np.ndarray, float, np.ndarray]:
        """Real FFT of the trimmed series.

        Returns the spectrum, the amplitude normalisation N/2, and the
        frequency axis in Hz (1 h sample spacing).
        """
        count = len(self.peakArray)
        return rfft(self.peakArray), count / 2, rfftfreq(count, d=3600)


class PlotAnalysis(AnalyzeGain):
    """Plotting helpers for the peaks series and its FFT spectra."""

    def __init__(self, runNum: int) -> None:
        super().__init__(runNum)

    def plotPeaks(self) -> None:
        """Plot the peak values against calendar date for the run window."""
        caption = "Peaks vs Date Run {}".format(self.runNum)
        plt.figure(caption)
        plt.title(caption)
        plt.plot(self.peaksDF['DATETIME'], self.peaksDF['PEAK'])
        plt.xlabel("Date")
        plt.ylabel("Energy Channel")
        plt.xlim(self.plotStartTime, self.plotEndTime)
        plt.grid()

    def plotPeaksFFTNorm(self) -> None:
        """Plot the normalised rFFT magnitude against sample index."""
        caption = "Peaks rFFT for Run {}".format(self.runNum)
        plt.figure(caption)
        plt.title(caption)
        plt.plot(np.abs(self.peaksFourier) / self.normalizeFFT)
        plt.xlabel('Samples')
        plt.ylabel('Amplitude')

    def plotPeaksFFTFreq(self) -> None:
        """Plot normalised amplitude against frequency in micro-hertz."""
        caption = "Peaks rFFT Frequency Spectrum for Run {}".format(self.runNum)
        plt.figure(caption)
        plt.title(caption)
        plt.plot(
            self.peaksFFTFreq * 1e6,
            np.abs(self.peaksFourier) / self.normalizeFFT
        )
        plt.xlabel('Frequency (uHz)')
        plt.ylabel('Amplitude')

    def plotPeaksFFTPeriodicity(self) -> None:
        """Scatter normalised amplitude against period in hours,
        skipping the zero-frequency DC bin."""
        caption = "Peaks rFFT Periodicity Spectrum for Run {}".format(self.runNum)
        plt.figure(caption)
        plt.title(caption)
        plt.scatter(
            1 / (3600 * self.peaksFFTFreq[1:]),
            np.abs(self.peaksFourier[1:]) / self.normalizeFFT,
        )
        plt.xlabel('Period (Hr)')
        plt.ylabel('Value')


def main() -> None:
    """Build the analysis for run 0 and render all plots."""
    analysis = PlotAnalysis(runNum=0)
    analysis.plotPeaks()
    analysis.plotPeaksFFTNorm()
    analysis.plotPeaksFFTFreq()
    analysis.plotPeaksFFTPeriodicity()
    plt.show()


if __name__ == '__main__':
    main()

Cleaner spike at 11.574 uHz (one day):

spectrum