diff --git a/galore/cli/galore_get_cs.py b/galore/cli/galore_get_cs.py
index ed87708..6c6c0ec 100644
--- a/galore/cli/galore_get_cs.py
+++ b/galore/cli/galore_get_cs.py
@@ -22,30 +22,64 @@ from argparse import ArgumentParser
 import galore.cross_sections
 
+
 def main():
     parser = get_parser()
     args = parser.parse_args()
     args = vars(args)
     run(**args)
 
+
 def get_parser():
     parser = ArgumentParser()
+    parser.add_argument('energy', type=str, help="""
+    If no dataset is given:
     Photon energy, expressed as source type: "he2" for He (II), "alka" for
     Al k-alpha, (values from Yeh/Lindau (1985)) or as energy in keV (values
-    from polynomial fit to Scofield (1973)).""")
-    parser.add_argument('elements', type=str, nargs='+', help="""
+    from polynomial fit to Scofield (1973)).
+    If a dataset is given:
+    Photon energy, 1 to 1500 keV for the Scofield dataset, 10.2 to 8047.8 eV
+    for the Yeh dataset.""")
+
+    parser.add_argument('elements', nargs='+', help="""
     Space-separated symbols for elements in material.""")
+    parser.add_argument('--dataset', type=str.lower,
+                        choices=['scofield', 'yeh'],
+                        help="""Accepted values are 'Scofield' and 'Yeh'.""")
+
     return parser
 
-def run(energy, elements):
-    cross_sections = galore.get_cross_sections(energy, elements)
+
+def run(energy, elements, dataset=None):
+    cross_sections = galore.get_cross_sections(energy, elements, dataset)
     logging = galore.cross_sections.cross_sections_info(cross_sections)
+
+    # some inputs lead to a None cross-sections result
+    if cross_sections is None:
+        logging.warning("The cross sections are None; please check the input")
+        return
+
+    if dataset is not None:
+        # inform the user of the closest tabulated energy to the input
+        logging.warning('The closest tabulated energy is {energy}'.format(
+            energy=cross_sections['energy']))
+
+        # inform the user if the energy input is out of range
+        if dataset.lower() == 'scofield' and float(energy) > 1500:
+            logging.warning('The maximum energy of the Scofield dataset '
+                            'is 1500 keV')
+
     logging.info("Photoionisation cross sections per electron:")
     for element in elements:
+
         if 'warning' in cross_sections[element]:
             logging.warning("  {0}: {1}".format(
                 element, cross_sections[element]['warning']))
diff --git a/galore/cli/galore_install_data.py b/galore/cli/galore_install_data.py
new file mode 100644
index 0000000..1b37e61
--- /dev/null
+++ b/galore/cli/galore_install_data.py
@@ -0,0 +1,31 @@
+from argparse import ArgumentParser
+import galore.cross_sections
+
+
+def main():
+    parser = get_parser()
+    args = parser.parse_args()
+    args = vars(args)
+    run(**args)
+
+
+def get_parser():
+    parser = ArgumentParser()
+    parser.add_argument('dataset', type=str.lower, choices=['scofield', 'yeh'],
+                        help="""Accepted values are 'Scofield' and 'Yeh'""")
+
+    return parser
+
+
+def run(dataset):
+    if dataset.lower() in ('scofield', 'yeh'):
+        url, data_file_dir, data_file_path = galore.cross_sections.get_csv_file_path(
+            dataset)
+        galore.cross_sections.galore_install_data(
+            url, data_file_dir, data_file_path)
+
+    else:
+        print("Dataset '{dataset}' was not recognised. "
+              "Accepted values are 'Scofield' and 'Yeh'.".format(dataset=dataset))
diff --git a/galore/cross_sections.py b/galore/cross_sections.py
index 6ab7470..300f3b2 100644
--- a/galore/cross_sections.py
+++ b/galore/cross_sections.py
@@ -1,3 +1,11 @@
+import logging
+import urllib.request
+import os
+import platform
+import zipfile
+import numpy as np
+from difflib import SequenceMatcher
+
 import os.path
 from pkg_resources import resource_filename
 from json import load as json_load
@@ -9,7 +17,7 @@
 from numpy import exp, log
 
 
-def get_cross_sections(weighting, elements=None):
+def get_cross_sections(weighting, elements=None, dataset=None):
     """Get photoionization cross-section weighting data.
 
     For known sources, data is based on tabulation of Yeh/Lindau (1985).[1]
@@ -23,8 +31,9 @@
        Report No. UCRL-51326
 
     Args:
-        weighting (str or float): Data source for photoionization
-            cross-sections. If the string is a known keyword then data will
+        weighting (str or float): If dataset is None, the data source for
+            photoionization cross-sections; if dataset is given, the photon
+            energy to look up. If the string is a known keyword then data will
            be drawn from files included with Galore. Otherwise, the string
            will be interpreted as a path to a JSON file containing data
            arranged in the same way as the output of this function.
@@ -33,6 +42,9 @@
            included. When using a JSON dataset (including the inbuilt
            Yeh/Lindau) this parameter will be ignored as the entire dataset
            has already been loaded into memory.
+        dataset (str or None): If None, weighting and elements are treated as
+            described above. If given, it should be 'Scofield' or 'Yeh', and
+            weighting is interpreted as the corresponding photon energy.
 
     Returns:
        dict:
@@ -46,27 +58,56 @@
          may be used to store metadata.
 
    """
-
-    try:
-        energy = float(weighting)
-        return get_cross_sections_scofield(energy, elements=elements)
-
-    except ValueError:
-        if isinstance(weighting, str):
-            if weighting.lower() in ('alka', 'he2', 'yeh_haxpes'):
-                return get_cross_sections_yeh(weighting)
-            elif weighting.lower() in ('xps', 'ups', 'haxpes'):
-                raise ValueError("Key '{0}' is no longer accepted for "
-                                 "weighting. Please use 'alka' for Al k-alpha,"
-                                 " 'he2' for He (II) or 'yeh_haxpes' for "
-                                 "8047.8 eV HAXPES".format(weighting))
-            else:
-                return get_cross_sections_json(weighting)
-
-    # A string or a number would have hit a return statement by now
-    raise ValueError("Weighting not understood as string or float. ",
-                     "Please use a keyword, path to JSON file or an "
-                     "energy value in eV")
+    if dataset is None:
+        try:
+            energy = float(weighting)
+            return get_cross_sections_scofield(energy, elements=elements)
+
+        except ValueError:
+            if isinstance(weighting, str):
+                if weighting.lower() in ('alka', 'he2', 'yeh_haxpes'):
+                    return get_cross_sections_yeh(weighting)
+                elif weighting.lower() in ('xps', 'ups', 'haxpes'):
+                    raise ValueError("Key '{0}' is no longer accepted for "
+                                     "weighting. Please use 'alka' for Al k-alpha,"
+                                     " 'he2' for He (II) or 'yeh_haxpes' for "
+                                     "8047.8 eV HAXPES".format(weighting))
+                else:
+                    return get_cross_sections_json(weighting)
+
+        # A string or a number would have hit a return statement by now
+        raise ValueError("Weighting not understood as string or float. ",
+                         "Please use a keyword, path to JSON file or an "
+                         "energy value in eV")
+    else:
+        cross_sections_dict = {}
+        energy = weighting
+        metadata = _get_metadata(dataset)
+        cross_sections_dict.update(metadata)
+
+        _, _, data_file_path = get_csv_file_path(dataset)
+
+        if os.path.isfile(data_file_path):
+            for element in elements:
+                # adjust the element symbol to match the CSV file names
+                if len(element) == 1:
+                    if dataset.lower() == 'scofield':
+                        data = read_csv_file(data_file_path, element + '_')
+                    elif dataset.lower() == 'yeh':
+                        data = read_csv_file(data_file_path, '_' + element)
+                else:
+                    data = read_csv_file(data_file_path, element)
+                # obtain the cross-section data for each element and the
+                # closest tabulated energy
+                cross_sections, closest_energy = _cross_sections_from_csv_data(
+                    energy, data, dataset)
+                cross_sections_dict['energy'] = closest_energy
+                cross_sections_dict[element] = cross_sections
+
+            return cross_sections_dict
+        else:
+            print("Data file not found.\n"
+                  "You can install it by running: "
+                  "galore-install-data {dataset}".format(dataset=dataset))
 
 
 def cross_sections_info(cross_sections, logging=None):
@@ -91,12 +132,17 @@
        console = logging.StreamHandler()
        logging.getLogger().addHandler(console)
 
-    if 'energy' in cross_sections:
-        logging.info("  Photon energy: {0}".format(cross_sections['energy']))
-    if 'citation' in cross_sections:
-        logging.info("  Citation: {0}".format(cross_sections['citation']))
-    if 'link' in cross_sections:
-        logging.info("  Link: {0}".format(cross_sections['link']))
+    if cross_sections is not None:
+        if 'energy' in cross_sections:
+            logging.info("  Photon energy: {0}".format(
+                cross_sections['energy']))
+        if 'citation' in cross_sections:
+            logging.info("  Citation: {0}".format(cross_sections['citation']))
+        if 'link' in cross_sections:
+            logging.info("  Link: {0}".format(cross_sections['link']))
 
     return logging
@@ -281,3 +327,238 @@ def _eval_fit(energy, coeffs):
                orb: _eval_fit(energy, np_fromstr(coeffs))
                for orb, coeffs in orbitals_fits}})
     return el_cross_sections
+
+
+def read_csv_file(data_file_path, element_name):
+    """
+    Read data for one element from the zipped CSV archive without
+    extracting it to disk.
+
+    Args:
+        data_file_path: path to the zip archive of CSV data
+        element_name: element name used to match the corresponding CSV file
+
+    Returns:
+        dict: containing 'headers', 'electron_counts'
+        (lists of str and float respectively) and 'data_table',
+        a 2-D array of floats. Missing data is represented as NaN.
+
+    """
+
+    # Open the zip archive and match element_name against the CSV file names
+    with zipfile.ZipFile(data_file_path) as data_zf:
+        namelist = data_zf.namelist()
+        for filename in namelist:
+            if SequenceMatcher(None, element_name, filename[-6:-4]).ratio() > 0.99:
+                data = data_zf.read(filename).decode()
+
+    # split the CSV text into rows
+    data_string = data.split('\r\n')
+
+    # get the number of column headers
+    column_headers = [column_header for column_header in data_string[0].split(
+        ',') if column_header != '']
+    n_columns = len(column_headers)
+
+    # build the main matrix
+    main_matrix = [row.split(',')[0:n_columns] for row in data_string]
+
+    # remove empty rows
+    midterm = [row for row in main_matrix if any(row)]
+    data_table = midterm[1:-1]
+    electron_counts_list = [
+        occupancy for occupancy in midterm[-1] if occupancy != ''][1:]
+
+    # replace '' in the data table with NaN and convert string lists
+    # into float arrays
+    data_table = np.array([[float('NaN') if cross_section == '' else float(cross_section)
+                            for cross_section in row]
+                           for row in data_table])
+    electron_counts = np.array([float(occupancy)
+                                for occupancy in electron_counts_list])
+
+    # build the result dict
+    result_dict = {}
+    result_dict['headers'] = column_headers
+    result_dict['electron_counts'] = electron_counts
+    result_dict['data_table'] = data_table
+
+    return result_dict
+
+
+def _get_avg_orbital_cross_sections(subshells_cross_sections, electrons_numbers):
+    """
+    Obtain the average cross section per electron over the subshells of an
+    orbital when processing Scofield data.
+
+    Args:
+        subshells_cross_sections (np.array): subshell cross sections of the
+            highest occupied orbital
+        electrons_numbers (np.array): corresponding electron counts of these
+            subshells
+
+    Returns:
+        avg_orbital_cross_sections (float): average cross section per electron
+            over the subshells
+
+    """
+
+    subshells_cross_sections_sum = sum(subshells_cross_sections)
+    subshells_electrons_numbers_sum = sum(electrons_numbers)
+
+    avg_orbital_cross_sections = (subshells_cross_sections_sum
+                                  / subshells_electrons_numbers_sum)
+
+    return avg_orbital_cross_sections
+
+
+def _cross_sections_from_csv_data(energy, data, dataset):
+    """
+    Get cross sections from a data dict produced by read_csv_file().
+
+    For known sources, data is based on tabulation of Yeh/Lindau (1985).[1]
+    Otherwise, tabulated energies from 1 to 1500 keV from Scofield [2]
+    are used.
+
+    References:
+        1. Yeh, J.J. and Lindau, I. (1985)
+           Atomic Data and Nuclear Data Tables 32 pp 1-155
+        2. J. H. Scofield (1973) Lawrence Livermore National Laboratory
+           Report No. UCRL-51326
+
+    Args:
+        energy (float): photon energy value
+        data (dict): data from read_csv_file()
+        dataset (str): 'Scofield' or 'Yeh'
+
+    Returns:
+        orbitals_cross_sections_dict: cross sections per electron of the
+        highest occupied subshell of each orbital ('s', 'p', 'd', 'f');
+        orbitals not present in the data are omitted.
+
+    """
+
+    n_subshells = len(data['electron_counts'])
+    subshells_headers = data['headers'][-n_subshells:]
+
+    # build dicts for easy calculation
+    electron_counts_by_subshells = dict(
+        zip(subshells_headers, data['electron_counts']))
+    cross_sections_by_subshells = dict(
+        zip(subshells_headers, data['data_table'].T[-n_subshells:]))
+
+    # find the closest tabulated energy to the input energy
+    energy_index = np.abs(data['data_table'].T[0] - float(energy)).argmin()
+    closest_energy = data['data_table'].T[0][energy_index]
+
+    # build the result dict
+    orbitals_cross_sections_dict = {}
+
+    # result for the s orbital
+    s_cross_sections = np.array(
+        [value[energy_index] for key, value in cross_sections_by_subshells.items()
+         if 's' in key])
+    electrons_numbers = np.array(
+        [value for key, value in electron_counts_by_subshells.items() if 's' in key])
+    # cross section per electron of the highest occupied s subshell
+    highest_orbital_cross_section = s_cross_sections[-1] / electrons_numbers[-1]
+    orbitals_cross_sections_dict['s'] = highest_orbital_cross_section
+
+    # results for the 'p', 'd', 'f' orbitals
+    orbitals = ['p', 'd', 'f']
+
+    for orbital in orbitals:
+        interm_matrix = np.array(
+            [value for key, value in cross_sections_by_subshells.items()
+             if orbital in key]).T
+        electrons_numbers = np.array(
+            [value for key, value in electron_counts_by_subshells.items()
+             if orbital in key])
+
+        if np.shape(interm_matrix) != (0,):
+            if dataset.lower() == 'scofield':
+                subshells_cross_sections = interm_matrix[energy_index]
+                highest_orbital_subshells_cross_sections = subshells_cross_sections[-2:]
+                highest_orbital_subshells_electrons_numbers = electrons_numbers[-2:]
+                # average over the two subshells of the highest occupied orbital
+                highest_orbital_cross_section = _get_avg_orbital_cross_sections(
+                    highest_orbital_subshells_cross_sections,
+                    highest_orbital_subshells_electrons_numbers)
+                orbitals_cross_sections_dict[orbital] = highest_orbital_cross_section
+
+            elif dataset.lower() == 'yeh':
+                orbital_cross_sections = interm_matrix[energy_index]
+
+                # cross section per electron of the highest occupied subshell
+                highest_orbital_cross_section = (orbital_cross_sections[-1]
+                                                 / electrons_numbers[-1])
+                orbitals_cross_sections_dict[orbital] = highest_orbital_cross_section
+
+    return orbitals_cross_sections_dict, closest_energy
+
+
+def _get_metadata(dataset):
+    """
+    Args:
+        dataset (str): 'Scofield' or 'Yeh'
+
+    Note: 1. 'Scofield' for J. H. Scofield (1973)
+             Lawrence Livermore National Laboratory Report No. UCRL-51326
+          2. 'Yeh' for Yeh, J.J. and Lindau, I. (1985)
+             Atomic Data and Nuclear Data Tables 32 pp 1-155
+
+    Returns:
+        metadata_dict: citation and link for the requested dataset
+
+    """
+
+    metadata_dict = {}
+
+    if dataset.lower() == 'scofield':
+        metadata_dict['citation'] = 'J.H. Scofield, Theoretical photoionization cross sections from 1 to 1500 keV'
+        metadata_dict['link'] = 'https://doi.org/10.2172/4545040'
+    elif dataset.lower() == 'yeh':
+        metadata_dict['citation'] = 'Yeh, J.J. and Lindau, I. (1985) Atomic Data and Nuclear Data Tables 32 pp 1-155'
+        metadata_dict['link'] = 'https://doi.org/10.1016/0092-640X(85)90016-6'
+    else:
+        raise ValueError(
+            'Dataset not recognised: please use "Scofield" or "Yeh"')
+    return metadata_dict
+
+
+def galore_install_data(url, data_file_dir, data_file_path):
+    """Download a cross-section dataset: API for the galore-install-data command"""
+
+    # if users re-install the dataset, inform them of the existing file path
+    if os.path.isfile(data_file_path):
+        print("Data file exists. 
\n The file path is {data_file_path}".format( + data_file_path=data_file_path)) + + else: + print("Downloading file...") + + try: + os.mkdir(data_file_dir) + except: + pass + + urllib.request.urlretrieve(url, data_file_path) + + # inform user the required file is downloaded and print out the path + if os.path.isfile(data_file_path) == True: + print("Done\nThe file path is {data_file_path}".format( + data_file_path=data_file_path)) + + +def get_csv_file_path(dataset): + """This function is used to obtain the url and create dataset file folder + for different operating systems""" + + if platform.system() == "Windows": + + data_file_dir = os.path.join(os.getenv('LOCALAPPDATA'), 'galore_data') + else: + data_file_dir = os.path.join(os.path.expanduser('~'), '.galore_data') + + if dataset.lower() == 'scofield': + data_file_path = os.path.join( + data_file_dir, 'Scofield_csv_database.zip') + url = 'https://ndownloader.figshare.com/articles/15081888/versions/1' + + elif dataset.lower() == 'yeh': + data_file_path = os.path.join( + data_file_dir, 'Yeh_Lindau_1985_Xsection_CSV_Database.zip') + url = 'https://ndownloader.figshare.com/articles/15090012/versions/1' + + return url, data_file_dir, data_file_path diff --git a/setup.py b/setup.py index e74f65d..84a5faf 100644 --- a/setup.py +++ b/setup.py @@ -71,7 +71,8 @@ def unit_tests(): 'console_scripts': [ 'galore=galore.cli.galore:main', 'galore-get-cs=galore.cli.galore_get_cs:main', - 'galore-plot-cs=galore.cli.galore_plot_cs:main' + 'galore-plot-cs=galore.cli.galore_plot_cs:main', + 'galore-install-data=galore.cli.galore_install_data:main' ] }, test_suite='setup.unit_tests' diff --git a/test/Al_test_scofield.json b/test/Al_test_scofield.json new file mode 100644 index 0000000..43008a3 --- /dev/null +++ b/test/Al_test_scofield.json @@ -0,0 +1,448 @@ +{ + "headers": [ + "PhotonEnergy[keV]", + "TotalCrossSection[barn/atom]", + "K-shell", + "L-shell", + "M-shell", + "All Other", + "1s1/2", + "2s1/2", + "2p1/2", + "2p3/2", + "3s1/2", + "3p1/2", + "3p3/2" + ], + "electron_counts": [ + 2.0, + 2.0, + 2.0, + 4.0, + 2.0, + 0.33, + 0.67 + ], + "data_table": [ + [ + 1.0, + 52999.0, + 0.0, + 51057.0, + 1941.4, + 0.0, + 0.0, + 24855.0, + 8827.8, + 17374.0, + 1782.6, + 53.49, + 105.31 + ], + [ + 1.5, + 17929.0, + 0.0, + 17170.0, + 759.02, + 0.0, + 0.0, + 10063.0, + 2398.5, + 4708.3, + 714.87, + 14.898, + 29.25 + ], + [ + 1.5483, + 16451.0, + 0.0, + 15748.0, + 703.62, + 0.0, + 0.0, + 9347.9, + 2160.2, + 4239.6, + 663.82, + 13.434, + 26.37 + ], + [ + 1.5607, + 176940.0, + 160840.0, + 15409.0, + 690.31, + 0.0, + 160840.0, + 9175.7, + 2104.0, + 4129.1, + 651.53, + 13.088, + 25.69 + ], + [ + 2.0, + 101310.0, + 93139.0, + 7794.2, + 377.61, + 0.0, + 93139.0, + 5083.5, + 916.09, + 1794.6, + 360.54, + 5.77, + 11.303 + ], + [ + 3.0, + 35238.0, + 32595.0, + 2507.6, + 134.96, + 0.0, + 32595.0, + 1843.1, + 225.09, + 439.45, + 130.73, + 1.4348, + 2.7951 + ], + [ + 4.0, + 16090.0, + 14921.0, + 1105.9, + 63.189, + 0.0, + 14921.0, + 868.18, + 80.659, + 157.05, + 61.665, + 0.518, + 1.0061 + ], + [ + 5.0, + 8612.3, + 7996.5, + 581.34, + 34.483, + 0.0, + 7996.5, + 476.0, + 35.799, + 69.539, + 33.805, + 0.23093, + 0.44751 + ], + [ + 6.0, + 5119.6, + 4756.9, + 341.89, + 20.838, + 0.0, + 4756.9, + 288.28, + 18.244, + 35.362, + 20.491, + 0.11807, + 0.22827 + ], + [ + 8.0, + 2218.4, + 2062.7, + 146.42, + 9.25, + 0.0, + 2062.7, + 128.31, + 6.1832, + 11.935, + 9.1325, + 0.04018, + 0.077369 + ], + [ + 10.0, + 1145.8, + 1065.7, + 75.168, + 4.8577, + 0.0, + 1065.7, + 67.476, + 2.6318, + 
5.0605, + 4.8077, + 0.017149, + 0.032897 + ], + [ + 15.0, + 336.68, + 313.28, + 21.937, + 1.4635, + 0.0, + 313.28, + 20.362, + 0.54183, + 1.0335, + 1.4532, + 0.0035457, + 0.0067436 + ], + [ + 20.0, + 138.88, + 129.24, + 9.0237, + 0.61201, + 0.0, + 129.24, + 8.5208, + 0.17378, + 0.32909, + 0.60872, + 0.00114, + 0.0021522 + ], + [ + 30.0, + 39.08, + 36.371, + 2.5344, + 0.17483, + 0.0, + 36.371, + 2.4351, + 0.034619, + 0.0647, + 0.17417, + 0.00022766, + 0.000424 + ], + [ + 40.0, + 15.702, + 14.613, + 1.018, + 0.070834, + 0.0, + 14.613, + 0.9867, + 0.011018, + 0.020282, + 0.070628, + 7.2366e-05, + 0.00013321 + ], + [ + 50.0, + 7.6968, + 7.1628, + 0.49903, + 0.034909, + 0.0, + 7.1628, + 0.48625, + 0.0045307, + 0.0082482, + 0.034825, + 2.9764e-05, + 5.4204e-05 + ], + [ + 60.0, + 4.2857, + 3.9882, + 0.27799, + 0.019511, + 0.0, + 3.9882, + 0.27184, + 0.0021968, + 0.0039584, + 0.01947, + 1.4434e-05, + 2.6025e-05 + ], + [ + 80.0, + 1.695, + 1.5773, + 0.11001, + 0.007754, + 0.0, + 1.5773, + 0.10805, + 0.00070607, + 0.0012506, + 0.0077411, + 4.6401e-06, + 8.2241e-06 + ], + [ + 100.0, + 0.82447, + 0.76715, + 0.053532, + 0.003783, + 0.0, + 0.76715, + 0.052721, + 0.00029589, + 0.0005158, + 0.0037777, + 1.9426e-06, + 3.3947e-06 + ], + [ + 150.0, + 0.22362, + 0.20806, + 0.014531, + 0.0010304, + 0.0, + 0.20806, + 0.014361, + 6.3077e-05, + 0.00010679, + 0.0010292, + 4.1366e-07, + 7.0328e-07 + ], + [ + 200.0, + 0.089703, + 0.083457, + 0.0058323, + 0.00041418, + 0.0, + 0.083457, + 0.005774, + 2.1879e-05, + 3.6399e-05, + 0.0004138, + 1.4326e-07, + 2.3986e-07 + ], + [ + 300.0, + 0.025735, + 0.023941, + 0.0016744, + 0.00011908, + 0.0, + 0.023941, + 0.0016604, + 5.3001e-06, + 8.7016e-06, + 0.00011899, + 3.4581e-08, + 5.7462e-08 + ], + [ + 400.0, + 0.011115, + 0.01034, + 0.0007235, + 5.1488e-05, + 0.0, + 0.01034, + 0.00071801, + 2.0679e-06, + 3.4264e-06, + 5.1452e-05, + 1.3446e-08, + 2.2704e-08 + ], + [ + 500.0, + 0.0060211, + 0.0056011, + 0.000392, + 2.7908e-05, + 0.0, + 0.0056011, + 0.0003892, + 1.0393e-06, + 1.7677e-06, + 2.7889e-05, + 6.7468e-09, + 1.171e-08 + ], + [ + 600.0, + 0.0037644, + 0.0035022, + 0.00024481, + 1.7441e-05, + 0.0, + 0.0035022, + 0.00024313, + 6.132e-07, + 1.0667e-06, + 1.743e-05, + 3.9654e-09, + 7.1123e-09 + ], + [ + 800.0, + 0.0019045, + 0.0017718, + 0.00012383, + 8.833e-06, + 0.0, + 0.0017718, + 0.00012303, + 2.8161e-07, + 5.2323e-07, + 8.8277e-06, + 1.8197e-09, + 3.5109e-09 + ], + [ + 1000.0, + 0.0011839, + 0.0011015, + 7.696e-05, + 5.4993e-06, + 0.0, + 0.0011015, + 7.648e-05, + 1.6341e-07, + 3.1698e-07, + 5.4961e-06, + 1.0487e-09, + 2.17e-09 + ], + [ + 1500.0, + 0.00055624, + 0.00051749, + 3.6151e-05, + 2.6016e-06, + 0.0, + 0.00051749, + 3.5938e-05, + 6.7042e-08, + 1.4616e-07, + 2.6001e-06, + 4.3805e-10, + 1.0467e-09 + ] + ] +} \ No newline at end of file diff --git a/test/Al_test_yeh.json b/test/Al_test_yeh.json new file mode 100644 index 0000000..699b2d2 --- /dev/null +++ b/test/Al_test_yeh.json @@ -0,0 +1,129 @@ +{ + "headers": [ + "Photon Energy / eV", + "2s", + "2p", + "3s", + "3p" + ], + "electron_counts": [ + 2.0, + 6.0, + 2.0, + 1.0 + ], + "data_table": [ + [ + 10.2, + 0.0, + 0.0, + 0.0, + 1.938 + ], + [ + 16.7, + 0.0, + 0.0, + 0.2191, + 0.1572 + ], + [ + 21.2, + 0.0, + 0.0, + 0.3431, + 0.088 + ], + [ + 26.8, + 0.0, + 0.0, + 0.3858, + 0.1017 + ], + [ + 40.8, + 0.0, + 0.0, + 0.3339, + 0.123 + ], + [ + 80.0, + 0.0, + 0.0, + 0.1572, + 0.065 + ], + [ + 132.3, + 0.5224, + 4.809, + 0.071, + 0.027 + ], + [ + 151.4, + 0.5051, + 3.841, + 0.058, + 0.02 + ], + [ + 200.0, + 0.3927, + 
2.176, + 0.037, + 0.011 + ], + [ + 300.0, + 0.2292, + 0.8226, + 0.019, + 0.0046 + ], + [ + 600.0, + 0.07, + 0.123, + 0.005, + 0.00064 + ], + [ + 800.0, + 0.039, + 0.051, + 0.0029, + 0.00036 + ], + [ + 1041.0, + 0.022, + 0.022, + 0.0016, + 0.0001 + ], + [ + 1253.6, + 0.015, + 0.012, + 0.0011, + 7.2e-05 + ], + [ + 1486.6, + 0.01, + 0.0072, + 0.00078, + 5.9e-05 + ], + [ + 8047.8, + 0.00013, + 1.7e-05, + 1.2e-05, + 9.8e-08 + ] + ] +} \ No newline at end of file diff --git a/test/new_test.py b/test/new_test.py new file mode 100644 index 0000000..4b9c6c2 --- /dev/null +++ b/test/new_test.py @@ -0,0 +1,72 @@ +import numpy as np +import unittest +import json +import os +import platform +from os.path import join as path_join + + +from galore.cross_sections import get_csv_file_path +from galore.cross_sections import _cross_sections_from_csv_data + +##obtain directory of current test file +test_dir = os.path.abspath(os.path.dirname(__file__)) + +##obtain expected directory of data-storing file of different systems +if platform.system() == "Windows": + correct_directory = os.path.join(os.getenv('LOCALAPPDATA'), 'galore_data') +else: + correct_directory = os.path.join(os.path.expanduser('~'), '.galore_data') + + +class test_get_cross_sections_from_data_table(unittest.TestCase): + '''To check function _cross_sections_from_csv_data works well with csv datatable''' + + def test_get_cross_sections_from_scofield_datatable(self): + ##Import datatable stored in json file and convert them to the correct form, which mocks csv datatable for element Al + json_path = path_join(test_dir, 'Al_test_scofield.json') + with open(json_path) as json_file: + data = json.load(json_file) + data['data_table'] = np.array(data['data_table']) + data['electron_counts'] = np.array(data['electron_counts']) + + ##check the function goes well with above datatable + cross_sections_scofield,_= _cross_sections_from_csv_data(800,data,'Scofield') + self.assertAlmostEqual(cross_sections_scofield['s'],4.41385e-06) + + + def test_get_cross_sections_from_yeh_datatable(self): + + ##Import datatable stored in json file and convert them to the correct form, which mocks csv datatable for element Al + json_path = path_join(test_dir, 'Al_test_yeh.json') + with open(json_path) as json_file: + data = json.load(json_file) + data['data_table'] = np.array(data['data_table']) + data['electron_counts'] = np.array(data['electron_counts']) + + ##check the function goes well with above datatable + cross_sections_scofield,_= _cross_sections_from_csv_data(800,data,'Yeh') + self.assertAlmostEqual(cross_sections_scofield['s'], 0.00145) + +class test_datafile_path(unittest.TestCase): + '''To check the path created by function get_csv_file_path is correct ''' + + def test_scofield_directory_and_path(self): + ## Simulate expected correct directory and path for scofield data + correct_path = os.path.join(correct_directory, 'Scofield_csv_database.zip') + _,_,Scofield_file_path = get_csv_file_path('Scofield') + ## check the path obtained from get_csv_file_path is correct + self.assertEqual(correct_path, Scofield_file_path) + + + def test_yeh_directory_and_path(self): + ## Simulate expected correct directory and path for yeh data + correct_path = os.path.join(correct_directory, 'Yeh_Lindau_1985_Xsection_CSV_Database.zip') + _,_,Yeh_file_path = get_csv_file_path('yeh') + ## check the path obtained from get_csv_file_path is correct + self.assertEqual(correct_path, Yeh_file_path) + + + +if __name__ == '__main__': + unittest.main() \ No newline at end of file diff --git 
a/test/test_csv_data.py b/test/test_csv_data.py
new file mode 100644
index 0000000..968a1da
--- /dev/null
+++ b/test/test_csv_data.py
@@ -0,0 +1,73 @@
+import numpy as np
+import unittest
+import json
+import os
+import platform
+from os.path import join as path_join
+
+
+from galore.cross_sections import get_csv_file_path
+from galore.cross_sections import _cross_sections_from_csv_data
+
+# obtain directory of current test file
+test_dir = os.path.abspath(os.path.dirname(__file__))
+
+# obtain expected directory of data-storing file for different systems
+if platform.system() == "Windows":
+    correct_directory = os.path.join(os.getenv('LOCALAPPDATA'), 'galore_data')
+else:
+    correct_directory = os.path.join(os.path.expanduser('~'), '.galore_data')
+
+
+class test_get_cross_sections_from_data_table(unittest.TestCase):
+    '''Check that _cross_sections_from_csv_data works with a CSV data table'''
+
+    def test_get_cross_sections_from_scofield_datatable(self):
+        # Import a data table stored in a JSON file and convert it to the
+        # correct form; this mocks the CSV data table for element Al
+        json_path = path_join(test_dir, 'Al_test_scofield.json')
+        with open(json_path) as json_file:
+            data = json.load(json_file)
+            data['data_table'] = np.array(data['data_table'])
+            data['electron_counts'] = np.array(data['electron_counts'])
+
+        # check the function behaves as expected with the above data table
+        cross_sections_scofield, _ = _cross_sections_from_csv_data(
+            800, data, 'Scofield')
+        self.assertAlmostEqual(cross_sections_scofield['s'], 4.41385e-06)
+
+    def test_get_cross_sections_from_yeh_datatable(self):
+
+        # Import a data table stored in a JSON file and convert it to the
+        # correct form; this mocks the CSV data table for element Al
+        json_path = path_join(test_dir, 'Al_test_yeh.json')
+        with open(json_path) as json_file:
+            data = json.load(json_file)
+            data['data_table'] = np.array(data['data_table'])
+            data['electron_counts'] = np.array(data['electron_counts'])
+
+        # check the function behaves as expected with the above data table
+        cross_sections_yeh, _ = _cross_sections_from_csv_data(800, data, 'Yeh')
+        self.assertAlmostEqual(cross_sections_yeh['s'], 0.00145)
+
+
+class test_datafile_path(unittest.TestCase):
+    '''Check that the path created by get_csv_file_path is correct'''
+
+    def test_scofield_directory_and_path(self):
+        # expected directory and path for the Scofield data
+        correct_path = os.path.join(
+            correct_directory, 'Scofield_csv_database.zip')
+        _, _, scofield_file_path = get_csv_file_path('scofield')
+        # check the path obtained from get_csv_file_path is correct
+        self.assertEqual(correct_path, scofield_file_path)
+
+    def test_yeh_directory_and_path(self):
+        # expected directory and path for the Yeh data
+        correct_path = os.path.join(
+            correct_directory, 'Yeh_Lindau_1985_Xsection_CSV_Database.zip')
+        _, _, yeh_file_path = get_csv_file_path('yeh')
+        # check the path obtained from get_csv_file_path is correct
+        self.assertEqual(correct_path, yeh_file_path)
+
+
+if __name__ == '__main__':
+    unittest.main()
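
Example usage of the dataset-aware API added in this change (a minimal sketch,
assuming the Scofield archive has already been downloaded with
"galore-install-data scofield"; the element and energy values are illustrative):

    import galore

    # photon energy is given in keV when dataset='scofield' (1 to 1500 keV)
    cs = galore.get_cross_sections(800, ['Al'], dataset='scofield')

    print(cs['energy'])  # closest tabulated energy to the requested value
    print(cs['Al'])      # per-electron cross sections keyed by orbital, e.g. 's', 'p'

The same call with dataset='yeh' expects the photon energy in eV
(10.2 to 8047.8 eV), matching the ranges documented in galore-get-cs.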