Coverage for calorine/nep/io.py: 100%
224 statements
« prev ^ index » next — coverage.py v7.11.3, created at 2025-12-04 13:49 +0000
1from os.path import exists
2from os.path import join as join_path
3from typing import Any, Iterable, NamedTuple, TextIO
4from warnings import warn
6import numpy as np
7from ase import Atoms
8from ase.io import read, write
9from ase.stress import voigt_6_to_full_3x3_stress
10from pandas import DataFrame
def read_loss(filename: str) -> DataFrame:
    """Parses a file in `loss.out` format from GPUMD and returns the
    content as a data frame. More information concerning file format,
    content and units can be found `here
    <https://gpumd.org/nep/output_files/loss_out.html>`__.

    Parameters
    ----------
    filename
        input file name

    """
    raw = np.loadtxt(filename)
    # A file with a single row is loaded as a 1D array; promote it to 2D
    # so that the column handling below works uniformly.
    if isinstance(raw[0], np.float64):
        raw = raw.reshape(1, -1)
    n_columns = len(raw[0])
    if n_columns == 6:
        columns = ['total_loss', 'L1', 'L2',
                   'RMSE_P_train', 'RMSE_P_test']
    elif n_columns == 10:
        columns = ['total_loss', 'L1', 'L2',
                   'RMSE_E_train', 'RMSE_F_train', 'RMSE_V_train',
                   'RMSE_E_test', 'RMSE_F_test', 'RMSE_V_test']
    else:
        raise ValueError(
            f'Input file contains {n_columns} data columns. Expected 6 or 10 columns.'
        )
    # loss is written every 100 generations; reconstruct the generation index
    generations = range(100, 100 * len(raw) + 1, 100)
    # drop the first column (generation counter) since it becomes the index
    return DataFrame(data=raw[:, 1:], columns=columns, index=generations)
def _write_structure_in_nep_format(structure: Atoms, f: TextIO) -> None:
    """Write structure block into a file-like object in format readable by nep executable.

    Parameters
    ----------
    structure
        input structure; must hold information regarding energy and forces
    f
        file-like object to which to write

    Raises
    ------
    RuntimeError
        if energy and/or forces cannot be retrieved from the structure
    ValueError
        if the structure cell has a zero volume
    """
    # Allowed keyword=value pairs. Use ASEs extyz write functionality.:
    #   lattice="ax ay az bx by bz cx cy cz" (mandatory)
    #   energy=energy_value (mandatory)
    #   virial="vxx vxy vxz vyx vyy vyz vzx vzy vzz" (optional)
    #   weight=relative_weight (optional)
    #   properties=property_name:data_type:number_of_columns
    #       species:S:1 (mandatory)
    #       pos:R:3 (mandatory)
    #       force:R:3 or forces:R:3 (mandatory)
    try:
        structure.get_potential_energy()
        structure.get_forces()  # calculate forces to have them on the Atoms object
    except RuntimeError as exc:
        # chain the original exception so the root cause is not lost
        raise RuntimeError('Failed to retrieve energy and/or forces for structure') from exc
    if np.isclose(structure.get_volume(), 0):
        raise ValueError('Structure cell must have a non-zero volume!')
    try:
        structure.get_stress()
    except RuntimeError:
        # stresses are optional in the nep format; emit a warning but continue
        warn('Failed to retrieve stresses for structure')
    write(filename=f, images=structure, write_info=True, format='extxyz')
def write_structures(outfile: str, structures: list[Atoms]) -> None:
    """Writes structures for training/testing in format readable by nep executable.

    Parameters
    ----------
    outfile
        output filename
    structures
        list of structures with energy, forces, and (possibly) stresses
    """
    # Append every structure block to a single extxyz-style file.
    with open(outfile, 'w') as destination:
        for atoms in structures:
            _write_structure_in_nep_format(atoms, destination)
def write_nepfile(parameters: NamedTuple, dirname: str) -> None:
    """Writes parameters file for NEP construction.

    Parameters
    ----------
    parameters
        input parameters; see `here <https://gpumd.org/nep/input_parameters/index.html>`__
    dirname
        directory in which to place input file and links
    """
    with open(join_path(dirname, 'nep.in'), 'w') as f:
        for key, val in parameters.items():
            f.write(f'{key} ')
            # Strings are themselves Iterable; without the extra check a value
            # such as '2 Au Cu' would be joined character by character.
            if isinstance(val, Iterable) and not isinstance(val, str):
                f.write(' '.join(f'{v}' for v in val))
            else:
                f.write(f'{val}')
            f.write('\n')
def read_nepfile(filename: str) -> dict[str, Any]:
    """Returns the content of a configuration file (`nep.in`) as a dictionary.

    Parameters
    ----------
    filename
        input file name
    """
    # keys whose values should be cast to specific types
    integer_keys = {'version', 'neuron', 'generation', 'batch', 'population',
                    'mode', 'model_type', 'charge_mode'}
    float_keys = {'lambda_1', 'lambda_2', 'lambda_e', 'lambda_f', 'lambda_v',
                  'lambda_q', 'lambda_shear', 'force_delta'}
    float_list_keys = {'cutoff', 'n_max', 'l_max', 'basis_size', 'zbl', 'type_weight'}

    settings: dict[str, Any] = {}
    with open(filename) as fp:
        for raw_line in fp:
            # remove comments - throw away everything after a '#'
            fields = raw_line.split('#', 1)[0].split()
            if not fields:
                continue
            settings[fields[0]] = ' '.join(fields[1:])

    # cast string values to their proper types
    for key, value in settings.items():
        if key in integer_keys:
            settings[key] = int(value)
        elif key in float_keys:
            settings[key] = float(value)
        elif key in float_list_keys:
            settings[key] = [float(v) for v in value.split()]
        elif key == 'type':
            # first entry is the number of species, the rest are symbols
            entries = value.split()
            entries[0] = int(entries[0])
            settings[key] = entries
    return settings
def read_structures(dirname: str) -> tuple[list[Atoms], list[Atoms]]:
    """Parses the output files with training and test data from a nep run and returns their
    content as two lists of structures, representing training and test data, respectively.
    Target and predicted data are included in the :attr:`info` dict of the :class:`Atoms`
    objects.

    Parameters
    ----------
    dirname
        Directory from which to read output files.

    Raises
    ------
    FileNotFoundError
        if the directory or one of the expected ``*.out`` files does not exist
    ValueError
        if ``nep.in`` specifies an unknown ``model_type`` or ``charge_mode``,
        or if the data in the output files is inconsistent with the xyz files

    """
    path = join_path(dirname)
    if not exists(path):
        raise FileNotFoundError(f'Directory {path} does not exist')

    # fetch model type from nep input file
    nep_info = read_nepfile(f'{path}/nep.in')
    model_type = nep_info.get('model_type', 0)

    # set up which files to parse, what dimensions to expect etc
    # depending on the type of model that is parsed
    if model_type == 0:
        charge_mode = int(nep_info.get('charge_mode', 0))
        if charge_mode not in [0, 1, 2]:
            raise ValueError(f'Unknown charge_mode: {charge_mode}')
        # files to parse: (sname, size, includes_target, per_atom)
        files_to_parse = [
            ('energy', 1, True, False),
            ('force', 3, True, True),
            ('virial', 6, True, False),
            ('stress', 6, True, False),
        ]
        if charge_mode in [1, 2]:
            # files to parse: (sname, size, includes_target, per_atom)
            files_to_parse += [
                ('charge', 1, False, True),
                ('bec', 9, True, True),
            ]
    elif model_type == 1:
        # files to parse: (sname, size, includes_target, per_atom)
        files_to_parse = [('dipole', 3, True, False)]
    elif model_type == 2:
        # files to parse: (sname, size, includes_target, per_atom)
        files_to_parse = [('polarizability', 6, True, False)]
    else:
        raise ValueError(f'Unknown model_type: {model_type}')

    # read training and test data
    structures = {}
    for stype in ['train', 'test']:
        filename = join_path(dirname, f'{stype}.xyz')
        try:
            structures[stype] = read(filename, format='extxyz', index=':')
        except FileNotFoundError:
            # fix: report the actual missing file name in the warning
            warn(f'File {filename} not found.')
            structures[stype] = []
            continue

        n_structures = len(structures[stype])

        # loop over files from which to read target data and predictions
        for sname, size, includes_target, per_atom in files_to_parse:
            infile = f'{sname}_{stype}.out'
            ts, ps = _read_data_file(path, infile, includes_target=includes_target)

            if ts is not None:
                if ts.shape[1] != size:
                    raise ValueError(f'Target data in {infile} has unexpected shape:'
                                     f' {ts.shape} (expected: (-1, {size}))')
            if ps.shape[1] != size:
                raise ValueError(f'Predicted data in {infile} has unexpected shape:'
                                 f' {ps.shape} (expected: (-1, {size}))')

            if per_atom:
                # data per-atom, e.g., forces, per-atom-virials, Born effective charges ...
                n_atoms_total = sum([len(s) for s in structures[stype]])
                if len(ps) != n_atoms_total:
                    raise ValueError(f'Number of atoms in {infile} ({len(ps)})'
                                     f' and {stype}.xyz ({n_atoms_total}) inconsistent.')
                # slice the flat per-atom arrays structure by structure
                n = 0
                for structure in structures[stype]:
                    nat = len(structure)
                    if ts is not None:
                        structure.info[f'{sname}_target'] = \
                            np.array(ts[n: n + nat]).reshape(nat, size)
                    structure.info[f'{sname}_predicted'] = \
                        np.array(ps[n: n + nat]).reshape(nat, size)
                    n += nat
            else:
                # data per structure, e.g., energy, virials, stress
                if len(ps) != n_structures:
                    raise ValueError(f'Number of structures in {infile} ({len(ps)})'
                                     f' and {stype}.xyz ({n_structures}) inconsistent.')
                for k, structure in enumerate(structures[stype]):
                    assert ts is not None, 'This should not occur. Please report.'
                    t = ts[k]
                    assert np.shape(t) == (size,)
                    structure.info[f'{sname}_target'] = t
                    p = ps[k]
                    assert np.shape(p) == (size,)
                    structure.info[f'{sname}_predicted'] = p

        # special handling of target data for BECs
        # The target data for BECs need not be complete. In this case nep writes
        # zeros for every component (not optimal). If we encounter such a case we set
        # all components to nan instead in order to be able to quickly filter for
        # this case when analyzing data.
        for s in structures[stype]:
            if 'bec_target' in s.info and np.allclose(s.info['bec_target'], 0):
                nat = len(s)
                bec_size = 9  # renamed from `size` to avoid shadowing the loop variable
                s.info['bec_target'] = np.array(bec_size * nat * [np.nan]).reshape(nat, bec_size)

    return structures['train'], structures['test']
def _read_data_file(dirname: str, fname: str, includes_target: bool = True):
    """Private function that parses *.out files and
    returns their content for further processing.

    Returns a tuple ``(target, predicted)`` of arrays; ``target`` is ``None``
    when the file contains predictions only.
    """
    path = join_path(dirname, fname)
    if not exists(path):
        raise FileNotFoundError(f'File {path} does not exist')
    target_rows, predicted_rows = [], []
    with open(path, 'r') as fp:
        for line in fp:
            fields = line.split()
            if includes_target:
                # each row holds predicted values first, then target values
                if len(fields) % 2 != 0:
                    raise ValueError(f'Incorrect number of columns in {path} ({len(fields)}).')
                half = len(fields) // 2
                predicted_rows.append([float(v) for v in fields[:half]])
                target_rows.append([float(v) for v in fields[half:]])
            else:
                predicted_rows.append([float(v) for v in fields])
                target_rows = None
    if target_rows is not None:
        target_rows = np.array(target_rows)
    return target_rows, np.array(predicted_rows)
294def get_parity_data(
295 structures: list[Atoms],
296 property: str,
297 selection: list[str] = None,
298 flatten: bool = True,
299) -> DataFrame:
300 """Returns the predicted and target energies, forces, virials or stresses
301 from a list of structures in a format suitable for generating parity plots.
303 The structures should have been read using :func:`read_structures
304 <calorine.nep.read_structures>`, such that the `info` object is
305 populated with keys of the form `<property>_<type>` where `<property>`
306 is, e.g., `energy` or `force` and `<type>` is one of `predicted` or `target`.
308 The resulting parity data is returned as a tuple of dicts, where each entry
309 corresponds to a list.
311 Parameters
312 ----------
313 structures
314 List of structures as read with :func:`read_structures <calorine.nep.read_structures>`.
315 property
316 One of `energy`, `force`, `virial`, `stress`, `bec`, `dipole`, or `polarizability`.
317 selection
318 A list containing which components to return, and/or the norm.
319 Possible values are `x`, `y`, `z`, `xx`, `yy`,
320 `zz`, `yz`, `xz`, `xy`, `norm`, `pressure`.
321 flatten
322 if True return flattened lists; this is useful for flattening
323 the components of force or virials into a simple list
324 """
325 voigt_mapping = {
326 'x': 0, 'y': 1, 'z': 2, 'xx': 0, 'yy': 1, 'zz': 2, 'yz': 3, 'xz': 4, 'xy': 5,
327 }
328 if property not in ('energy', 'force', 'virial', 'stress', 'polarizability', 'dipole', 'bec'):
329 raise ValueError(
330 "`property` must be one of 'energy', 'force', 'virial', 'stress',"
331 " 'polarizability', 'dipole', or 'bec'."
332 )
333 if property in ['energy'] and selection:
334 raise ValueError('Selection cannot be applied to scalars.')
335 if property != 'stress' and selection and 'pressure' in selection:
336 raise ValueError(f'Cannot calculate pressure for `{property}`.')
338 data = {'predicted': [], 'target': []}
339 if property in ['force', 'bec'] and flatten:
340 size = 3 if property == 'force' else 9
341 data['species'] = []
342 for structure in structures:
343 if 'species' in data:
344 data['species'].extend(np.repeat(structure.symbols, size).tolist())
345 for stype in ['predicted', 'target']:
346 property_with_stype = f'{property}_{stype}'
347 if property_with_stype not in structure.info.keys():
348 raise KeyError(f'{property_with_stype} not available in info field of structure')
349 extracted_property = np.array(structure.info[property_with_stype])
351 if selection is None or len(selection) == 0:
352 data[stype].append(extracted_property)
353 continue
355 selected_values = []
356 for select in selection:
357 if property in ['force', 'bec']:
358 # flip to get (n_components, n_structures)
359 extracted_property = extracted_property.T
360 if select == 'norm':
361 if property == 'force':
362 selected_values.append(np.linalg.norm(extracted_property, axis=0))
363 elif property in ['virial', 'stress']:
364 full_tensor = voigt_6_to_full_3x3_stress(extracted_property)
365 selected_values.append(np.linalg.norm(full_tensor))
366 elif property in ['dipole']:
367 selected_values.append(np.linalg.norm(extracted_property))
368 else:
369 raise ValueError(
370 f'Cannot handle selection=`norm` with property=`{property}`.')
371 continue
373 if select == 'pressure' and property == 'stress':
374 total_stress = extracted_property
375 selected_values.append(-np.sum(total_stress[:3]) / 3)
376 continue
378 if select not in voigt_mapping:
379 raise ValueError(f'Selection `{select}` is not allowed.')
380 index = voigt_mapping[select]
381 if index >= extracted_property.shape[0]:
382 raise ValueError(
383 f'Selection `{select}` is not compatible with property `{property}`.'
384 )
385 selected_values.append(extracted_property[index])
387 data[stype].append(selected_values)
388 if flatten:
389 for stype in ['target', 'predicted']:
390 value = data[stype]
391 if len(np.shape(value[0])) > 0:
392 data[stype] = np.concatenate(value).ravel().tolist()
393 if property in ['force']:
394 n = len(data['target']) // 3
395 data['component'] = ['x', 'y', 'z'] * n
396 elif property in ['virial', 'stress']:
397 n = len(data['target']) // 6
398 data['component'] = ['xx', 'yy', 'zz', 'yz', 'xz', 'xy'] * n
399 elif property in ['bec']:
400 n = len(data['target']) // 9
401 data['component'] = ['xx', 'xy', 'xz', 'yx', 'yy', 'yz', 'zx', 'zy', 'zz'] * n
402 df = DataFrame(data)
403 # In case of flatten, cast to float64 for compatibility
404 # with e.g. seaborn.
405 # Casting in this way breaks tensorial properties though,
406 # so skip it there.
407 if flatten:
408 df['target'] = df.target.astype('float64')
409 df['predicted'] = df.predicted.astype('float64')
410 return df