Coverage for calorine / nep / io.py: 100%
230 statements
« prev ^ index » next coverage.py v7.13.1, created at 2026-01-15 13:48 +0000
1from os.path import exists
2from os.path import join as join_path
3from typing import Any, Iterable, NamedTuple, TextIO
4from warnings import warn
6import numpy as np
7from ase import Atoms
8from ase.io import read, write
9from ase.stress import voigt_6_to_full_3x3_stress
10from pandas import DataFrame
def read_loss(filename: str) -> DataFrame:
    """Parses a file in `loss.out` format from GPUMD and returns the
    content as a data frame. More information concerning file format,
    content and units can be found `here
    <https://gpumd.org/nep/output_files/loss_out.html>`__.

    Parameters
    ----------
    filename
        input file name

    Raises
    ------
    ValueError
        if the file does not contain 6, 10, or 14 data columns
    """
    data = np.loadtxt(filename)
    if data.ndim == 1:
        # If only a single row in loss.out, append a dimension
        data = data.reshape(1, -1)
    n_columns = len(data[0])
    # The first column (generation counter) is dropped below; the remaining
    # columns depend on the kind of model that was trained.
    tags = 'total_loss L1 L2'
    if n_columns == 6:
        tags += ' RMSE_P_train'
        tags += ' RMSE_P_test'
    elif n_columns == 10:
        tags += ' RMSE_E_train RMSE_F_train RMSE_V_train'
        tags += ' RMSE_E_test RMSE_F_test RMSE_V_test'
    elif n_columns == 14:
        tags += ' RMSE_E_train RMSE_F_train RMSE_V_train RMSE_Q_train RMSE_Z_train'
        tags += ' RMSE_E_test RMSE_F_test RMSE_V_test RMSE_Q_test RMSE_Z_test'
    else:
        raise ValueError(
            f'Input file contains {n_columns} data columns. Expected 6, 10, or 14 columns.'
        )
    # The loss is written every 100 generations.
    generations = range(100, len(data) * 100 + 1, 100)
    df = DataFrame(data=data[:, 1:], columns=tags.split(), index=generations)
    return df
def _write_structure_in_nep_format(structure: Atoms, f: TextIO) -> None:
    """Write a single structure block into a file-like object in the
    extended-xyz format readable by the nep executable.

    Parameters
    ----------
    structure
        input structure; must hold information regarding energy and forces
    f
        file-like object to which to write
    """
    # The nep executable accepts the following keyword=value pairs, which
    # ASE's extxyz writer produces:
    #   lattice="ax ay az bx by bz cx cy cz"           (mandatory)
    #   energy=energy_value                            (mandatory)
    #   virial="vxx vxy vxz vyx vyy vyz vzx vzy vzz"   (optional)
    #   weight=relative_weight                         (optional)
    #   properties=property_name:data_type:number_of_columns
    #     species:S:1                 (mandatory)
    #     pos:R:3                     (mandatory)
    #     force:R:3 or forces:R:3     (mandatory)
    try:
        # Trigger the calculator so energy and forces are attached to the
        # Atoms object before writing.
        structure.get_potential_energy()
        structure.get_forces()
    except RuntimeError:
        raise RuntimeError('Failed to retrieve energy and/or forces for structure')
    if np.isclose(structure.get_volume(), 0):
        raise ValueError('Structure cell must have a non-zero volume!')
    try:
        structure.get_stress()
    except RuntimeError:
        # Stresses are optional in nep input files; warn but carry on.
        warn('Failed to retrieve stresses for structure')
    write(filename=f, images=structure, write_info=True, format='extxyz')
def write_structures(outfile: str, structures: list[Atoms]) -> None:
    """Writes structures for training/testing in format readable by nep executable.

    Parameters
    ----------
    outfile
        output filename
    structures
        list of structures with energy, forces, and (possibly) stresses
    """
    # All structure blocks are appended to a single file.
    with open(outfile, 'w') as fobj:
        for struct in structures:
            _write_structure_in_nep_format(struct, fobj)
def write_nepfile(parameters: dict[str, Any], dirname: str) -> None:
    """Writes parameters file for NEP construction.

    Parameters
    ----------
    parameters
        input parameters as a mapping from keyword to value;
        see `here <https://gpumd.org/nep/input_parameters/index.html>`__
    dirname
        directory in which to place input file and links
    """
    with open(join_path(dirname, 'nep.in'), 'w') as f:
        for key, val in parameters.items():
            f.write(f'{key} ')
            # Strings are iterable, but must be written verbatim rather than
            # being split into space-separated single characters.
            if isinstance(val, Iterable) and not isinstance(val, str):
                f.write(' '.join(f'{v}' for v in val))
            else:
                f.write(f'{val}')
            f.write('\n')
def read_nepfile(filename: str) -> dict[str, Any]:
    """Returns the content of a configuration file (`nep.in`) as a dictionary.

    Parameters
    ----------
    filename
        input file name
    """
    # Keywords whose values are cast from the raw string representation.
    keys_int = ['version', 'neuron', 'generation', 'batch', 'population',
                'mode', 'model_type', 'charge_mode']
    keys_float = ['lambda_1', 'lambda_2', 'lambda_e', 'lambda_f', 'lambda_v',
                  'lambda_q', 'lambda_shear', 'force_delta']
    keys_float_list = ['cutoff', 'n_max', 'l_max', 'basis_size', 'zbl', 'type_weight']
    settings: dict[str, Any] = {}
    with open(filename) as fp:
        for raw_line in fp:
            # remove comments - throw away everything after a '#'
            tokens = raw_line.split('#', 1)[0].split()
            if not tokens:
                continue
            settings[tokens[0]] = ' '.join(tokens[1:])
    for key, val in settings.items():
        if key in keys_int:
            settings[key] = int(val)
        elif key in keys_float:
            settings[key] = float(val)
        elif key in keys_float_list:
            settings[key] = [float(v) for v in val.split()]
        elif key == 'type':
            # first field is the number of species; the rest are chemical symbols
            fields = val.split()
            fields[0] = int(fields[0])
            settings[key] = fields
    return settings
def read_structures(dirname: str) -> tuple[list[Atoms], list[Atoms]]:
    """Parses the output files with training and test data from a nep run and returns their
    content as two lists of structures, representing training and test data, respectively.
    Target and predicted data are included in the :attr:`info` dict of the :class:`Atoms`
    objects.

    Parameters
    ----------
    dirname
        Directory from which to read output files.

    Raises
    ------
    FileNotFoundError
        if the directory or a mandatory output file does not exist
    ValueError
        if nep.in specifies an unknown model_type or charge_mode, or if the
        content of an output file is inconsistent with the corresponding xyz file
    """
    path = join_path(dirname)
    if not exists(path):
        raise FileNotFoundError(f'Directory {path} does not exist')

    # fetch model type from nep input file
    nep_info = read_nepfile(f'{path}/nep.in')
    model_type = nep_info.get('model_type', 0)

    # set up which files to parse, what dimensions to expect etc
    # depending on the type of model that is parsed; each entry is a tuple
    # (sname, size, mandatory, includes_target, per_atom)
    if model_type == 0:
        charge_mode = int(nep_info.get('charge_mode', 0))
        if charge_mode not in [0, 1, 2]:
            raise ValueError(f'Unknown charge_mode: {charge_mode}')
        files_to_parse = [
            ('energy', 1, True, True, False),
            ('force', 3, True, True, True),
            ('virial', 6, True, True, False),
            ('stress', 6, True, True, False),
        ]
        if charge_mode in [1, 2]:
            # charge models additionally produce per-atom charges and,
            # optionally, Born effective charges (BECs)
            files_to_parse += [
                ('charge', 1, True, False, True),
                ('bec', 9, False, True, True),
            ]
    elif model_type == 1:
        files_to_parse = [('dipole', 3, True, True, False)]
    elif model_type == 2:
        files_to_parse = [('polarizability', 6, True, True, False)]
    else:
        raise ValueError(f'Unknown model_type: {model_type}')

    # read training and test data
    structures = {}
    for stype in ['train', 'test']:
        filename = join_path(dirname, f'{stype}.xyz')
        try:
            structures[stype] = read(filename, format='extxyz', index=':')
        except FileNotFoundError:
            # bug fix: the warning previously read 'File (unknown) not found.'
            # and never named the missing file
            warn(f'File {filename} not found.')
            structures[stype] = []
            continue

        n_structures = len(structures[stype])

        # loop over files from which to read target data and predictions
        for sname, size, mandatory, includes_target, per_atom in files_to_parse:
            infile = f'{sname}_{stype}.out'
            path = join_path(dirname, infile)
            if not exists(path):
                if mandatory:
                    raise FileNotFoundError(f'File {path} does not exist')
                else:
                    continue
            ts, ps = _read_data_file(path, includes_target=includes_target)

            # sanity-check the column counts against the expected size
            if ts is not None:
                if ts.shape[1] != size:
                    raise ValueError(f'Target data in {infile} has unexpected shape:'
                                     f' {ts.shape} (expected: (-1, {size}))')
            if ps.shape[1] != size:
                raise ValueError(f'Predicted data in {infile} has unexpected shape:'
                                 f' {ps.shape} (expected: (-1, {size}))')

            if per_atom:
                # data per-atom, e.g., forces, per-atom-virials, Born effective charges ...
                n_atoms_total = sum([len(s) for s in structures[stype]])
                if len(ps) != n_atoms_total:
                    raise ValueError(f'Number of atoms in {infile} ({len(ps)})'
                                     f' and {stype}.xyz ({n_atoms_total}) inconsistent.')
                n = 0
                for structure in structures[stype]:
                    nat = len(structure)
                    if ts is not None:
                        structure.info[f'{sname}_target'] = \
                            np.array(ts[n: n + nat]).reshape(nat, size)
                    structure.info[f'{sname}_predicted'] = \
                        np.array(ps[n: n + nat]).reshape(nat, size)
                    n += nat
            else:
                # data per structure, e.g., energy, virials, stress
                if len(ps) != n_structures:
                    raise ValueError(f'Number of structures in {infile} ({len(ps)})'
                                     f' and {stype}.xyz ({n_structures}) inconsistent.')
                for k, structure in enumerate(structures[stype]):
                    assert ts is not None, 'This should not occur. Please report.'
                    t = ts[k]
                    assert np.shape(t) == (size,)
                    structure.info[f'{sname}_target'] = t
                    p = ps[k]
                    assert np.shape(p) == (size,)
                    structure.info[f'{sname}_predicted'] = p

        # special handling of target data for BECs
        # The target data for BECs need not be complete. In this case nep writes
        # zeros for every component (not optimal). If we encounter such a case we set
        # all components to nan instead in order to be able to quickly filter for
        # this case when analyzing data.
        for s in structures[stype]:
            if 'bec_target' in s.info and np.allclose(s.info['bec_target'], 0):
                nat = len(s)
                bec_size = 9
                s.info['bec_target'] = np.array(bec_size * nat * [np.nan]).reshape(nat, bec_size)

    return structures['train'], structures['test']
277def _read_data_file(
278 path: str,
279 includes_target: bool = True,
280):
281 """Private function that parses *.out files and
282 returns their content for further processing.
283 """
284 with open(path, 'r') as f:
285 lines = f.readlines()
286 target, predicted = [], []
287 for line in lines:
288 flds = line.split()
289 if includes_target:
290 if len(flds) % 2 != 0:
291 raise ValueError(f'Incorrect number of columns in {path} ({len(flds)}).')
292 n = len(flds) // 2
293 predicted.append([float(s) for s in flds[:n]])
294 target.append([float(s) for s in flds[n:]])
295 else:
296 predicted.append([float(s) for s in flds])
297 target = None
298 if target is not None:
299 target = np.array(target)
300 predicted = np.array(predicted)
301 return target, predicted
304def get_parity_data(
305 structures: list[Atoms],
306 property: str,
307 selection: list[str] = None,
308 flatten: bool = True,
309) -> DataFrame:
310 """Returns the predicted and target energies, forces, virials or stresses
311 from a list of structures in a format suitable for generating parity plots.
313 The structures should have been read using :func:`read_structures
314 <calorine.nep.read_structures>`, such that the `info` object is
315 populated with keys of the form `<property>_<type>` where `<property>`
316 is, e.g., `energy` or `force` and `<type>` is one of `predicted` or `target`.
318 The resulting parity data is returned as a tuple of dicts, where each entry
319 corresponds to a list.
321 Parameters
322 ----------
323 structures
324 List of structures as read with :func:`read_structures <calorine.nep.read_structures>`.
325 property
326 One of `energy`, `force`, `virial`, `stress`, `bec`, `dipole`, or `polarizability`.
327 selection
328 A list containing which components to return, and/or the norm.
329 Possible values are `x`, `y`, `z`, `xx`, `yy`,
330 `zz`, `yz`, `xz`, `xy`, `norm`, `pressure`.
331 flatten
332 if True return flattened lists; this is useful for flattening
333 the components of force or virials into a simple list
334 """
335 voigt_mapping = {
336 'x': 0, 'y': 1, 'z': 2, 'xx': 0, 'yy': 1, 'zz': 2, 'yz': 3, 'xz': 4, 'xy': 5,
337 }
338 if property not in ('energy', 'force', 'virial', 'stress', 'polarizability', 'dipole', 'bec'):
339 raise ValueError(
340 "`property` must be one of 'energy', 'force', 'virial', 'stress',"
341 " 'polarizability', 'dipole', or 'bec'."
342 )
343 if property in ['energy'] and selection:
344 raise ValueError('Selection cannot be applied to scalars.')
345 if property != 'stress' and selection and 'pressure' in selection:
346 raise ValueError(f'Cannot calculate pressure for `{property}`.')
348 data = {'predicted': [], 'target': []}
349 if property in ['force', 'bec'] and flatten:
350 size = 3 if property == 'force' else 9
351 data['species'] = []
352 for structure in structures:
353 if 'species' in data:
354 data['species'].extend(np.repeat(structure.symbols, size).tolist())
355 for stype in ['predicted', 'target']:
356 property_with_stype = f'{property}_{stype}'
357 if property_with_stype not in structure.info.keys():
358 raise KeyError(f'{property_with_stype} not available in info field of structure')
359 extracted_property = np.array(structure.info[property_with_stype])
361 if selection is None or len(selection) == 0:
362 data[stype].append(extracted_property)
363 continue
365 selected_values = []
366 for select in selection:
367 if property in ['force', 'bec']:
368 # flip to get (n_components, n_structures)
369 extracted_property = extracted_property.T
370 if select == 'norm':
371 if property == 'force':
372 selected_values.append(np.linalg.norm(extracted_property, axis=0))
373 elif property in ['virial', 'stress']:
374 full_tensor = voigt_6_to_full_3x3_stress(extracted_property)
375 selected_values.append(np.linalg.norm(full_tensor))
376 elif property in ['dipole']:
377 selected_values.append(np.linalg.norm(extracted_property))
378 else:
379 raise ValueError(
380 f'Cannot handle selection=`norm` with property=`{property}`.')
381 continue
383 if select == 'pressure' and property == 'stress':
384 total_stress = extracted_property
385 selected_values.append(-np.sum(total_stress[:3]) / 3)
386 continue
388 if select not in voigt_mapping:
389 raise ValueError(f'Selection `{select}` is not allowed.')
390 index = voigt_mapping[select]
391 if index >= extracted_property.shape[0]:
392 raise ValueError(
393 f'Selection `{select}` is not compatible with property `{property}`.'
394 )
395 selected_values.append(extracted_property[index])
397 data[stype].append(selected_values)
398 if flatten:
399 for stype in ['target', 'predicted']:
400 value = data[stype]
401 if len(np.shape(value[0])) > 0:
402 data[stype] = np.concatenate(value).ravel().tolist()
403 if property in ['force']:
404 n = len(data['target']) // 3
405 data['component'] = ['x', 'y', 'z'] * n
406 elif property in ['virial', 'stress']:
407 n = len(data['target']) // 6
408 data['component'] = ['xx', 'yy', 'zz', 'yz', 'xz', 'xy'] * n
409 elif property in ['bec']:
410 n = len(data['target']) // 9
411 data['component'] = ['xx', 'xy', 'xz', 'yx', 'yy', 'yz', 'zx', 'zy', 'zz'] * n
412 df = DataFrame(data)
413 # In case of flatten, cast to float64 for compatibility
414 # with e.g. seaborn.
415 # Casting in this way breaks tensorial properties though,
416 # so skip it there.
417 if flatten:
418 df['target'] = df.target.astype('float64')
419 df['predicted'] = df.predicted.astype('float64')
420 return df