diff --git a/examples/Generator/inverse_trainer_xyz.py b/examples/Generator/inverse_trainer_xyz.py index b18619d6..ef063340 100644 --- a/examples/Generator/inverse_trainer_xyz.py +++ b/examples/Generator/inverse_trainer_xyz.py @@ -11,6 +11,9 @@ from ppafm.ocl.AFMulator import AFMulator from ppafm.ocl.oclUtils import init_env +# from ppafm.logging_utils import configure_logging +# configure_logging(log_performance=True) + class ExampleTrainer(InverseAFMtrainer): # We override this callback method in order to augment the samples with randomized tip distance and tilt @@ -57,7 +60,6 @@ def on_sample_start(self): QZs=[[0.1, 0, -0.1, 0]], Qs=[[-10, 20, -10, 0]], ) - # trainer.bRuntime = True # Augment molecule list with rotations and shuffle trainer.augment_with_rotations_entropy(sphereTangentSpace(n=100), 30) diff --git a/examples/pyridineDensOverlap/run_gpu.py b/examples/pyridineDensOverlap/run_gpu.py index 8f0f5919..613a37ac 100755 --- a/examples/pyridineDensOverlap/run_gpu.py +++ b/examples/pyridineDensOverlap/run_gpu.py @@ -8,11 +8,12 @@ import ppafm.ocl.field as FFcl import ppafm.ocl.oclUtils as oclu +from ppafm.logging_utils import configure_logging from ppafm.ocl.AFMulator import AFMulator # Initialize an OpenCL environment. You can change i_platform to select the device to use oclu.init_env(i_platform=0) -FFcl.bRuntime = True # Print timings +configure_logging(log_performance=True) # Print timings # Load all input files rho_tip, xyzs_tip, Zs_tip = FFcl.TipDensity.from_file("tip/density_CO.xsf") diff --git a/ppafm/GUIWidgets.py b/ppafm/GUIWidgets.py index 710f396c..7e54fd40 100644 --- a/ppafm/GUIWidgets.py +++ b/ppafm/GUIWidgets.py @@ -8,6 +8,10 @@ from PyQt5 import QtCore, QtWidgets from . 
import io +from .logging_utils import get_logger, get_perf_logger + +logger = get_logger("GUIWidgets") +perf_logger = get_perf_logger("GUIWidgets") matplotlib.use("Qt5Agg") @@ -118,19 +122,17 @@ class FigImshow(FigCanvas): cbar = None - def __init__(self, parentWiget=None, parentApp=None, width=5, height=4, dpi=100, verbose=0): + def __init__(self, parentWiget=None, parentApp=None, width=5, height=4, dpi=100): super(self.__class__, self).__init__(parentWiget=parentWiget, parentApp=parentApp, width=width, height=height, dpi=dpi) self.fig.canvas.mpl_connect("button_press_event", self.onclick) self.fig.canvas.mpl_connect("scroll_event", self.onscroll) - self.verbose = verbose self.img = None self.cbar = None def plotSlice(self, F_stack, z_slice, title=None, margins=None, grid_selector=0, slice_length=None, points=[], cbar_range=None, extent=None): F = F_stack[z_slice] - if self.verbose > 0: - print("plotSlice F.shape, F.min(), F.max(), margins", F.shape, F.min(), F.max(), margins) + logger.debug(f"plotSlice F.shape, F.min(), F.max(), margins {F.shape} {F.min()} {F.max()} {margins}") if self.img is None or self.img.get_array().shape != F.shape: self.axes.cla() @@ -143,8 +145,7 @@ def plotSlice(self, F_stack, z_slice, title=None, margins=None, grid_selector=0, for line in self.axes.lines: line.remove() self.axes.set_prop_cycle(None) # Reset color cycle - if self.verbose > 0: - print("plotSlice: reset points") + logger.debug("plotSlice: reset points") else: for p, (ix, iy) in zip(self.axes.lines, points): x = np.atleast_1d(ix) @@ -155,8 +156,7 @@ def plotSlice(self, F_stack, z_slice, title=None, margins=None, grid_selector=0, if self.cbar == None: self.cbar = self.fig.colorbar(self.img, ax=self.axes) self.cbar.set_label("df (Hz)") - if self.verbose > 0: - print("plotSlice: added colorbar") + logger.debug("plotSlice: added colorbar") self.img.set_clim(vmin=cbar_range[0], vmax=cbar_range[1]) self.cbar.mappable.set_clim(vmin=cbar_range[0], vmax=cbar_range[1]) else: @@ 
-164,8 +164,7 @@ def plotSlice(self, F_stack, z_slice, title=None, margins=None, grid_selector=0, if self.cbar is not None: self.cbar.remove() self.cbar = None - if self.verbose > 0: - print("plotSlice: removed colorbar") + logger.debug("plotSlice: removed colorbar") if extent is not None: self.img.set_extent(extent) @@ -197,9 +196,9 @@ def plotSlice2(self, F_stack, title=None, margins=None, grid_selector=0, slice_l self.axes.cla() F = F_stack - print("plotSlice F.shape, F.min(), F.max() ", F.shape, F.min(), F.max()) + logger.debug(f"plotSlice F.shape, F.min(), F.max() {F.shape} {F.min()} {F.max()}") - print("self.margins", margins) + logger.debug(f"self.margins {margins}") if alpha > 0 and big_len_image is not None: F = F * (1 - alpha) + big_len_image * alpha self.img = self.axes.imshow(F, origin="lower", cmap="viridis", interpolation="bicubic") @@ -232,8 +231,7 @@ def onclick(self, event): x = float(event.xdata) y = float(event.ydata) except TypeError: - if self.verbose > 0: - print("Invalid click event.") + logger.warning("Invalid click event.") return self.axes.plot([x], [y], "o", scalex=False, scaley=False) self.draw() @@ -247,15 +245,14 @@ def onscroll(self, event): x = float(event.xdata) y = float(event.ydata) except TypeError: - if self.verbose > 0: - print("Invalid scroll event.") + logger.warning("Invalid scroll event.") return if event.button == "up": direction = "in" elif event.button == "down": direction = "out" else: - print(f"Invalid scroll direction {event.button}") + logger.warning(f"Invalid scroll direction {event.button}") return self.parent.zoomTowards(x, y, direction) @@ -325,7 +322,7 @@ def save_dat(self): fileName, _ = QtWidgets.QFileDialog.getSaveFileName(self, "Save df curve raw data", default_path, "Data files (*.dat)") if fileName: fileName = correct_ext(fileName, ".dat") - print("saving data to :", fileName) + logger.info(f"Saving data to: {fileName}") data = [] data.append(np.array(self.figCan.axes.lines[0].get_xdata())) for line in 
self.figCan.axes.lines: @@ -338,7 +335,7 @@ def save_png(self): fileName, _ = QtWidgets.QFileDialog.getSaveFileName(self, "Save df curve image", default_path, "Image files (*.png)") if fileName: fileName = correct_ext(fileName, ".png") - print("saving image to :", fileName) + logger.info(f"Saving image to: {fileName}") self.figCan.fig.savefig(fileName, bbox_inches="tight") def clearFig(self): @@ -363,7 +360,7 @@ def setRange(self): except: pass self.figCan.draw() - print("range: ", xmin, xmax, ymin, ymax) + logger.debug(f"range: {xmin} {xmax} {ymin} {ymax}") # ======================= @@ -511,9 +508,8 @@ def updateParent(self): class FFViewer(SlaveWindow): - def __init__(self, parent=None, title="View Forcefield", width=5, height=4, dpi=100, verbose=0): + def __init__(self, parent=None, title="View Forcefield", width=5, height=4, dpi=100): super().__init__(parent=parent, title=title) - self.verbose = verbose self.figCan = FigImshow(parent, width=width, height=height, dpi=dpi) self.centralLayout.addWidget(self.figCan) @@ -576,8 +572,7 @@ def updateFF(self): if not self.isVisible(): set_widget_value(self.bxInd, iz) - if self.verbose > 0: - print("FFViewer.updateFF", self.FE.shape, iz, self.z_step, self.z_min) + logger.debug(f"FFViewer.updateFF {self.FE.shape} {iz} {self.z_step} {self.z_min}") def updateView(self): t0 = time.perf_counter() @@ -588,10 +583,8 @@ def updateView(self): data = self.FE[..., ic].transpose(2, 1, 0) self.figCan.plotSlice(data, iz, title=f"z = {z:.2f}Å") - if self.verbose > 0: - print("FFViewer.updateView", ic, iz, data.shape) - if self.verbose > 1: - print("updateView time [s]", time.perf_counter() - t0) + logger.debug(f"FFViewer.updateView {ic} {iz} {data.shape}") + perf_logger.info(f"updateView time [s] {time.perf_counter() - t0}") def saveFF(self): comp = self.slComponent.currentText() @@ -602,12 +595,11 @@ def saveFF(self): ext = os.path.splitext(fileName)[1] if ext != ".xsf": self.parent.status_message("Unsupported file type in force field 
save file path") - print(f"Unsupported file type in force field save file path `{fileName}`") + logger.error(f"Unsupported file type in force field save file path `{fileName}`") return self.parent.status_message("Saving data...") - if self.verbose > 0: - print(f"Saving force field data to {fileName}...") + logger.debug(f"Saving force field data to {fileName}...") ic = self.slComponent.currentIndex() data = self.FE.copy() # Clamp large values for easier visualization @@ -620,10 +612,9 @@ def saveFF(self): lvec = self.parent.afmulator.lvec xyzs = self.parent.xyzs - lvec[0] atomstring = io.primcoords2Xsf(self.parent.Zs, xyzs.T, lvec) - io.saveXSF(fileName, data, lvec, head=atomstring, verbose=0) + io.saveXSF(fileName, data, lvec, head=atomstring) - if self.verbose > 0: - print("Done saving force field data.") + logger.info("Done saving force field data.") self.parent.status_message("Ready") diff --git a/ppafm/GridUtils.py b/ppafm/GridUtils.py index 845eab59..0fa9197c 100644 --- a/ppafm/GridUtils.py +++ b/ppafm/GridUtils.py @@ -5,6 +5,9 @@ import numpy as np from . 
import cpp_utils +from .logging_utils import get_logger + +logger = get_logger("GridUtils") # ============================== interface to C++ core @@ -97,7 +100,7 @@ def interpolate_cartesian(F, pos, cell=None, result=None): if cell is not None: setGridCell(cell) nDim = np.array(pos.shape) - print(nDim) + logger.debug(nDim) if result is None: result = np.zeros((nDim[0], nDim[1], nDim[2])) n = nDim[0] * nDim[1] * nDim[2] @@ -120,9 +123,9 @@ def dens2Q_CHGCARxsf(data, lvec): nDim = data.shape Ntot = nDim[0] * nDim[1] * nDim[2] Vtot = np.linalg.det(lvec[1:]) - print("dens2Q Volume : ", Vtot) - print("dens2Q Ntot : ", Ntot) - print("dens2Q Vtot/Ntot : ", Vtot / Ntot) + logger.debug(f"dens2Q Volume : {Vtot}") + logger.debug(f"dens2Q Ntot : {Ntot}") + logger.debug(f"dens2Q Vtot/Ntot : {Vtot / Ntot}") # Qsum = rho1.sum() return Vtot / Ntot diff --git a/ppafm/HighLevel.py b/ppafm/HighLevel.py index f150bd8f..f9b1918d 100755 --- a/ppafm/HighLevel.py +++ b/ppafm/HighLevel.py @@ -9,8 +9,9 @@ from . import fieldFFT as fFFT from . 
import io from .defaults import d3 +from .logging_utils import get_logger -verbose = 1 +logger = get_logger("HighLevel") # ===== constants Fmax_DEFAULT = 10.0 @@ -60,10 +61,8 @@ def shift_positions(R, s): def relaxedScan3D(xTips, yTips, zTips, trj=None, bF3d=False): - if verbose > 0: - print(">>BEGIN: relaxedScan3D()") - if verbose > 0: - print(" zTips : ", zTips) + logger.info(">>BEGIN: relaxedScan3D()") + logger.debug(f" zTips : {zTips}") ntips = len(zTips) rTips = np.zeros((ntips, 3)) rs = np.zeros((ntips, 3)) @@ -98,16 +97,13 @@ def relaxedScan3D(xTips, yTips, zTips, trj=None, bF3d=False): PPpos[:, iy, ix, 0] = rs[::-1, 0] PPpos[:, iy, ix, 1] = rs[::-1, 1] PPpos[:, iy, ix, 2] = rs[::-1, 2] - if verbose > 0: - print("<< 0: - print(">>BEGIN: relaxedScan3D_omp()") - if verbose > 0: - print(" zTips : ", zTips) + logger.info(">>BEGIN: relaxedScan3D_omp()") + logger.debug(f" zTips : {zTips}") nz = len(zTips) ny = len(yTips) nx = len(xTips) @@ -126,8 +122,7 @@ def relaxedScan3D_omp(xTips, yTips, zTips, trj=None, bF3d=False, tip_spline=None fzs = fs[:, :, ::-1, :].transpose(2, 1, 0, 3).copy() else: fzs = fs[:, :, ::-1, 2].transpose(2, 1, 0).copy() - if verbose > 0: - print("<< 0: - print(">>>BEGIN: perform_relaxation()") + logger.info(">>>BEGIN: perform_relaxation()") xTips, yTips, zTips, lvecScan = PPU.prepareScanGrids(parameters=parameters) FF = FFLJ.copy() if FFel is not None: FF += FFel * parameters.charge - if verbose > 0: - print("adding charge:", parameters.charge) + logger.debug(f"adding charge: {parameters.charge}") if FFkpfm_t0sV is not None and FFkpfm_tVs0 is not None: FF += (parameters.charge * FFkpfm_t0sV - FFkpfm_tVs0) * parameters.Vbias - if verbose > 0: - print("adding charge:", parameters.charge, "and bias:", parameters.Vbias, "V") + logger.debug(f"adding charge: {parameters.charge} and bias: {parameters.Vbias} V") if FFpauli is not None: FF += FFpauli * parameters.Apauli if FFboltz != None: @@ -167,8 +159,7 @@ def perform_relaxation( 
core.setFF_Fpointer(FF) if (np.array(parameters.stiffness) < 0.0).any(): parameters.stiffness = np.array([parameters.klat, parameters.klat, parameters.krad]) - if verbose > 0: - print("stiffness:", parameters.stiffness) + logger.debug(f"stiffness: {parameters.stiffness}") core.setTip(kSpring=np.array((parameters.stiffness[0], parameters.stiffness[1], 0.0)) / -PPU.eVA_Nm, kRadial=parameters.stiffness[2] / -PPU.eVA_Nm, parameters=parameters) # grid origin has to be moved to zero, hence the subtraction of lvec[0,:] from trj and xTip, yTips, zTips @@ -189,8 +180,7 @@ def perform_relaxation( PPdisp -= init_pos else: PPdisp = None - if verbose > 0: - print("<< 0: - print(">>>BEGIN: computeLJ()") + logger.info(">>>BEGIN: computeLJ()") # --- load species (LJ potential) FFparams = PPU.loadSpecies(speciesFile) elem_dict = PPU.getFFdict(FFparams) - # print elem_dict # --- load atomic geometry atoms, nDim, lvec = io.loadGeometry(geomFile, format=geometry_format, parameters=parameters) atomstring = io.primcoords2Xsf(PPU.atoms2iZs(atoms[0], elem_dict), [atoms[1], atoms[2], atoms[3]], lvec) - if verbose > 0: - print(parameters.gridN, parameters.gridO, parameters.gridA, parameters.gridB, parameters.gridC) + logger.debug(f"{parameters.gridN}, {parameters.gridO}, {parameters.gridA}, {parameters.gridB}, {parameters.gridC}") iZs, Rs, Qs = PPU.parseAtoms(atoms, elem_dict, autogeom=False, PBC=parameters.PBC, lvec=lvec, parameters=parameters) # --- prepare LJ parameters iPP = PPU.atom2iZ(parameters.probeType, elem_dict) # --- prepare arrays and compute FF, V = prepareArrays(None, computeVpot, parameters=parameters) - if verbose > 0: - print("FFLJ.shape", FF.shape) + logger.debug(f"FFLJ.shape {FF.shape}") core.setFF_shape(np.shape(FF), lvec, parameters=parameters) # shift atoms to the coordinate system in which the grid origin is zero @@ -258,22 +244,18 @@ def computeLJ(geomFile, speciesFile, geometry_format=None, save_format=None, com core.getLennardJonesFF(Rs0, cLJs) # THE MAIN STUFF 
HERE # --- post porces FFs if Fmax is not None: - if verbose > 0: - print("Clamp force >", Fmax) + logger.debug(f"Clamp force > {Fmax}") io.limit_vec_field(FF, Fmax=Fmax) if (Vmax is not None) and computeVpot: - if verbose > 0: - print("Clamp potential >", Vmax) + logger.debug(f"Clamp potential > {Vmax}") V[V > Vmax] = Vmax # remove too large values # --- save to files ? if save_format is not None: - if verbose > 0: - print("computeLJ Save ", save_format) + logger.info(f"Saving Lennard-Jones force field to {save_format}") io.save_vec_field("FF" + ffModel, FF, lvec, data_format=save_format, head=atomstring, atomic_info=(atoms[:4], lvec)) if computeVpot: io.save_scal_field("E" + ffModel, V, lvec, data_format=save_format, head=atomstring, atomic_info=(atoms[:4], lvec)) - if verbose > 0: - print("<< 0: - print(">>>BEGIN: computeELFF_pointCharge()") + logger.info(">>>BEGIN: computeELFF_pointCharge()") tipKinds = {"s": 0, "pz": 1, "dz2": 2} tipKind = tipKinds[tip] - if verbose > 0: - print(" ========= get electrostatic forcefiled from the point charges tip=%s %i " % (tip, tipKind)) + logger.debug(f" ========= get electrostatic forcefield from the point charges tip={tip} {tipKind} ") # --- load atomic geometry FFparams = PPU.loadSpecies() elem_dict = PPU.getFFdict(FFparams) - # print elem_dict atoms, nDim, lvec = io.loadGeometry(geomFile, format=geometry_format, parameters=parameters) atomstring = io.primcoords2Xsf(PPU.atoms2iZs(atoms[0], elem_dict), [atoms[1], atoms[2], atoms[3]], lvec) # --- prepare arrays and compute - if verbose > 0: - print(parameters.gridN, parameters.gridA, parameters.gridB, parameters.gridC) + logger.debug(f"{parameters.gridN}, {parameters.gridA}, {parameters.gridB}, {parameters.gridC}") _, Rs, Qs = PPU.parseAtoms(atoms, elem_dict=elem_dict, autogeom=False, PBC=parameters.PBC, lvec=lvec, parameters=parameters) FF, V = prepareArrays(None, computeVpot, parameters=parameters) core.setFF_shape(np.shape(FF), lvec, parameters=parameters) @@ -348,22 
+326,18 @@ def computeELFF_pointCharge(geomFile, geometry_format=None, tip="s", save_format core.getCoulombFF(Rs0, Qs * PPU.CoulombConst, kind=tipKind) # THE MAIN STUFF HERE # --- post porces FFs if Fmax is not None: - if verbose > 0: - print("Clamp force >", Fmax) + logger.debug(f"Clamp force > {Fmax}") io.limit_vec_field(FF, Fmax=Fmax) if (Vmax is not None) and computeVpot: - if verbose > 0: - print("Clamp potential >", Vmax) + logger.debug(f"Clamp potential > {Vmax}") V[V > Vmax] = Vmax # remove too large values # --- save to files ? if save_format is not None: - if verbose > 0: - print("computeLJ Save ", save_format) + logger.debug(f"computeELFF Save {save_format}") io.save_vec_field("FFel", FF, lvec, data_format=save_format, head=atomstring, atomic_info=(atoms[:4], lvec)) if computeVpot: io.save_scal_field("Vel", V, lvec, data_format=save_format, head=atomstring, atomic_info=(atoms[:4], lvec)) - if verbose > 0: - print("<< 0: - print(" Valence Electron Dict : \n", valElDict_) + from .defaults import valelec_dict + + valElDict_ = valelec_dict.valElDict + logger.debug(f"Valence electrons loaded from defaults") + logger.debug(f" Valence Electron Dict : \n {valElDict_}") return valElDict_ @@ -439,23 +411,19 @@ def subtractCoreDensities( elems, Rs = getAtomsWhichTouchPBCcell(fname, Rcut=Rcore, bSaveDebug=bSaveDebugDens) if valElDict is None: valElDict = loadValenceElectronDict() - print("subtractCoreDensities valElDict ", valElDict) - print("subtractCoreDensities elems ", elems) + logger.debug(f"subtractCoreDensities valElDict {valElDict}") + logger.debug(f"subtractCoreDensities elems {elems}") cRAs = np.array([(-valElDict[elem], Rcore) for elem in elems]) V = np.linalg.det(lvec[1:]) # volume of triclinic cell N = nDim[0] * nDim[1] * nDim[2] dV = V / N # volume of one voxel - if verbose > 0: - print("V : ", V, " N: ", N, " dV: ", dV) - if verbose > 0: - print("sum(RHO): ", rho.sum(), " Nelec: ", rho.sum() * dV, " voxel volume: ", dV) # check sum + logger.debug(f"V 
: {V}, N: {N}, dV: {dV}") + logger.debug(f"sum(RHO): {rho.sum()}, Nelec: {rho.sum() * dV}, voxel volume: {dV}") # check sum core.setFF_shape(rho.shape, lvec, parameters=parameters) # set grid sampling dimension and shape core.setFF_Epointer(rho) # set pointer to array with density data (to write into) - if verbose > 0: - print(">>> Projecting Core Densities ... ") + logger.debug(">>> Projecting Core Densities ... ") core.getDensityR4spline(shift_positions(Rs, -lvec[0]), cRAs.copy()) # Do the job ( the Projection of atoms onto grid ) - if verbose > 0: - print("sum(RHO), Nelec: ", rho.sum(), rho.sum() * dV) # check sum + logger.debug(f"sum(RHO), Nelec: {rho.sum()}, {rho.sum() * dV}") # check sum if bSaveDebugDens: io.saveXSF("rho_subCoreChg.xsf", rho, lvec, head=head) diff --git a/ppafm/PPPlot.py b/ppafm/PPPlot.py index bf772f9e..ee02f8b7 100644 --- a/ppafm/PPPlot.py +++ b/ppafm/PPPlot.py @@ -1,11 +1,11 @@ #!/usr/bin/python -import sys - import matplotlib.pyplot as plt import numpy as np from matplotlib.colors import LinearSegmentedColormap +from .logging_utils import ProgressLogger + # =========== defaults default_figsize = (8, 8) @@ -58,11 +58,6 @@ def colorize_XY2RG(Xs, Ys): return c, vmax -def write_plotting_slice(i): - sys.stdout.write(f"\r plotting slice # {i}") - sys.stdout.flush() - - # =========== plotting functions @@ -85,9 +80,9 @@ def plotImages( V0=0.0, cbar_label=None, ): + progress_logger = ProgressLogger("plot", pre_message="Plotting slice # ") for ii, i in enumerate(slices): - # print(" plotting ", i) - write_plotting_slice(i) + progress_logger.print_message(i, is_last=ii == (len(slices) - 1)) if symmetric_map: limit = max(abs(np.min(F[i] - V0)), abs(np.max(F[i] - V0))) vmin = -limit + V0 @@ -116,9 +111,9 @@ def plotImages( def plotVecFieldRG( prefix, dXs, dYs, slices, extent=None, zs=None, figsize=default_figsize, interpolation=default_interpolation, atoms=None, bonds=None, atomSize=default_atom_size ): + progress_logger = ProgressLogger("plot", 
pre_message="Plotting slice # ") for ii, i in enumerate(slices): - # print(" plotting ", i) - write_plotting_slice(i) + progress_logger.print_message(i, is_last=ii == (len(slices) - 1)) plt.figure(figsize=(10, 10)) HSBs, vmax = colorize_XY2RG(dXs[i], dYs[i]) plt.imshow(HSBs, extent=extent, origin="lower", interpolation=interpolation) @@ -153,9 +148,9 @@ def plotDistortions( bonds=None, atomSize=default_atom_size, ): + progress_logger = ProgressLogger("plot", pre_message="Plotting slice # ") for ii, i in enumerate(slices): - # print(" plotting ", i) - write_plotting_slice(i) + progress_logger.print_message(i, is_last=ii == (len(slices) - 1)) plt.figure(figsize=figsize) plt.plot(X[i, ::by, ::by].flat, Y[i, ::by, ::by].flat, "r.", markersize=markersize) if BG is not None: @@ -196,9 +191,9 @@ def plotArrows( bonds=None, atomSize=default_atom_size, ): + progress_logger = ProgressLogger("plot", pre_message="Plotting slice # ") for ii, i in enumerate(slices): - # print(" plotting ", i) - write_plotting_slice(i) + progress_logger.print_message(i, is_last=ii == (len(slices) - 1)) plt.figure(figsize=figsize) plt.quiver(Xs[::by, ::by], Ys[::by, ::by], dX[::by, ::by], dY[::by, ::by], color="k", headlength=10, headwidth=10, scale=15) if BG is not None: diff --git a/ppafm/__init__.py b/ppafm/__init__.py index 0a4334fb..5dffde5d 100755 --- a/ppafm/__init__.py +++ b/ppafm/__init__.py @@ -1,4 +1,8 @@ #!/usr/bin/python from .common import * +from .logging_utils import configure_logging from .version import __version__ + +# This sets the logging level and other options immediately from environment variables as the ppafm package is imported. +configure_logging() diff --git a/ppafm/atomicUtils.py b/ppafm/atomicUtils.py index bbf28f89..3235de20 100644 --- a/ppafm/atomicUtils.py +++ b/ppafm/atomicUtils.py @@ -5,6 +5,9 @@ import numpy as np from . 
import elements +from .logging_utils import get_logger + +logger = get_logger("atomicUtils") def neighs(natoms, bonds): @@ -163,12 +166,12 @@ def loadCoefs(characters=["s"]): coefs = [] for char in characters: fname = "phi_0000_%s.dat" % char - print(fname) + logger.debug(fname) raw = np.genfromtxt(fname, skip_header=1) Es = raw[:, 0] cs = raw[:, 1:] sh = cs.shape - print(("shape : ", sh)) + logger.debug(f"Shape of data: {sh}") cs = cs.reshape(sh[0], sh[1] // 2, 2) d = cs[:, :, 0] ** 2 + cs[:, :, 1] ** 2 coefs.append(cs[:, :, 0] + 1j * cs[:, :, 1]) @@ -200,7 +203,7 @@ def histR(ps, dbin=None, Rmax=None, weights=None): if Rmax is None: Rmax = rs.max() + 0.5 bins = np.linspace(0, Rmax, int(Rmax / (dbin)) + 1) - print((rs.shape, weights.shape)) + logger.debug(f"{rs.shape} {weights.shape}") return np.histogram(rs, bins, weights=weights) @@ -224,7 +227,7 @@ def findBonds(atoms, iZs, sc, ELEMENTS=elements.ELEMENTS, FFparams=None): ii = iZs[i] - 1 jj = iZs[j] - 1 bondlength = ELEMENTS[ii][6] + ELEMENTS[jj][6] - print(" find bond ", i, j, bondlength, r, sc, (xs[i], ys[i], zs[i]), (xs[j], ys[j], zs[j])) + logger.debug(f"find bond {i} {j} {bondlength} {r} {sc} ({xs[i]}, {ys[i]}, {zs[i]}) ({xs[j]}, {ys[j]}, {zs[j]})") if r < (sc * bondlength): bonds.append((i, j)) return bonds diff --git a/ppafm/chemistry.py b/ppafm/chemistry.py index d734410e..da9e60b9 100644 --- a/ppafm/chemistry.py +++ b/ppafm/chemistry.py @@ -1,16 +1,16 @@ #!/usr/bin/python - import numpy as np from . 
import elements +from .logging_utils import get_logger + +logger = get_logger("chemistry") # =========================== # Molecular Topology # =========================== -iDebug = 0 - def findBonds(xyzs, Rs, fR=1.3): n = len(xyzs) @@ -73,15 +73,12 @@ def findTris(bonds, neighs): for i in a_ngs: if i in b_ngs: common.append(i) - # print "bond ",b," common ",common ncm = len(common) if ncm > 2: - if iDebug > 0: - print("WARRNING: bond ", b, " common neighbors ", common) + logger.warning(f"bond {b} common neighbors {common}") continue elif ncm < 1: - if iDebug > 0: - print("WARRNING: bond ", b, " common neighbors ", common) + logger.warning(f"bond {b} common neighbors {common}") continue tri0 = tuple(sorted(b + (common[0],))) tris.add(tri0) @@ -142,7 +139,7 @@ def trisToPoints(tris, ps): def removeBorderAtoms(ps, cog, R): rnds = np.random.rand(len(ps)) - r2s = np.sum((ps - cog[None, :]) ** 2, axis=1) # ;print "r2s ", r2s, R*R + r2s = np.sum((ps - cog[None, :]) ** 2, axis=1) mask = rnds > r2s / (R * R) return mask @@ -150,9 +147,8 @@ def removeBorderAtoms(ps, cog, R): def validBonds(bonds, mask, na): a2a = np.cumsum(mask) - 1 bonds_ = [] - # print mask + logger.debug(f"validBonds mask: {mask}") for i, j in bonds: - # print i,j if mask[i] and mask[j]: bonds_.append((a2a[i], a2a[j])) return bonds_ @@ -196,7 +192,7 @@ def ringsToMolecule(ring_pos, ring_Rs, Lrange=6.0): ring_N6mask[atom2ring[:,2]] ) ) # fmt: on - neighs = bonds2neighs(bonds, len(atom_pos)) # ;print neighs + neighs = bonds2neighs(bonds, len(atom_pos)) nngs = np.array([len(ngs) for ngs in neighs], dtype=int) atypes = nngs.copy() - 1 @@ -257,7 +253,7 @@ def selectRandomGroups(an, ao, groupDict): out.append(groups[il + 1][0]) else: out.append(None) - print(k, out[-1]) + logger.debug(f"{k} {out[-1]}") return out @@ -395,7 +391,7 @@ def appendHs(txyz, Hmask, elems, xyzs, e1="H", e2="He"): if ndir == 4: # ==== tetrahedral flip = np.random.randint(2) * 2 - 1 if nsigma == 1: # like -CH3 - print(name, ndir, 
nsigma, nH) + logger.debug(f"{name} {ndir} {nsigma} {nH}") txyz = makeTetrahedron(pi - ps[ngs[0]], up * flip) + pi[None, :] appendHs(txyz, Hmasks3[nH], elems, xyzs) elif nsigma == 2: # like -CH2- @@ -417,12 +413,10 @@ def appendHs(txyz, Hmask, elems, xyzs, e1="H", e2="He"): appendHs(normalize(pi - ps[ngs[0]])[0] + pi[None, :], [(1,)], elems, xyzs) else: - print("Group >>%s<< not known" % name) - print("len(xyzs), len(elems) ", len(xyzs), len(elems)) + logger.info(f"Group >>{name}<< not known") + logger.debug(f"len(xyzs), len(elems) {len(xyzs)} {len(elems)}") for xyz in xyzs: - print(len(xyz), end=" ") - if len(xyz) != 3: - print(xyz) + logger.debug(f"{len(xyz)} {xyz if len(xyz) != 3 else ''}") return np.array(xyzs), elems diff --git a/ppafm/cli/conv_rho.py b/ppafm/cli/conv_rho.py index 313264fd..936616cb 100755 --- a/ppafm/cli/conv_rho.py +++ b/ppafm/cli/conv_rho.py @@ -1,9 +1,13 @@ #!/usr/bin/python import gc +import sys import numpy as np from .. import common, fieldFFT, io +from ..logging_utils import get_logger + +logger = get_logger("conv_rho") def handle_aeccar(fname, lvec, rho): @@ -35,21 +39,23 @@ def main(argv=None): args = parser.parse_args(argv) - print(">>> Loading sample from ", args.sample, " ... ") + logger.info(f"Loading sample from {args.sample}") rho_sample, lvec_sample, n_dim_sample, head_sample = io.loadXSF(args.sample) - print(">>> Loading tip from ", args.tip, " ... ") + logger.info(f"Loading tip from {args.tip}") rho_tip, lvec_tip, n_dim_tip, head_tip = io.loadXSF(args.tip) if np.any(n_dim_sample != n_dim_tip): - raise Exception("Tip and Sample grids have different dimensions! - sample: " + str(n_dim_sample) + " tip: " + str(n_dim_tip)) + logger.error(f"Tip and Sample grids have different dimensions! - sample: {n_dim_sample} tip: {n_dim_tip}") + sys.exit(1) if np.any(lvec_sample != lvec_tip): - raise Exception("Tip and Sample grids have different shapes! 
- sample: " + str(lvec_sample) + " tip: " + str(lvec_tip)) + logger.error(f"Tip and Sample grids have different shapes! - sample: {lvec_sample} tip: {lvec_tip}") + sys.exit(1) handle_aeccar(args.sample, lvec_sample, rho_sample) handle_aeccar(args.tip, lvec_tip, rho_tip) if args.Bpauli > 0.0: - print(">>> computing rho^B where B = ", args.Bpauli) + logger.info(f"Computing rho^B where B = {args.Bpauli}") # NOTE: due to round-off error the density from DFT code is often negative in some voxels which produce NaNs after exponentiation; we need to correct this if not args.no_negative_check: handle_negative_density(rho_sample) @@ -61,15 +67,15 @@ def main(argv=None): io.save_scal_field("tip_density_pow_%03.3f.xsf" % args.Bpauli, rho_tip, lvec_tip, data_format=args.output_format, head=head_tip) if args.density_cutoff: - print(f">>> Applying a density cutoff of {args.density_cutoff} to sample and tip electron densities.") + logger.info(f"Applying a density cutoff of {args.density_cutoff} to sample and tip electron densities.") rho_sample[rho_sample > args.density_cutoff] = args.density_cutoff rho_tip[rho_tip > args.density_cutoff] = args.density_cutoff - print(">>> Evaluating convolution E(R) = A*Integral_r ( rho_tip^B(r-R) * rho_sample^B(r) ) using FFT ... ") + logger.info("Evaluating convolution E(R) = A*Integral_r ( rho_tip^B(r-R) * rho_sample^B(r) ) using FFT ... ") f_x, f_y, f_z, energy = fieldFFT.potential2forces_mem(rho_sample, lvec_sample, n_dim_sample, rho=rho_tip, doForce=True, doPot=True, deleteV=True) namestr = args.output - print(">>> Saving result of convolution to FF_", namestr, "_?.xsf ... ") + logger.info(f">>> Saving result of convolution to FF_{namestr}_?.xsf ... ") # Density Overlap Model if args.energy: diff --git a/ppafm/cli/generateDFTD3.py b/ppafm/cli/generateDFTD3.py index 014a93e2..2bfb2401 100755 --- a/ppafm/cli/generateDFTD3.py +++ b/ppafm/cli/generateDFTD3.py @@ -7,6 +7,9 @@ from .. 
import common from ..HighLevel import computeDFTD3 +from ..logging_utils import get_logger + +logger = get_logger("generateDFTD3") def main(argv=None): @@ -45,7 +48,7 @@ def main(argv=None): df_params = {"s6": p[0], "s8": p[1], "a1": p[2], "a2": p[3]} else: if args.df_name not in d3.DF_DEFAULT_PARAMS: - print(f"Unknown functional name `{args.df_name}`!") + logger.error(f"Unknown functional name `{args.df_name}`!") sys.exit(1) df_params = args.df_name diff --git a/ppafm/cli/generateElFF.py b/ppafm/cli/generateElFF.py index 191fa1ec..e2381f52 100755 --- a/ppafm/cli/generateElFF.py +++ b/ppafm/cli/generateElFF.py @@ -12,6 +12,9 @@ loadValenceElectronDict, subtractCoreDensities, ) +from ..logging_utils import get_logger + +logger = get_logger("generateElFF") def main(argv=None): @@ -43,7 +46,8 @@ def main(argv=None): subtract_core_densities = (args.doDensity) and (args.Rcore > 0.0) and (args.tip_dens is not None) if subtract_core_densities: # We do it here, in case it crash we don't want to wait for all the huge density files to load if args.tip_dens is None: - raise Exception("Rcore>0 but no tip density provided!") + logger.error("Rcore>0 but no tip density provided!") + sys.exit(1) valence_electrons_dictionary = loadValenceElectronDict() rs_tip, elems_tip = getAtomsWhichTouchPBCcell(args.tip_dens, Rcut=args.Rcore, parameters=parameters) @@ -66,43 +70,42 @@ def main(argv=None): if args.tip_dens is not None: # No need to renormalize: fieldFFT already works with density - print(">>> Loading tip density from ", args.tip_dens, "...") + logger.info(f">>> Loading tip density from {args.tip_dens}") if args.tip_dens.lower().endswith("xsf"): rho_tip, lvec_tip, _, head_tip = io.loadXSF(args.tip_dens) else: - print(f'ERROR!!! 
Unknown or unsupported format of the tip density file "{args.tip_dens}"\n', file=sys.stderr) + logger.error(f'Unknown or unsupported format of the tip density file "{args.tip_dens}"\n') sys.exit(1) if subtract_core_densities: - print(">>> subtracting core densities from rho_tip ... ") + logger.info(">>> subtracting core densities from rho_tip ... ") subtractCoreDensities(rho_tip, lvec_tip, elems=elems_tip, Rs=rs_tip, valElDict=valence_electrons_dictionary, Rcore=args.Rcore, head=head_tip) parameters.tip = -rho_tip # Negative sign, because the electron density needs to be negative but the input density is positive if args.KPFM_sample is not None: sigma = parameters.sigma - print(parameters.sigma) + logger.debug(f"{parameters.sigma}") if input_format == "xsf" and args.KPFM_sample.lower().endswith(".xsf"): v_ref_s = args.Vref - print(">>> Loading Hartree potential under bias from ", args.KPFM_sample, "...") - print("Use loadXSF") + logger.info(f">>> Loading Hartree potential under bias from {args.KPFM_sample}") + logger.debug("Use loadXSF") v_kpfm, lvec, n_dim, head = io.loadXSF(args.KPFM_sample) elif input_format == "cube" and args.KPFM_sample.lower().endswith(".cube"): v_ref_s = args.Vref - print(">>> Loading Hartree potential under bias from ", args.KPFM_sample, "...") - print("Use loadCUBE") + logger.info(f">>> Loading Hartree potential under bias from {args.KPFM_sample}") + logger.debug("Use loadCUBE") v_kpfm, lvec, n_dim, head = io.loadCUBE(args.KPFM_sample) else: - print( - f'ERROR!!! 
Format of the "{args.KPFM_sample}" file with Hartree potential under bias is unknown or incompatible with the main input format, which is "{input_format}".\n', - file=sys.stderr, + logger.error( + f'Format of the "{args.KPFM_sample}" file with Hartree potential under bias is unknown or incompatible with the main input format, which is "{input_format}".\n' ) sys.exit(1) v_kpfm *= -1 # Unit conversion, energy to potential (eV -> V) dv_kpfm = v_kpfm - electrostatic_potential - print(">>> Loading tip density under bias from ", args.KPFM_tip, "...") + logger.info(f">>> Loading tip density under bias from {args.KPFM_tip}") if input_format == "xsf" and args.KPFM_tip.lower().endswith(".xsf"): v_ref_t = args.Vref rho_tip_kpfm, lvec_tip, _, head_tip = io.loadXSF(args.KPFM_tip) @@ -118,28 +121,23 @@ def main(argv=None): if parameters.probeType == "8": drho_kpfm = {"pz": 0.045} sigma = 0.48 - print(" Select CO-tip polarization ") + logger.info("Select CO-tip polarization ") if parameters.probeType == "47": drho_kpfm = {"pz": 0.21875} sigma = 0.7 - print(" Select Ag polarization with decay sigma", sigma) + logger.info(f"Select Ag polarization with decay sigma {sigma}") if parameters.probeType == "54": drho_kpfm = {"pz": 0.250} sigma = 0.67 - print(" Select Xe-tip polarization") + logger.info("Select Xe-tip polarization") else: - raise ValueError( - 'ERROR!!! 
Neither is "' - + args.KPFM_sample - + '" a density file with an appropriate ("' - + input_format - + '") format\nnor is it a valid name of a tip polarizability model.\n' - ) + logger.error(f'Neither is "{args.KPFM_sample}" a density file with an appropriate ("{input_format}") format nor is it a valid name of a tip polarizability model.') + sys.exit(1) ff_kpfm_t0sv, _ = computeElFF(dv_kpfm, lvec, n_dim, parameters.tip, computeVpot=args.energy, tilt=args.tilt, parameters=parameters) ff_kpfm_tvs0, _ = computeElFF(electrostatic_potential, lvec, n_dim, drho_kpfm, computeVpot=args.energy, tilt=args.tilt, sigma=sigma, deleteV=False, parameters=parameters) - print("Linear E to V") + logger.debug("Linear E to V") zpos = np.linspace(lvec[0, 2] - args.z0, lvec[0, 2] + lvec[3, 2] - args.z0, n_dim[0]) for i in range(n_dim[0]): # z position of the KPFM tip with respect to the sample must not be zero or negative @@ -149,14 +147,14 @@ def main(argv=None): ff_kpfm_t0sv[i, :, :] = ff_kpfm_t0sv[i, :, :] / ((v_ref_s) * (zpos[i] + 0.1)) ff_kpfm_tvs0[i, :, :] = ff_kpfm_tvs0[i, :, :] / ((v_ref_t) * (zpos[i] + 0.1)) - print(">>> Saving electrostatic forcefield ... ") + logger.info(">>> Saving electrostatic forcefield") io.save_vec_field("FFkpfm_t0sV", ff_kpfm_t0sv, lvec_samp, data_format=args.output_format, head=head_samp) io.save_vec_field("FFkpfm_tVs0", ff_kpfm_tvs0, lvec_samp, data_format=args.output_format, head=head_samp) - print(">>> Calculating electrostatic forcefield with FFT convolution as Eel(R) = Integral( rho_tip(r-R) V_sample(r) ) ... ") + logger.info(">>> Calculating electrostatic forcefield with FFT convolution as Eel(R) = Integral( rho_tip(r-R) V_sample(r) )") ff_electrostatic, e_electrostatic = computeElFF(electrostatic_potential, lvec, n_dim, parameters.tip, computeVpot=args.energy, tilt=args.tilt, parameters=parameters) - print(">>> Saving electrostatic forcefield ... 
") + logger.info(">>> Saving electrostatic forcefield") io.save_vec_field("FFel", ff_electrostatic, lvec_samp, data_format=args.output_format, head=head_samp, atomic_info=(atoms_samp[:4], lvec_samp)) if args.energy: diff --git a/ppafm/cli/generateTraining_PVE.py b/ppafm/cli/generateTraining_PVE.py index 7e897a04..21ddc7ac 100644 --- a/ppafm/cli/generateTraining_PVE.py +++ b/ppafm/cli/generateTraining_PVE.py @@ -10,13 +10,16 @@ from .. import common, core from ..HighLevel import prepareArrays, relaxedScan3D +from ..logging_utils import get_logger + +logger = get_logger("generateTraining_PVE") file_format = "xsf" parameters = common.PpafmParameters.from_file("params.ini") if os.path.isfile("atomtypes.ini"): - print(">> LOADING LOCAL atomtypes.ini") + logger.info(">> LOADING LOCAL atomtypes.ini") ff_params = common.loadSpecies("atomtypes.ini") else: ff_params = common.loadSpecies(cpp_utils.PACKAGE_PATH / "defaults" / "atomtypes.ini") @@ -32,11 +35,11 @@ parameters.gridA = lvec_t[1] parameters.gridB = lvec_t[2] parameters.gridC = lvec_t[3] # must be before parseAtoms -print(parameters.gridN, parameters.gridA, parameters.gridB, parameters.gridC) +logger.debug(parameters.gridN, parameters.gridA, parameters.gridB, parameters.gridC) force_field, _ = prepareArrays(None, False) -print("FFLJ.shape", force_field.shape) +logger.debug("FFLJ.shape", force_field.shape) core.setFF_shape(np.shape(force_field), lvec_t, parameters=parameters) base_dir = os.getcwd() @@ -54,7 +57,6 @@ force_field[:, :, :, :] = 0 lj_coefficients = common.getAtomsLJ(pp_indexes, izs, ff_params) - # print "cLJs",cLJs; np.savetxt("cLJs_3D.dat", cLJs); exit() core.getVdWFF(rs, lj_coefficients) # THE MAIN STUFF HERE # Generate Pauli force field. 
diff --git a/ppafm/cli/gui/ppafm_gui.py b/ppafm/cli/gui/ppafm_gui.py index a82ad4d8..a98265cb 100755 --- a/ppafm/cli/gui/ppafm_gui.py +++ b/ppafm/cli/gui/ppafm_gui.py @@ -13,16 +13,20 @@ import matplotlib import numpy as np -from PyQt5 import QtCore, QtGui, QtWidgets +from PyQt5 import QtCore, QtWidgets import ppafm.common as PPU import ppafm.GUIWidgets as guiw -import ppafm.ocl.field as FFcl import ppafm.ocl.oclUtils as oclu -from ppafm import PPPlot, io +from ppafm import io from ppafm.ocl.AFMulator import AFMulator from ppafm.ocl.field import ElectronDensity, HartreePotential, TipDensity +from ...logging_utils import configure_logging, get_logger, get_perf_logger + +logger = get_logger("ppafm-gui") +perf_logger = get_perf_logger("ppafm-gui") + matplotlib.use("Qt5Agg") @@ -94,9 +98,10 @@ def parse_args(): parser = ArgumentParser(description="Probe Particle Model graphical user interface") # fmt: off parser.add_argument("input", nargs='*', help="Optional input file(s). The first file is the main input file containing the xyz geometry or Hartree potential. 
The subsequent optional files, in order, are the sample electron density, tip electron density, and tip electron delta density.") - parser.add_argument("-d", "--device", action="store", type=int, default=0, help="Choose OpenCL device.") - parser.add_argument("-l", "--list-devices", action="store_true", help="List available OpenCL devices and exit.") - parser.add_argument("-v", '--verbosity', action="store", type=int, default=0, help="Set verbosity level (0-2).") + parser.add_argument("-d", "--device", action="store", type=int, default=0, help="Choose OpenCL device.") + parser.add_argument("-l", "--list-devices", action="store_true", help="List available OpenCL devices and exit.") + parser.add_argument("-v", '--log_level', action="store", default="INFO", help="Set logging level.") + parser.add_argument("-p", '--log_performance', action="store_true", help="Enable performance logging.") # fmt: on args = parser.parse_args() if args.input: @@ -118,7 +123,7 @@ class ApplicationWindow(QtWidgets.QMainWindow): df_range = (-1, 1) # min and max df value in colorbar fixed_df_range = False # Keep track if df range was fixed by user or should be set automatically - def __init__(self, input_files=None, device=0, verbose=0): + def __init__(self, input_files=None, device=0): self.df = None self.xyzs = None self.Zs = None @@ -136,16 +141,6 @@ def __init__(self, input_files=None, device=0, verbose=0): oclu.init_env(device) self.afmulator = AFMulator() - # Set verbosity level to same value everywhere - if verbose > 0: - print(f"Verbosity level = {verbose}") - self.verbose = verbose - self.afmulator.verbose = verbose - self.afmulator.forcefield.verbose = verbose - self.afmulator.scanner.verbose = verbose - FFcl.bRuntime = verbose > 1 - self.afmulator.bRuntime = verbose > 1 - # --- init QtMain QtWidgets.QMainWindow.__init__(self) self.setAttribute(QtCore.Qt.WA_DeleteOnClose) @@ -154,7 +149,7 @@ def __init__(self, input_files=None, device=0, verbose=0): l00 = 
QtWidgets.QHBoxLayout(self.main_widget) l1 = QtWidgets.QVBoxLayout(self.main_widget) l00.addLayout(l1, 2) - self.figCan = guiw.FigImshow(parentWiget=self.main_widget, parentApp=self, width=5, height=4, dpi=100, verbose=verbose) + self.figCan = guiw.FigImshow(parentWiget=self.main_widget, parentApp=self, width=5, height=4, dpi=100) l1.addWidget(self.figCan) self.resize(1200, 600) self.main_widget.setFocus() @@ -212,8 +207,7 @@ def setScanWindow(self, scan_size, scan_start, step, distance, amplitude): ) # fmt: on self.afmulator.kCantilever = self.bxCant_K.value() * 1000 self.afmulator.f0Cantilever = self.bxCant_f0.value() * 1000 - if self.verbose > 0: - print("setScanWindow", step, scan_size, scan_start, scan_dim, scan_window) + logger.debug(f"setScanWindow {step}, {scan_size}, {scan_start}, {scan_dim}, {scan_window}") # Set new values to the fields guiw.set_widget_value(self.bxSSx, scan_size[0]) @@ -235,8 +229,7 @@ def setScanWindow(self, scan_size, scan_start, step, distance, amplitude): # Update status bar info ff_dim = self.afmulator.forcefield.nDim self.dim_label.setText(f"Scan dim: {scan_dim[0]}x{scan_dim[1]}x{scan_dim[2]} | " f"FF dim: {ff_dim[0]}x{ff_dim[1]}x{ff_dim[2]}") - if self.verbose > 0: - print("lvec:\n", self.afmulator.forcefield.nDim, self.afmulator.lvec) + logger.debug(f"lvec:\n {self.afmulator.forcefield.nDim}, {self.afmulator.lvec}") def scanWindowFromGeom(self): """Infer and set scan window from current geometry""" @@ -271,8 +264,7 @@ def updateRotation(self): [np.sin(a), np.cos(a), 0], [ 0, 0, 1] ]) # fmt: on - if self.verbose > 0: - print("updateRotation", a, self.rot) + logger.debug(f"updateRotation {a}, {self.rot}") self.update() def updateParams(self, preset_none=True): @@ -303,8 +295,7 @@ def updateParams(self, preset_none=True): Qs = [Q, -2 * Q, Q, 0] QZs = [sigma, 0, -sigma, 0] - if self.verbose > 0: - print("updateParams", Q, sigma, multipole, tipStiffness, tipR0, use_point_charge, A_pauli, B_pauli, type(self.qs)) + 
logger.debug(f"updateParams {Q}, {sigma}, {multipole}, {tipStiffness}, {tipR0}, {use_point_charge}, {A_pauli}, {B_pauli}, {type(self.qs)}") if self.rho_sample is not None: # Use FDBM @@ -389,8 +380,7 @@ def updateDfColorbar(self): def setPBC(self, lvec, enabled): """Set periodic boundary condition lattice""" - if self.verbose > 0: - print("setPBC", lvec, enabled) + logger.debug(f"setPBC {lvec}, {enabled}") if enabled: self.sample_lvec = lvec @@ -460,8 +450,7 @@ def update(self): """Run simulation, and show the result""" if self.xyzs is None: return - if self.verbose > 1: - t0 = time.perf_counter() + t0 = time.perf_counter() self.status_message("Running simulation...") try: self.df = self.afmulator( @@ -473,10 +462,9 @@ def update(self): rot=self.rot, ) except Exception: - traceback.print_exc() + logger.error(f"Error during simulation:\n{traceback.format_exc()}") guiw.show_warning(self, f"Error during simulation! Error message:\n{traceback.format_exc()}", "Simulation error!") - if self.verbose > 1: - print(f"AFMulator total time [s]: {time.perf_counter() - t0}") + perf_logger.info(f"AFMulator total time [s]: {time.perf_counter() - t0}") self.status_message("Updating plot...") self.updateDataView() if self.FFViewer.isVisible(): @@ -510,8 +498,7 @@ def loadInput(self, main_input, rho_sample=None, rho_tip=None, rho_tip_delta=Non # Load input file file_name = os.path.split(main_input)[1].lower() ext = os.path.splitext(file_name)[1] - if self.verbose > 0: - print(f"loadInput: {main_input}, {file_name}, {ext}") + logger.debug(f"loadInput: {main_input}, {file_name}, {ext}") if file_name in ["poscar", "contcar"]: xyzs, Zs, lvec = io.loadPOSCAR(main_input) qs = np.zeros(len(Zs)) @@ -577,14 +564,12 @@ def loadInput(self, main_input, rho_sample=None, rho_tip=None, rho_tip_delta=Non for name, preset in Presets.items(): # Try to find a matching preset if preset["Z"] == Zpp: - if self.verbose > 0: - print(f"Setting preset `{name}` based on tip density file geometry.") + 
logger.info(f"Setting preset `{name}` based on tip density file geometry.") self.slPreset.setCurrentText(name) self.applyPreset(update=False) break else: - if self.verbose > 0: - print(f"Setting probe particle type `{Zpp}` based on tip density file geometry.") + logger.info(f"Setting probe particle type `{Zpp}` based on tip density file geometry.") guiw.set_widget_value(self.bxZPP, Zpp) self.bxPC.blockSignals(True) @@ -662,9 +647,8 @@ def showGeometry(self): from ase import Atoms from ase.visualize import view except ModuleNotFoundError: - print("No ase installation detected. Cannot show molecule geometry.") - if self.verbose > 1: - traceback.print_exc() + logger.error("No ase installation detected. Cannot show molecule geometry.") + logger.debug(traceback.format_exc()) return atoms = Atoms( positions=self.xyzs, @@ -677,15 +661,14 @@ def showGeometry(self): def openFile(self): self.openFileDialog.exec() file_paths = self.openFileDialog.paths - if self.verbose > 0: - print("openFile", file_paths) + logger.info(f"openFile {file_paths}") if file_paths is None: return self.status_message("Opening file(s)...") try: self.loadInput(**file_paths) except: - traceback.print_exc() + logger.error(f"Error while opening files:\n{traceback.format_exc()}") guiw.show_warning(self, f"Ran into an error while opening a file! 
Error message:\n{traceback.format_exc()}", "File open error!") self.status_message("Ready") @@ -697,8 +680,7 @@ def saveFig(self): if fileName: self.status_message("Saving image...") fileName = guiw.correct_ext(fileName, ".png") - if self.verbose > 0: - print("Saving image to :", fileName) + logger.info(f"Saving image to : {fileName}") self.figCan.fig.savefig(fileName, bbox_inches="tight") self.status_message("Ready") @@ -712,11 +694,10 @@ def saveDataW(self): ext = os.path.splitext(fileName)[1] if ext not in [".xyz", ".xsf"]: self.status_message("Unsupported file type in df save file path") - print(f"Unsupported file type in df save file path `{fileName}`") + logger.error(f"Unsupported file type in df save file path `{fileName}`") return self.status_message("Saving data...") - if self.verbose > 0: - print(f"Saving df data to {fileName}...") + logger.info(f"Saving df data to {fileName}...") if ext == ".xyz": data = self.df[:, :, -1].T xs = np.linspace(0, self.bxSSx.value(), data.shape[1], endpoint=False) @@ -740,11 +721,9 @@ def saveDataW(self): atomstring = io.primcoords2Xsf(self.Zs, self.xyzs.T, lvec) else: atomstring = io.XSF_HEAD_DEFAULT - io.saveXSF(fileName, data, lvecScan, head=atomstring, verbose=0) + io.saveXSF(fileName, data, lvecScan, head=atomstring) else: raise RuntimeError("This should not happen. Missing file format check?") - if self.verbose > 0: - print("Done saving df data.") self.status_message("Ready") def updateDataView(self): @@ -776,11 +755,10 @@ def updateDataView(self): self.figCan.plotSlice(data, -1, title=title, points=self.df_points, cbar_range=cbar_range, extent=extent) except Exception: - print(f"Failed to plot df slice.\n{traceback.format_exc()}") + logger.error(f"Failed to plot df slice.\n{traceback.format_exc()}") guiw.show_warning(self, f"Ran into an error while plotting! 
Error message:\n{traceback.format_exc()}", "Plot error!") - if self.verbose > 1: - print(f"plotSlice time {time.perf_counter() - t1:.5f} [s]") + perf_logger.info(f"plotSlice time {time.perf_counter() - t1:.5f} [s]") def clickImshow(self, x, y): if self.df is None: @@ -791,8 +769,7 @@ def clickImshow(self, x, y): x_step, y_step = self.bxStepX.value(), self.bxStepY.value() ix = int(round((x - x_min) / x_step)) iy = int(round((y - y_min) / y_step)) - if self.verbose > 0: - print("clickImshow", ix, iy, x, y) + logger.debug(f"clickImshow {ix}, {iy}, {x}, {y}") # Remember coordinates in case scan_start changes self.df_points.append((x, y)) @@ -811,8 +788,7 @@ def clearPoints(self): self.updateDataView() def zoomTowards(self, x, y, zoom_direction): - if self.verbose > 0: - print("zoomTowards", x, y, zoom_direction) + logger.debug(f"zoomTowards {x}, {y}, {zoom_direction}") scan_size = np.array([self.bxSSx.value(), self.bxSSy.value()]) scan_start = np.array([self.bxSCx.value(), self.bxSCy.value()]) @@ -846,8 +822,7 @@ def saveParams(self): fileName, _ = QtWidgets.QFileDialog.getSaveFileName(self, "Save parameters", default_path, "(*.ini)") if not fileName: return - if self.verbose > 0: - print(f"Saving current parameters to `{fileName}`") + logger.info(f"Saving current parameters to `{fileName}`") self.afmulator.save_params(fileName) self.status_message("Saved parameters") @@ -1272,7 +1247,7 @@ def _create_buttons_ui(self): self.l0.addLayout(vb) # Forcefield viewer - self.FFViewer = guiw.FFViewer(self, verbose=self.verbose) + self.FFViewer = guiw.FFViewer(self) bt = QtWidgets.QPushButton("View Forcefield", self) bt.setToolTip(TTips["view_ff"]) bt.clicked.connect(self.showFFViewer) @@ -1346,18 +1321,19 @@ def _spin_box(value_range, value, step, connect_func, tool_tip, parent, stretch= def main(): qApp = QtWidgets.QApplication(sys.argv) args = parse_args() + configure_logging(level=args.log_level, log_performance=args.log_performance) if args.list_devices: - 
print("\nAvailable OpenCL platforms:") + logger.info("Available OpenCL platforms:") oclu.print_platforms() sys.exit(0) try: - aw = ApplicationWindow(args.input, args.device, args.verbosity) + aw = ApplicationWindow(args.input, args.device) aw.show() sys.exit(qApp.exec_()) except SystemExit: pass except: - traceback.print_exc() + logger.error(f"Ran into an error:\n{traceback.format_exc()}") guiw.show_warning(None, f"Ran into an error!\n\n{traceback.format_exc()}", "Error!") diff --git a/ppafm/cli/plot_results.py b/ppafm/cli/plot_results.py index fc973775..121a9ebb 100644 --- a/ppafm/cli/plot_results.py +++ b/ppafm/cli/plot_results.py @@ -8,9 +8,12 @@ from .. import PPPlot, atomicUtils, common, io from ..HighLevel import symGauss +from ..logging_utils import get_logger + +logger = get_logger("plot_results") mpl.use("Agg") -print("plot WITHOUT Xserver") +logger.debug("plot WITHOUT Xserver") # this makes it run without Xserver (e.g. on supercomputer) # see http://stackoverflow.com/questions/4931376/generating-matplotlib-graphs-without-a-running-x-server @@ -47,8 +50,6 @@ def main(argv=None): if opt_dict["Laplace"]: from scipy.ndimage import laplace - print(" >> OVEWRITING SETTINGS by command line arguments ") - # Spring constants. 
if opt_dict["krange"] is not None: k_constants = np.linspace(opt_dict["krange"][0], opt_dict["krange"][1], int(opt_dict["krange"][2])) @@ -88,11 +89,11 @@ def main(argv=None): applied_bias = True if applied_bias: - print("Vs =", bias_voltages) - print("Ks =", k_constants) - print("Qs =", charges) - print("Amps =", amplitudes) - print(" ============= RUN ") + logger.info(f"Vs = {bias_voltages}") + logger.info(f"Ks = {k_constants}") + logger.info(f"Qs = {charges}") + logger.info(f"Amps = {amplitudes}") + logger.info(" ============= RUN ") dz = parameters.scanStep[2] tip_positions_x, tip_positions_y, tip_positions_z, _ = common.prepareScanGrids(parameters=parameters) @@ -136,7 +137,7 @@ def main(argv=None): dirname = f"Q{charge:1.2f}K{stiffness:1.2f}V{voltage:1.2f}" if opt_dict["pos"]: pp_positions, lvec, _, atomic_info_or_head = io.load_vec_field(dirname + "/PPpos", data_format=args.output_format) - print("Plotting PPpos: ") + logger.info("Plotting PPpos: ") PPPlot.plotDistortions( dirname + "/xy" + atoms_str + cbar_str, pp_positions[:, :, :, 0], @@ -150,14 +151,13 @@ def main(argv=None): markersize=2.0, cbar=opt_dict["cbar"], ) - print() if opt_dict["iets"] is not None: eigenvalue_k, lvec, _, atomic_info_or_head = io.load_vec_field(dirname + "/eigvalKs", data_format=args.output_format) iets_m = opt_dict["iets"][0] iets_e = opt_dict["iets"][1] iets_w = opt_dict["iets"][2] - print(f"Plotting IETS M={iets_m:f} V={iets_e:f} w={iets_w:f}") + logger.info(f"Plotting IETS M={iets_m:f} V={iets_e:f} w={iets_w:f}") e_vib = common.HBAR * np.sqrt((common.eVA_Nm * eigenvalue_k) / (iets_m * common.AUMASS)) iets = symGauss(e_vib[:, :, :, 0], iets_e, iets_w) + symGauss(e_vib[:, :, :, 1], iets_e, iets_w) + symGauss(e_vib[:, :, :, 2], iets_e, iets_w) @@ -172,7 +172,6 @@ def main(argv=None): atomSize=atom_size, cbar=opt_dict["cbar"], ) - print() PPPlot.plotImages( dirname + "/Evib" + atoms_str + cbar_str, @@ -185,7 +184,6 @@ def main(argv=None): atomSize=atom_size, 
cbar=opt_dict["cbar"], ) - print() PPPlot.plotImages( dirname + "/Kvib" + atoms_str + cbar_str, @@ -198,11 +196,10 @@ def main(argv=None): atomSize=atom_size, cbar=opt_dict["cbar"], ) - print() if opt_dict["bI"]: current, lvec, _, atomic_info_or_head = io.load_scal_field(dirname + "/OutI_boltzmann", data_format=args.output_format) - print("Plotting Boltzmann current: ") + logger.info("Plotting Boltzmann current: ") PPPlot.plotImages( dirname + "/OutI" + atoms_str + cbar_str, current, @@ -214,12 +211,10 @@ def main(argv=None): atomSize=atom_size, cbar=opt_dict["cbar"], ) - print() if opt_dict["LCPD_maps"]: if len(bias_voltages) < 3: - print("At last three different values of volage needed to evaluate LCPD!") - print("LCPD will not be calculated here.") + logger.warning("At least three different values of voltage needed to evaluate LCPD! LCPD will not be calculated here.") opt_dict["LCPD_maps"] = False else: # Prepare to calculate KPFM/LCPD @@ -277,7 +272,7 @@ def main(argv=None): for iv, voltage in enumerate(bias_voltages): parameters.Amplitude = amplitude amp_string = f"/Amp{amplitude:2.2f}" - print("Amplitude= ", amp_string) + logger.info(f"Amplitude = {amp_string}") dirname0 = f"Q{charge:1.2f}K{stiffness:1.2f}" if applied_bias: dirname = dirname0 + f"V{voltage:1.2f}" @@ -289,12 +284,7 @@ def main(argv=None): os.makedirs(dir_name_amplitude) if parameters.tiltedScan: - ( - f_out, - lvec, - _, - atomic_info_or_head, - ) = io.load_vec_field(dirname + "/OutF", data_format=args.output_format) + (f_out, lvec, _, atomic_info_or_head) = io.load_vec_field(dirname + "/OutF", data_format=args.output_format) dfs = common.Fz2df_tilt( f_out, parameters.scanTilt, @@ -307,12 +297,7 @@ def main(argv=None): lvec_df[0] = lvec_df[0] + lvec_df[3] / lvec_3_norm * amplitude / 2 lvec_df[3] = lvec_df[3] / lvec_3_norm * (lvec_3_norm - amplitude) else: - ( - fzs, - lvec, - _, - atomic_info_or_head, - ) = io.load_scal_field(dirname + "/OutFz", data_format=args.output_format) + (fzs, lvec, _, 
atomic_info_or_head) = io.load_scal_field(dirname + "/OutFz", data_format=args.output_format) if applied_bias: r_tip = parameters.Rtip for iz, z in enumerate(tip_positions_z): @@ -340,7 +325,7 @@ def main(argv=None): atomic_info=atomic_info_or_head, ) if opt_dict["df"]: - print("Plotting df: ") + logger.info("Plotting df: ") PPPlot.plotImages( dir_name_amplitude + "/df" + atoms_str + cbar_str, dfs, @@ -354,10 +339,9 @@ def main(argv=None): cbar=opt_dict["cbar"], cbar_label="df [Hz]", ) - print("") if opt_dict["Laplace"]: - print("Plotting Laplace-filtered df: ") + logger.info("Plotting Laplace-filtered df: ") df_laplace_filtered = dfs.copy() laplace(dfs, output=df_laplace_filtered) io.save_scal_field( @@ -380,10 +364,9 @@ def main(argv=None): atomSize=atom_size, cbar=opt_dict["cbar"], ) - print() if opt_dict["WSxM"]: - print(" printing df into WSxM files :") + logger.info("Printing df into WSxM files :") io.saveWSxM_3D(dir_name_amplitude + "/df", dfs, extent, slices=None) if opt_dict["LCPD_maps"]: @@ -399,7 +382,7 @@ def main(argv=None): if opt_dict["LCPD_maps"]: lcpd = -kpfm_b / (2 * kpfm_a) - print("Plotting LCPD: ") + logger.info("Plotting LCPD: ") if not os.path.exists(dir_name_lcpd): os.makedirs(dir_name_lcpd) PPPlot.plotImages( @@ -417,7 +400,6 @@ def main(argv=None): V0=args.V0, cbar_label="V_LCPD [V]", ) - print() PPPlot.plotImages( dir_name_lcpd + "/_Asym-LCPD" + atoms_str + cbar_str, @@ -433,7 +415,6 @@ def main(argv=None): symmetric_map=False, cbar_label="V_LCPD [V]", ) - print() io.save_scal_field( dir_name_lcpd + "/LCPD", @@ -445,7 +426,7 @@ def main(argv=None): ) if opt_dict["WSxM"]: - print("Saving LCPD into WSxM files :") + logger.info("Saving LCPD into WSxM files :") io.saveWSxM_3D(dir_name_lcpd + "/LCPD" + atoms_str, lcpd, extent, slices=None) if opt_dict["Fz"]: @@ -455,7 +436,7 @@ def main(argv=None): _, atomic_info_or_head, ) = io.load_scal_field(dirname + "/OutFz", data_format=args.output_format) - print("Plotting Fz: ") + logger.info("Plotting 
Fz: ") PPPlot.plotImages( dirname + "/Fz" + atoms_str + cbar_str, fzs, @@ -469,9 +450,8 @@ def main(argv=None): cbar=opt_dict["cbar"], cbar_label="Fz [eV/Angstrom]", ) - print("") - print(" ***** ALL DONE ***** ") + logger.info(" ***** ALL DONE ***** ") if __name__ == "__main__": diff --git a/ppafm/cli/relaxed_scan.py b/ppafm/cli/relaxed_scan.py index 8f886855..66a48739 100755 --- a/ppafm/cli/relaxed_scan.py +++ b/ppafm/cli/relaxed_scan.py @@ -9,6 +9,9 @@ from .. import common, core, io from ..GridUtils import interpolate_cartesian from ..HighLevel import perform_relaxation +from ..logging_utils import get_logger + +logger = get_logger("relaxed_scan") def rotate_vector(v, a): @@ -91,42 +94,41 @@ def main(argv=None): applied_bias = True if applied_bias: - print("Vs =", voltages) - print("Ks =", k_constants) - print("Qs =", charges) - - print(" ============= RUN ") + logger.info(f"Vs = {voltages}") + logger.info(f"Ks = {k_constants}") + logger.info(f"Qs = {charges}") + logger.info(" ============= RUN ") ff_vdw = ff_pauli = ff_electrostatics = ff_boltzman = ff_kpfm_t0sv = ff_kpfm_tvs0 = None if args.noLJ: - print("Apauli", parameters.Apauli) + logger.info(f"Using separate Pauli and vdW force fields. 
Apauli: {parameters.Apauli}") - print("Loading Pauli force field from FFpauli_{x,y,z}") + logger.info("Loading Pauli force field from FFpauli_{x,y,z}") ff_pauli, lvec, _, atomic_info_or_head = io.load_vec_field("FFpauli", data_format=args.output_format) ff_pauli[0, :, :, :], ff_pauli[1, :, :, :] = rotate_ff(ff_pauli[0, :, :, :], ff_pauli[1, :, :, :], opt_dict["rotate"]) - print("Loading vdW force field from FFvdW_{x,y,z}") + logger.info("Loading vdW force field from FFvdW_{x,y,z}") ff_vdw, lvec, _, atomic_info_or_head = io.load_vec_field("FFvdW", data_format=args.output_format) ff_vdw[0, :, :, :], ff_vdw[1, :, :, :] = rotate_ff(ff_vdw[0, :, :, :], ff_vdw[1, :, :, :], opt_dict["rotate"]) else: - print("Loading Lennard-Jones force field from FFLJ_{x,y,z}") + logger.info("Loading Lennard-Jones force field from FFLJ_{x,y,z}") ff_vdw, lvec, _, atomic_info_or_head = io.load_vec_field("FFLJ", data_format=args.output_format) ff_vdw[0, :, :, :], ff_vdw[1, :, :, :] = rotate_ff(ff_vdw[0, :, :, :], ff_vdw[1, :, :, :], opt_dict["rotate"]) if charged_system: - print("Loading electrostatic force field from FFel_{x,y,z}") + logger.info("Loading electrostatic force field from FFel_{x,y,z}") ff_electrostatics, lvec, _, atomic_info_or_head = io.load_vec_field("FFel", data_format=args.output_format) ff_electrostatics[0, :, :, :], ff_electrostatics[1, :, :, :] = rotate_ff(ff_electrostatics[0, :, :, :], ff_electrostatics[1, :, :, :], opt_dict["rotate"]) if args.boltzmann or args.bI: - print("Loading Boltzmann force field from FFboltz_{x,y,z}") + logger.info("Loading Boltzmann force field from FFboltz_{x,y,z}") ff_boltzman, lvec, _, atomic_info_or_head = io.load_vec_field("FFboltz", data_format=args.output_format) ff_boltzman[0, :, :, :], ff_boltzman[1, :, :, :] = rotate_ff(ff_boltzman[0, :, :, :], ff_boltzman[1, :, :, :], opt_dict["rotate"]) if applied_bias: - print("Loading electrostatic contribution from applied bias from FFkpfm_t0sV_{x,y,z} and FFkpfm_tVs0_{x,y,z}") + 
logger.info("Loading electrostatic contribution from applied bias from FFkpfm_t0sV_{x,y,z} and FFkpfm_tVs0_{x,y,z}") ff_kpfm_t0sv, lvec, _, atomic_info_or_head = io.load_vec_field("FFkpfm_t0sV", data_format=args.output_format) ff_kpfm_tvs0, lvec, _, atomic_info_or_head = io.load_vec_field("FFkpfm_tVs0", data_format=args.output_format) @@ -143,11 +145,11 @@ def main(argv=None): if args.tipspline is not None: try: tip_spline = core.SplineParameters.from_file(args.tipspline) - print(f"Loaded tip spline from {args.tipspline}") - print("xs: ", tip_spline.rff_xs) - print("ydys: ", tip_spline.rff_ydys) + logger.info(f"Loaded tip spline from {args.tipspline}") + logger.debug(f"xs: {tip_spline.rff_xs}") + logger.debug(f"ydys: {tip_spline.rff_ydys}") except: - print(f"Could not load tip spline from {args.tipspline}") + logger.error(f"Could not load tip spline from {args.tipspline}") sys.exit(1) else: tip_spline = None @@ -160,7 +162,7 @@ def main(argv=None): dirname = f"Q{charge:1.2f}K{stiffness:1.2f}" if applied_bias: dirname += f"V{voltage:1.2f}" - print(" Relaxed_scan for ", dirname) + logger.info(f"Relaxed_scan for {dirname}") if not os.path.exists(dirname): os.makedirs(dirname) @@ -187,7 +189,7 @@ def main(argv=None): if opt_dict["vib"] >= 0: which = opt_dict["vib"] - print(f" === Computing eigenvectors of dynamical matrix: which={which} ddisp={parameters.ddisp}") + logger.info(f" === Computing eigenvectors of dynamical matrix: which={which} ddisp={parameters.ddisp}") tip_positions_x, tip_positions_y, tip_positions_z, lvec_scan = common.prepareScanGrids() r_tips = np.array(np.meshgrid(tip_positions_x, tip_positions_y, tip_positions_z)).transpose(3, 1, 2, 0).copy() evals, evecs = core.stiffnessMatrix( @@ -197,9 +199,9 @@ def main(argv=None): ddisp=parameters.ddisp, tip_spline=tip_spline, ) - print("vib eigenval 1 min..max : ", np.min(evals[:, 0]), np.max(evals[:, 0])) - print("vib eigenval 2 min..max : ", np.min(evals[:, 1]), np.max(evals[:, 1])) - print("vib eigenval 3 
min..max : ", np.min(evals[:, 2]), np.max(evals[:, 2])) + logger.debug(f"vib eigenval 1 min..max : {np.min(evals[:, 0])}..{np.max(evals[:, 0])}") + logger.debug(f"vib eigenval 2 min..max : {np.min(evals[:, 1])}..{np.max(evals[:, 1])}") + logger.debug(f"vib eigenval 3 min..max : {np.min(evals[:, 2])}..{np.max(evals[:, 2])}") io.save_vec_field(dirname + "/eigvalKs", evals.reshape(r_tips.shape), **data_info) if which > 0: io.save_vec_field(dirname + "/eigvecK1", evecs[0].reshape(r_tips.shape), **data_info) @@ -215,7 +217,7 @@ def main(argv=None): io.save_vec_field(dirname + "/PPpos", pp_positions, **data_info) if args.bI: - print("Calculating current from tip to the Boltzmann particle:") + logger.info("Calculating current from tip to the Boltzmann particle:") current_in, lvec, _, atomic_info_or_head = io.load_scal_field("I_boltzmann", data_format=args.output_format) current_out = interpolate_cartesian(current_in, pp_positions, cell=lvec[1:, :], result=None) io.save_scal_field(dirname + "/OutI_boltzmann", current_out, **data_info) diff --git a/ppafm/common.py b/ppafm/common.py index ed5b7ec0..98090068 100644 --- a/ppafm/common.py +++ b/ppafm/common.py @@ -11,8 +11,9 @@ import toml from . 
import cpp_utils +from .logging_utils import get_logger -verbose = 0 +logger = get_logger("common") # ====================== constants @@ -98,7 +99,7 @@ def from_file(cls, file_path: typing.Union[str, Path] = Path("params.ini")): def apply_options(self, obj): for key, value in obj.items(): - print("Applying", key, value) + logger.debug(f"Applying {key} {value}") if hasattr(self, key) and value is not None: setattr(self, key, value) @@ -123,39 +124,31 @@ def load_ini(self, lines): key = words[0] if hasattr(self, key): val = getattr(self, key) - if verbose > 0: - print(key, " is class ", val.__class__) + logger.debug(f"{key} is class {val.__class__}") try: if isinstance(val, bool): word = words[1].strip() setattr(self, key, word[0] == "T" or word[0] == "t") - if verbose > 0: - print(key, getattr(self, key), ">>", word, "<<") + logger.debug(f"{key} {getattr(self, key)} >> {word} <<") elif isinstance(val, float): setattr(self, key, float(words[1])) - if verbose > 0: - print(key, getattr(self, key), words[1]) + logger.debug(f"{key} {getattr(self, key)} {words[1]}") elif isinstance(val, int): setattr(self, key, int(words[1])) - if verbose > 0: - print(key, getattr(self, key), words[1]) + logger.debug(f"{key} {getattr(self, key)} {words[1]}") elif isinstance(val, str): setattr(self, key, words[1]) elif isinstance(val, list): if isinstance(val[0], float): setattr(self, key, [float(words[1]), float(words[2]), float(words[3])]) - if verbose > 0: - print(key, getattr(self, key), words[1], words[2], words[3]) + logger.debug(f"{key} {getattr(self, key)} {words[1]} {words[2]} {words[3]}") elif isinstance(val[0], int): - if verbose > 0: - print(key) + logger.debug(f"{key}") setattr(self, key, [int(words[1]), int(words[2]), int(words[3])]) - if verbose > 0: - print(key, getattr(self, key), words[1], words[2], words[3]) + logger.debug(f"{key} {getattr(self, key)} {words[1]} {words[2]} {words[3]}") else: setattr(self, key, [str(words[1]), float(words[2])]) - if verbose > 0: - 
print(key, getattr(self, key), words[1], words[2]) + logger.debug(f"{key} {getattr(self, key)} {words[1]} {words[2]}") except: raise ValueError(f"Error parsing parameters on line: {line}") from None else: @@ -532,11 +525,9 @@ def autoGridN(parameters): # load atoms species parameters form a file ( currently used to load Lennard-Jones parameters ) def loadSpecies(fname=None): if fname is None or not os.path.exists(fname): - if verbose > 0: - print("WARRNING: loadSpecies(None) => load default atomtypes.ini") + logger.debug("loadSpecies(None) => load default atomtypes.ini") fname = cpp_utils.PACKAGE_PATH / "defaults" / "atomtypes.ini" - if verbose > 0: - print(" loadSpecies from ", fname) + logger.info(f"loadSpecies from {fname}") # FFparams=np.genfromtxt(fname,dtype=[('rmin',np.float64),('epsilon',np.float64),('atom',int),('symbol', '|S10')],usecols=[0,1,2,3]) FFparams = np.genfromtxt( fname, @@ -558,7 +549,6 @@ def loadSpeciesLines(lines): for l in lines: l = l.split() if len(l) >= 5: - # print l parameters.append((float(l[0]), float(l[1]), float(l[2]), int(l[3]), l[4])) return np.array( parameters, @@ -579,8 +569,7 @@ def autoGeom(Rs, parameters, shiftXY=False, fitCell=False, border=3.0): """ zmax = max(Rs[2]) Rs[2] -= zmax - if verbose > 0: - print(" autoGeom subtracted zmax = ", zmax) + logger.debug(f"autoGeom subtracted zmax = {zmax}") xmin = min(Rs[0]) xmax = max(Rs[0]) @@ -595,27 +584,22 @@ def autoGeom(Rs, parameters, shiftXY=False, fitCell=False, border=3.0): parameters.scanMin[1] = 0 parameters.scanMax[0] = parameters.gridA[0] parameters.scanMax[1] = parameters.gridB[1] - if verbose > 0: - print(" autoGeom changed cell to = ", parameters.scanM) + logger.debug(f"autoGeom changed cell to = {parameters.scanMin}") if shiftXY: dx = -0.5 * (xmin + xmax) + 0.5 * (parameters.gridA[0] + parameters.gridB[0]) Rs[0] += dx dy = -0.5 * (ymin + ymax) + 0.5 * (parameters.gridA[1] + parameters.gridB[1]) Rs[1] += dy - if verbose > 0: - print(" autoGeom moved geometry by ", 
dx, dy) + logger.debug(f"autoGeom moved geometry by {dx}, {dy}") def wrapAtomsCell(Rs, da, db, avec, bvec): M = np.array((avec[:2], bvec[:2])) invM = np.linalg.inv(M) - if verbose > 0: - print(M) - if verbose > 0: - print(invM) + logger.debug(f"wrapAtomsCell M = \n{M}") + logger.debug(f"wrapAtomsCell invM = \n{invM}") ABs = np.dot(Rs[:, :2], invM) - if verbose > 0: - print("ABs.shape", ABs.shape) + logger.debug(f"wrapAtomsCell ABs.shape = {ABs.shape}") ABs[:, 0] = (ABs[:, 0] + 10 + da) % 1.0 ABs[:, 1] = (ABs[:, 1] + 10 + db) % 1.0 Rs[:, :2] = np.dot(ABs, M) @@ -746,7 +730,6 @@ def PBCAtoms3D_np(Zs, Rs, Qs, cLJs, REAs, lvec, npbc=[1, 1, 1]): multiply atoms of sample along supercell vectors the multiplied sample geometry is used for evaluation of forcefield in Periodic-boundary-Conditions ( PBC ) """ - # print( "PBCAtoms3D_np lvec", lvec ) mx = npbc[0] * 2 + 1 my = npbc[1] * 2 + 1 mz = npbc[2] * 2 + 1 @@ -823,8 +806,7 @@ def multRot(Zs, Rs, Qs, cLJs, rots, cog=(0, 0, 0)): def getFFdict(FFparams): elem_dict = {} for i, ff in enumerate(FFparams): - if verbose > 0: - print(i, ff) + logger.debug(f"getFFdict {i} {ff}") elem_dict[ff[4]] = i + 1 return elem_dict @@ -846,14 +828,12 @@ def atoms2iZs(names, elem_dict): def parseAtoms(atoms, elem_dict, PBC=True, autogeom=False, lvec=None, parameters=None): Rs = np.array([atoms[1], atoms[2], atoms[3]]) if elem_dict is None: - if verbose > 0: - print("WARRNING: elem_dict is None => iZs are zero") + logger.warning("elem_dict is None => iZs are zero") iZs = np.zeros(len(atoms[0])) else: iZs = atoms2iZs(atoms[0], elem_dict) if autogeom: - if verbose > 0: - print("WARRNING: autoGeom shifts atoms") + logger.warning("autoGeom shifts atoms") autoGeom(Rs, shiftXY=True, fitCell=True, border=3.0) Rs = np.transpose(Rs, (1, 0)).copy() Qs = np.array(atoms[4]) diff --git a/ppafm/core.py b/ppafm/core.py index b84276b0..271aff4e 100755 --- a/ppafm/core.py +++ b/ppafm/core.py @@ -9,6 +9,9 @@ from . 
import cpp_utils from .defaults import d3 from .io import bohrRadius2angstroem +from .logging_utils import get_logger + +logger = get_logger("core") # ============================== # ============================== interface to C++ core @@ -59,8 +62,7 @@ def setGridCell(cell=None, parameters=None): cell = cell[1:, :].copy() else: raise ValueError("cell has wrong format") - exit() - print("cell", cell) + logger.debug(f"cell {cell}") lib.setGridCell(cell) @@ -125,10 +127,10 @@ def setFF(cell=None, gridF=None, gridE=None, parameters=None): ] ).copy() if n_ is not None: - print("setFF() n_ : ", n_) + logger.debug(f"setFF() n_ : {n_}") setFF_shape(n_, cell, parameters=parameters) else: - "Warrning : setFF shape not set !!!" + logger.warning("setFF shape not set !!!") # void setRelax( int maxIters, double convF2, double dt, double damping ) @@ -163,11 +165,11 @@ def setTip(lRadial=None, kRadial=None, rPP0=None, kSpring=None, parameters=None) rPP0 = np.array((parameters.r0Probe[0], parameters.r0Probe[1], 0.0)) if kSpring is None: kSpring = np.array((parameters.klat, parameters.klat, 0.0)) / -PPU.eVA_Nm - print(" IN setTip !!!!!!!!!!!!!! ") - print(" lRadial ", lRadial) - print(" kRadial ", kRadial) - print(" rPP0 ", rPP0) - print(" kSpring ", kSpring) + logger.debug("IN setTip !!!!!!!!!!!!!! 
") + logger.debug(f"lRadial {lRadial}") + logger.debug(f"kRadial {kRadial}") + logger.debug(f"rPP0 {rPP0}") + logger.debug(f"kSpring {kSpring}") lib.setTip(lRadial, kRadial, rPP0, kSpring) @@ -299,7 +301,7 @@ def computeD3Coeffs(Rs, iZs, iZPP, df_params): def getMorseFF(Rs, REs, alpha=None, parameters=None): if alpha is None: alpha = parameters.aMorse - print(f"getMorseFF: alpha: {alpha} [1/A] ") + logger.debug(f"getMorseFF: alpha: {alpha} [1/A] ") natom = len(Rs) lib.getMorseFF(natom, Rs, REs, alpha) @@ -372,7 +374,7 @@ def relaxTipStrokes_omp(rTips, rs, fs, probeStart=1, relaxAlg=1, tip_spline=None def stiffnessMatrix(rTips, rPPs, which=0, ddisp=0.05, tip_spline=None): - print("py.core.stiffnessMatrix() ") + logger.debug("py.core.stiffnessMatrix() ") n = len(rTips) eigenvals = np.zeros((n, 3)) # this is really stupid solution because we cannot simply pass null pointer by ctypes; see : @@ -381,7 +383,7 @@ def stiffnessMatrix(rTips, rPPs, which=0, ddisp=0.05, tip_spline=None): evecs = [None, None, None] for i in range(which): evecs[i] = np.zeros((n, 3)) - print("py.core.stiffnessMatrix() 1 ") + logger.debug("py.core.stiffnessMatrix() 1 ") lib.stiffnessMatrix(ddisp, which, n, rTips, rPPs, eigenvals, evecs[0], evecs[1], evecs[2], tip_spline) return eigenvals, evecs diff --git a/ppafm/cpp_utils.py b/ppafm/cpp_utils.py index dac485da..5e267110 100644 --- a/ppafm/cpp_utils.py +++ b/ppafm/cpp_utils.py @@ -3,6 +3,10 @@ import platform from pathlib import Path +from .logging_utils import get_logger + +logger = get_logger("cpp_utils") + # Check for environment variable PPAFM_RECOMPILE to determine whether # we should recompile the C++ extensions. 
if "PPAFM_RECOMPILE" in os.environ and os.environ["PPAFM_RECOMPILE"] != "": @@ -22,8 +26,8 @@ PACKAGE_PATH = Path(__file__).resolve().parent CPP_PATH = PACKAGE_PATH.joinpath("cpp") -print(" PACKAGE_PATH = ", PACKAGE_PATH) -print(" CPP_PATH = ", CPP_PATH) +logger.debug(f" PACKAGE_PATH = {PACKAGE_PATH}") +logger.debug(f" CPP_PATH = {CPP_PATH}") cpp_modules = {"PP": "ProbeParticle", "GU": "GridUtils", "fitting": "fitting", "fitSpline": "fitSpline"} """Dictionary of C++ extension modules. Keys are targets for make and values are module names.""" @@ -85,5 +89,5 @@ def _build_windows(target): targets = [cpp_modules[target]] for module in targets: cmd = f'"{vars_path}" /no_logo /arch=amd64 && cl.exe /openmp /O2 /D_USRDLL /D_WINDLL {module}.cpp /link /dll /out:{module}{_lib_ext}' - print(cmd) + logger.debug(cmd) os.system(cmd) diff --git a/ppafm/data.py b/ppafm/data.py index 8329f104..1e33dc54 100755 --- a/ppafm/data.py +++ b/ppafm/data.py @@ -2,11 +2,16 @@ import tarfile import zipfile -from os import PathLike from pathlib import Path from tempfile import TemporaryDirectory +from typing import Union from urllib.request import urlretrieve +from .logging_utils import ProgressLogger, get_logger + +logger = get_logger("data") +_progress_logger_name = "data.progress" + DATASET_URLS = { "CO-tip-densities": "https://zenodo.org/records/14222456/files/CO_tip.zip?download=1", "dft-afm": "https://zenodo.org/records/10563098/files/dft-afm.tar.gz?download=1", @@ -25,24 +30,6 @@ } -def _print_progress(block_num: int, block_size: int, total_size: int): - if total_size == -1: - return - delta = block_size / total_size * 100 - current_size = block_num * block_size - percent = current_size / total_size * 100 - percent_int = int(percent) - if (percent - percent_int) > 1.0001 * delta: - # Only print when crossing an integer percentage - return - if block_num > 0: - print("\b\b\b", end="", flush=True) - if current_size < total_size: - print(f"{percent_int:2d}%", end="", flush=True) - else: - 
print("Done") - - def _common_parent(paths): if len(paths) == 1: return Path(paths[0]).parent @@ -58,30 +45,30 @@ def _common_parent(paths): def _extract_members(archive_handle, members, target_dir): - print(f"Extracting dataset to `{target_dir}`: ", end="", flush=True) + progress_logger = ProgressLogger(logger_name=_progress_logger_name, pre_message=f"Extracting dataset to `{target_dir}`: ") for i, m in enumerate(members): - _print_progress(i, 1, len(members)) + progress_logger.print_percent(i, 1, len(members)) archive_handle.extract(m, target_dir) - _print_progress(len(members), 1, len(members)) + progress_logger.print_percent(len(members), 1, len(members)) def _extract_targz(archive_path, target_dir): with tarfile.open(archive_path, "r") as ft: - print("Reading tar archive files...") + logger.debug("Reading tar archive files...") members = [] base_dir = _common_parent(ft.getnames()) for m in ft.getmembers(): if m.isfile(): # relative_to(base_dir) here gets rid of a common parent directory within the archive (if any), # which makes it so that we can just directly extract the files to the target directory. - m.name = Path(m.name).relative_to(base_dir) + m.name = str(Path(m.name).relative_to(base_dir)) members.append(m) _extract_members(ft, members, target_dir) def _extract_zip(archive_path, target_dir): with zipfile.ZipFile(archive_path, "r") as ft: - print("Reading zip archive files...") + logger.debug("Reading zip archive files...") members = [] base_dir = _common_parent(ft.namelist()) for m in ft.infolist(): @@ -93,7 +80,7 @@ def _extract_zip(archive_path, target_dir): _extract_members(ft, members, target_dir) -def download_dataset(name: str, target_dir: PathLike): +def download_dataset(name: str, target_dir: Union[Path, str]): """ Download and unpack a dataset to a target directory. 
@@ -126,13 +113,13 @@ def download_dataset(name: str, target_dir: PathLike): target_dir = Path(target_dir) if target_dir.exists() and any(target_dir.iterdir()): - print(f"Target directory `{target_dir}` exists and is not empty. Skipping downloading dataset `{name}`.") + logger.info(f"Target directory `{target_dir}` exists and is not empty. Skipping downloading dataset `{name}`.") return with TemporaryDirectory() as temp_dir: temp_file = Path(temp_dir) / f"dataset_{name}" - print(f"Downloading dataset `{name}`: ", end="") - _, response = urlretrieve(dataset_url, temp_file, _print_progress) + progress_logger = ProgressLogger(logger_name=_progress_logger_name, pre_message=f"Downloading dataset `{name}`: ") + _, response = urlretrieve(dataset_url, temp_file, reporthook=progress_logger.print_percent) original_file_name = response.get_filename() target_dir.mkdir(exist_ok=True, parents=True) if original_file_name.endswith(".tar.gz"): @@ -140,4 +127,4 @@ def download_dataset(name: str, target_dir: PathLike): elif original_file_name.endswith(".zip"): _extract_zip(temp_file, target_dir) else: - raise RuntimeError(f"Uknown file extension in `{original_file_name}`.") + raise RuntimeError(f"Unknown file extension in `{original_file_name}`.") diff --git a/ppafm/defaults/valelec_dict.py b/ppafm/defaults/valelec_dict.py index 86437b28..143a03d1 100644 --- a/ppafm/defaults/valelec_dict.py +++ b/ppafm/defaults/valelec_dict.py @@ -1,5 +1,9 @@ # total Valence Electron Charge to be subtracted to achieve electron neutrality. The numbers may vary depending on DFT code, pseudopotentials etc. 
+from ..logging_utils import get_logger + +logger = get_logger("valelec_dict") + valElDict = { 1: 1.0, 2: 2.0, @@ -25,4 +29,4 @@ 54: 8.0, } -print("inside valelec_dict.py valElDict=", valElDict) +logger.debug(f"inside valelec_dict.py valElDict={valElDict}") diff --git a/ppafm/fieldFFT.py b/ppafm/fieldFFT.py index b49cb2d2..ab14974b 100644 --- a/ppafm/fieldFFT.py +++ b/ppafm/fieldFFT.py @@ -5,12 +5,13 @@ import numpy as np from . import io +from .logging_utils import get_logger -verbose = 1 +logger = get_logger("fieldFFT") def fieldInfo(F, label="FieldInfo: min max av: "): - print(label, np.min(F), np.max(F), np.average(F)) + logger.info(f"{label} {np.min(F)} {np.max(F)} {np.average(F)}") def getSampleDimensions(lvec): @@ -59,48 +60,38 @@ def getSphericalHarmonic(X, Y, Z, kind="dz2", tilt=0.0): Z, X = rotZX(Z, X, tilt=tilt) # TODO: renormalization should be probaby here if kind == "s": - if verbose > 0: - print("Spherical harmonic: s") + logger.debug("Spherical harmonic: s") return 1.0 # p-functions elif kind == "px": - if verbose > 0: - print("Spherical harmonic: px") + logger.debug("Spherical harmonic: px") return X elif kind == "py": - if verbose > 0: - print("Spherical harmonic: py") + logger.debug("Spherical harmonic: py") return Y elif kind == "pz": - if verbose > 0: - print("Spherical harmonic: pz") + logger.debug("Spherical harmonic: pz") return Z # d-functions if kind == "dz2": - if verbose > 0: - print("Spherical harmonic: dz2") + logger.debug("Spherical harmonic: dz2") return 0.25 * ( 2 * Z**2 - X**2 - Y**2 ) # quadrupole normalized to get 3 times the quadrpole in the standard (cartesian) tensor normalization of Qzz. Also, 3D integral of rho_dz2(x,y,z)*(z/sigma)**2 gives 1 in the normalization used here. 
elif kind == "dx2": - if verbose > 0: - print("Spherical harmonic: dx2") + logger.debug("Spherical harmonic: dx2") return 0.25 * (2 * X**2 - Y**2 - Z**2) elif kind == "dy2": - if verbose > 0: - print("Spherical harmonic: dy2") + logger.debug("Spherical harmonic: dy2") return 0.25 * (2 * Y**2 - X**2 - Z**2) elif kind == "dxy": - if verbose > 0: - print("Spherical harmonic: dxy") + logger.debug("Spherical harmonic: dxy") return X * Y elif kind == "dxz": - if verbose > 0: - print("Spherical harmonic: dxz") + logger.debug("Spherical harmonic: dxz") return X * Z elif kind == "dyz": - if verbose > 0: - print("Spherical harmonic: dyz") + logger.debug("Spherical harmonic: dyz") return Y * Z else: return 0.0 @@ -108,9 +99,7 @@ def getSphericalHarmonic(X, Y, Z, kind="dz2", tilt=0.0): def getProbeDensity(sampleSize, X, Y, Z, dd, sigma=0.7, multipole_dict=None, tilt=0.0): "returns probe particle potential" - if verbose > 0: - print("sigma: ", sigma) - # exit() + logger.debug(f"sigma: {sigma}") mat = getNormalizedBasisMatrix(sampleSize).getT() rx = X * mat[0, 0] + Y * mat[0, 1] + Z * mat[0, 2] @@ -135,7 +124,7 @@ def addCoreDensity(rho, val, x, y, z, sigma=0.1): radial = np.exp(-rquad / (2 * sigma**2)) radial_renom = np.sum(radial) # TODO analytical renormalization may save some time ? 
radial *= val / float(radial_renom) - print("radial.sum()", radial.sum(), val) + logger.debug(f"radial.sum() {radial.sum()} {val}") rho += radial @@ -196,10 +185,8 @@ def getForces(V, rho, sampleSize, dims, dd, X, Y, Z): zeta[axis] = LmatInv[axis, 0] * dzetax * X zeta[axis] += LmatInv[axis, 1] * dzetay * Y zeta[axis] += LmatInv[axis, 2] * dzetaz * Z - if verbose > 0: - print("Ftrans : ", detLmatInv, zeta[0].sum(), zeta[1].sum(), zeta[2].sum()) - if verbose > 0: - print("derConvFFT ", derConvFFT.sum(), derConvFFT.min(), derConvFFT.max()) + logger.debug(f"Ftrans : {detLmatInv} {zeta[0].sum()} {zeta[1].sum()} {zeta[2].sum()}") + logger.debug(f"derConvFFT {derConvFFT.sum()} {derConvFFT.min()} {derConvFFT.max()}") forceSkewFFTx = zeta[0] * derConvFFT forceSkewFFTy = zeta[1] * derConvFFT forceSkewFFTz = zeta[2] * derConvFFT @@ -220,8 +207,7 @@ def getForceTransform(sampleSize, dims, dd, X, Y, Z): zeta[axis] = LmatInv[axis, 0] * dzetax * X zeta[axis] += LmatInv[axis, 1] * dzetay * Y zeta[axis] += LmatInv[axis, 2] * dzetaz * Z - if verbose > 0: - print("Ftrans : ", zeta[0].sum(), zeta[1].sum(), zeta[2].sum()) + logger.debug(f"Ftrans : {zeta[0].sum()} {zeta[1].sum()} {zeta[2].sum()}") return zeta[0], zeta[1], zeta[2], detLmatInv @@ -235,16 +221,16 @@ def getNormalizedBasisMatrix(sampleSize): def printMetadata(sampleSize, dims, dd, xsize, ysize, zsize, V, rho): first_col = 30 sec_col = 25 - print("basis transformation matrix:".rjust(first_col)) - print("sampleSize = \n", sampleSize) - print("Lmat = \n", getNormalizedBasisMatrix(sampleSize)) - print("number of data points:".rjust(first_col), " dims".rjust(sec_col), " = %s" % list(dims)) - print("specimen size:".rjust(first_col), "(xsize, ysize, zsize)".rjust(sec_col), f" = ({xsize}, {ysize}, {zsize})") - print("elementary lengths:".rjust(first_col), "(dx, dy, dz)".rjust(sec_col), " = (%.5f, %.5f, %.5f)" % dd) - print("V potential:".rjust(first_col), "(max, min)".rjust(sec_col), f" = ({V.max()}, {V.min()})") - 
print("".rjust(first_col), "V.shape".rjust(sec_col), " = %s" % list(V.shape)) - print("probe potential:".rjust(first_col), "(max, min)".rjust(sec_col), f" = ({rho.max()}, {rho.min()})") - print("".rjust(first_col), "rho.shape".rjust(sec_col), " = %s" % list(rho.shape)) + logger.info("basis transformation matrix:".rjust(first_col)) + logger.info("sampleSize = \n".rjust(first_col) + str(sampleSize)) + logger.info("Lmat = \n".rjust(first_col) + str(getNormalizedBasisMatrix(sampleSize))) + logger.info("number of data points:".rjust(first_col) + " dims".rjust(sec_col) + f" = {list(dims)}") + logger.info("specimen size:".rjust(first_col) + "(xsize, ysize, zsize)".rjust(sec_col) + f" = ({xsize}, {ysize}, {zsize})") + logger.info("elementary lengths:".rjust(first_col) + "(dx, dy, dz)".rjust(sec_col) + f" = ({dd[0]:.5f}, {dd[1]:.5f}, {dd[2]:.5f})") + logger.info("V potential:".rjust(first_col) + "(max, min)".rjust(sec_col) + f" = ({V.max()}, {V.min()})") + logger.info("".rjust(first_col) + "V.shape".rjust(sec_col) + f" = {list(V.shape)}") + logger.info("probe potential:".rjust(first_col) + "(max, min)".rjust(sec_col) + f" = ({rho.max()}, {rho.min()})") + logger.info("".rjust(first_col) + "rho.shape".rjust(sec_col) + f" = {list(rho.shape)}") def exportPotential(rho, rho_data="rho_data"): @@ -258,8 +244,7 @@ def exportPotential(rho, rho_data="rho_data"): def potential2forces(V, lvec, nDim, sigma=0.7, rho=None, multipole=None, tilt=0.0): fieldInfo(V, label="fieldInfo V ") - if verbose > 0: - print("--- Preprocessing ---") + logger.debug("--- Preprocessing ---") sampleSize = getSampleDimensions(lvec) dims = (nDim[2], nDim[1], nDim[0]) xsize, dx = getSize("x", dims, sampleSize) @@ -269,23 +254,20 @@ def potential2forces(V, lvec, nDim, sigma=0.7, rho=None, multipole=None, tilt=0. 
X, Y, Z = getMGrid(dims, dd) fieldInfo(Z, label="fieldInfo Z ") if rho == None: - if verbose > 0: - print("--- Get Probe Density ---") + logger.debug("--- Get Probe Density ---") rho = getProbeDensity(sampleSize, X, Y, Z, dd, sigma=sigma, multipole_dict=multipole, tilt=tilt) else: rho[:, :, :] = rho[::-1, ::-1, ::-1].copy() fieldInfo(rho, label="fieldInfo rho ") - if verbose > 0: - print("--- Get Forces ---") + logger.debug("--- Get Forces ---") Fx, Fy, Fz = getForces(V, rho, sampleSize, dims, dd, X, Y, Z) fieldInfo(Fz, label="fieldInfo Fz ") - if verbose > 0: - print("Fz.max(), Fz.min() = ", Fz.max(), Fz.min()) + logger.debug(f"Fz.max(), Fz.min() = {Fz.max()}, {Fz.min()}") return Fx, Fy, Fz def potential2forces_mem(V, lvec, nDim, sigma=0.7, rho=None, multipole=None, doForce=True, doPot=False, deleteV=True, tilt=0.0): - print("--- Preprocessing ---") + logger.debug("--- Preprocessing ---") sampleSize = getSampleDimensions(lvec) dims = (nDim[2], nDim[1], nDim[0]) @@ -294,46 +276,37 @@ def potential2forces_mem(V, lvec, nDim, sigma=0.7, rho=None, multipole=None, doF zsize, dz = getSize("z", dims, sampleSize) dd = (dx, dy, dz) - if verbose > 0: - print("potential2forces_mem: dims ", dims) - if verbose > 0: - print("potential2forces_mem: dims ", dd) + logger.debug(f"potential2forces_mem: dims {dims}") + logger.debug(f"potential2forces_mem: dims {dd}") - if verbose > 0: - print("--- X, Y, Z ---") + logger.debug("--- X, Y, Z ---") X, Y, Z = getMGrid(dims, dd) if rho is None: - if verbose > 0: - print("--- Get Probe Density ---") + logger.debug("--- Get Probe Density ---") rho = getProbeDensity(sampleSize, X, Y, Z, dd, sigma=sigma, multipole_dict=multipole, tilt=tilt) io.saveXSF("rhoTip.xsf", rho, lvec) if doForce: - if verbose > 0: - print("--- prepare Force transforms ---") + logger.debug("--- prepare Force transforms ---") zetaX, zetaY, zetaZ, detLmatInv = getForceTransform(sampleSize, dims, dd, X, Y, Z) del X, Y, Z E = None Fx = None Fy = None Fz = None - if verbose > 
0: - print("--- forward FFT ---") + logger.debug("--- forward FFT ---") gc.collect() convFFT = np.fft.fftn(V) * np.conj(np.fft.fftn(rho)) if deleteV: del V gc.collect() if doPot: - if verbose > 0: - print("--- Get Potential ---") + logger.debug("--- Get Potential ---") E = np.real(np.fft.ifftn(convFFT * (dd[0] * dd[1] * dd[2]) / (detLmatInv))) if doForce: - if verbose > 0: - print("--- Get Forces ---") + logger.debug("--- Get Forces ---") convFFT *= -2 * np.pi * 1j * (dd[0] * dd[1] * dd[2]) / (detLmatInv) - if verbose > 0: - print("derConvFFT ", convFFT.sum(), convFFT.min(), convFFT.max()) + logger.debug(f"derConvFFT {convFFT.sum()} {convFFT.min()} {convFFT.max()}") Fx = np.real(np.fft.ifftn(zetaX * convFFT)) del zetaX gc.collect() @@ -343,8 +316,7 @@ def potential2forces_mem(V, lvec, nDim, sigma=0.7, rho=None, multipole=None, doF Fz = np.real(np.fft.ifftn(zetaZ * convFFT)) del zetaZ gc.collect() - if verbose > 0: - print("Fz.max(), Fz.min() = ", Fz.max(), Fz.min()) + logger.debug(f"Fz.max(), Fz.min() = {Fz.max()}, {Fz.min()}") return Fx, Fy, Fz, E @@ -354,8 +326,7 @@ def Average_surf(Val_surf, W_surf, W_tip): | (R) = ----------------------------------------- = -----------------------------; where * means convolution | Int_r W_tip(r) W_sample(r+R) W_tip * W_sample """ - if verbose > 0: - print("Forward FFT ") + logger.debug("Forward FFT ") kE_tip = np.fft.fftn(W_tip[::-1, ::-1, ::-1]) # W_tip kE_surf = np.fft.fftn(W_surf) # W_sample kFE_surf = np.fft.fftn(W_surf * Val_surf) # (Val_surf W_surf) @@ -371,8 +342,7 @@ def Average_surf(Val_surf, W_surf, W_tip): del kE_surf del kFE_surf - if verbose > 0: - print("Backward FFT ") + logger.debug("Backward FFT ") E = np.fft.ifftn(kE) FE = np.fft.ifftn(kFE) @@ -388,8 +358,7 @@ def Average_tip(Val_tip, W_surf, W_tip): | (R) = ----------------------------------------- = -----------------------------; where * means convolution | Int_r W_surf(r) W_sample(r+R) W_tip * W_sample """ - if verbose > 0: - print("Forward FFT ") + 
logger.debug("Forward FFT ") kE_tip = np.fft.fftn(W_tip[::-1, ::-1, ::-1]) # W_tip kE_surf = np.fft.fftn(W_surf) # W_sample kFE_tip = np.fft.fftn(W_tip[::-1, ::-1, ::-1] * (-1) * Val_tip[::-1, ::-1, ::-1]) # (Val_tip W_tip) @@ -405,8 +374,7 @@ def Average_tip(Val_tip, W_surf, W_tip): del kE_surf del kFE_tip - if verbose > 0: - print("Backward FFT ") + logger.debug("Backward FFT ") E = np.fft.ifftn(kE) FE = np.fft.ifftn(kFE) diff --git a/ppafm/fitSpline.py b/ppafm/fitSpline.py index be778e97..0b870e85 100755 --- a/ppafm/fitSpline.py +++ b/ppafm/fitSpline.py @@ -4,6 +4,9 @@ import numpy as np from . import cpp_utils +from .logging_utils import get_logger + +logger = get_logger("fitSpline") c_double_p = ctypes.POINTER(c_double) c_int_p = ctypes.POINTER(c_int) @@ -49,7 +52,7 @@ def convolve1D(coefs, x, y=None, di=1): else: nx /= di y = np.zeros(nx) - print("di ", di, m) + logger.debug(f"di {di} m {m}") lib.convolve1D(m, di, nx, _np_as(coefs, c_double_p), _np_as(x, c_double_p), _np_as(y, c_double_p)) return y @@ -121,14 +124,14 @@ def solveCG(A, b, x=None, maxIters=100, maxErr=1e-6): def fit_tensorProd_2D(BYref=None, Yref=None, basis_coefs=None, kernel_coefs=None, Ycoefs=None, maxIters=100, maxErr=1e-6, di=1, nConvPerCG=1): if BYref is None: - print(" fit_tensorProd BYref = Basis * Yref ") + logger.debug(" fit_tensorProd BYref = Basis * Yref ") BYref = convolve2D_tensorProduct(basis_coefs, Yref, di=di) if Ycoefs is None: Ycoefs = np.zeros(BYref.shape) nx, ny = BYref.shape - print(" >> fit_tensorProd ... ") + logger.debug(" >> fit_tensorProd ... 
") if kernel_coefs is None: - print(" NO KERNEL => use basis with nConvPerCG==2") + logger.debug(" NO KERNEL => use basis with nConvPerCG==2") lib.fit_tensorProd_2D(len(basis_coefs) / 2, nx, ny, _np_as(basis_coefs, c_double_p), _np_as(BYref, c_double_p), _np_as(Ycoefs, c_double_p), maxIters, maxErr, 2) else: nker = len(kernel_coefs) @@ -143,14 +146,14 @@ def fit_tensorProd_2D(BYref=None, Yref=None, basis_coefs=None, kernel_coefs=None def fit_tensorProd_3D(BYref=None, Yref=None, basis_coefs=None, kernel_coefs=None, Ycoefs=None, maxIters=100, maxErr=1e-6, di=1, nConvPerCG=1): if BYref is None: - print(" fit_tensorProd BYref = Basis * Yref ") + logger.debug(" fit_tensorProd BYref = Basis * Yref ") BYref = convolve3D_tensorProduct(basis_coefs, Yref, di=di) if Ycoefs is None: Ycoefs = np.zeros(BYref.shape) nx, ny, nz = BYref.shape - print(" >> fit_tensorProd ... ") + logger.debug(" >> fit_tensorProd ... ") if kernel_coefs is None: - print(" NO KERNEL => use basis with nConvPerCG==2") + logger.debug(" NO KERNEL => use basis with nConvPerCG==2") lib.fit_tensorProd_3D(len(basis_coefs) / 2, nx, ny, nz, _np_as(basis_coefs, c_double_p), _np_as(BYref, c_double_p), _np_as(Ycoefs, c_double_p), maxIters, maxErr, 2) else: nker = len(kernel_coefs) @@ -185,7 +188,7 @@ def step_fit_tensorProd(): def upSwizzle(coefs, di): n = int(np.ceil(float(len(coefs)) / di)) cs = np.zeros((di, n)) - print("cs.shape ", cs.shape) + logger.debug(f"cs.shape {cs.shape}") for i in range(di): csi = coefs[i::di] cs[i, -len(csi) :] = csi @@ -235,7 +238,7 @@ def conv1D(xs, ys): ys_ = np.zeros(ntot) dnx = (nx / 2) + 1 dny = (ny / 2) + 1 - print(nx, ny, ntot, dnx, dny, len(xs_[dny:-dny])) + logger.debug(f"{nx} {ny} {ntot} {dnx} {dny} {len(xs_[dny:-dny])}") xs_[dny : -dny - 1] = xs ys_[dnx : -dnx - 1] = ys conv = np.real(np.fft.ifft(np.fft.fft(xs_) * np.fft.fft(ys_))) # Keep it Real ! 
@@ -253,20 +256,21 @@ def imfig(data, title): plt.colorbar() sp = BsplineCubic(np.array([-2.0, -1.5, -1.0, -0.5, 0.0, 0.5, 1.0, 1.5, 2.0])) - print(sp / sp.sum()) + logger.debug(f"{sp / sp.sum()}") coefs3_2 = np.array([0.01041667, 0.08333333, 0.23958333, 0.33333333, 0.23958333, 0.08333333, 0.01041667]) # *2 - print("np.outer(coefs3_2,coefs3_2).sum() ", np.outer(coefs3_2, coefs3_2).sum()) + logger.debug(f"np.outer(coefs3_2,coefs3_2).sum() {np.outer(coefs3_2, coefs3_2).sum()}") np.set_printoptions(precision=None, linewidth=200) coefs3 = np.array([1.0, 4.0, 1.0]) / 6 - coefs6 = np.array([7.14285714e-03, 8.57142857e-01, 8.50714286e00, 1.72571429e01, 8.50714286e00, 8.57142857e-01, 7.14285714e-03]) / (2 * 1.72571429e01) # ;print coefs6 + coefs6 = np.array([7.14285714e-03, 8.57142857e-01, 8.50714286e00, 1.72571429e01, 8.50714286e00, 8.57142857e-01, 7.14285714e-03]) / (2 * 1.72571429e01) + logger.debug(f"coefs6: {coefs6}") coefs5 = np.array([0.1, 0.2, 0.4, 0.2, 0.1]) coefs3_ker = conv1D(coefs3_2, coefs3_2) - print(coefs3_ker) + logger.debug(f"{coefs3_ker}") coefs3_ker_down = coefs3_ker[:-2:2].copy() plt.plot(coefs3_2, ".-") plt.plot(coefs3_ker, ".-") @@ -280,9 +284,9 @@ def imfig(data, title): y3D[5, 7, 2] = 0 y3D[3:5, 3, 4:7] = 0 - print("coefs3 ", coefs3) + logger.debug(f"coefs3 {coefs3}") y3D_conv = convolve3D_tensorProduct(coefs3, y3D, di=1) - print("min,max y3d_conv ", y3D_conv.min(), y3D_conv.max()) + logger.debug(f"min,max y3d_conv {y3D_conv.min()} {y3D_conv.max()}") iz_view = 4 diff --git a/ppafm/fitting.py b/ppafm/fitting.py index 544a1697..eea582cc 100755 --- a/ppafm/fitting.py +++ b/ppafm/fitting.py @@ -4,6 +4,9 @@ import numpy as np from . 
import common, cpp_utils, io +from .logging_utils import get_logger + +logger = get_logger("fitting") c_double_p = ctypes.POINTER(c_double) c_int_p = ctypes.POINTER(c_int) @@ -63,7 +66,7 @@ def getProjections(ps, Yrefs, centers, types, ncomps, By=None, BB=None): ndim = Yrefs.shape nps = 1 if len(ndim) > 1: - print("ndim ", ndim) + logger.debug(f"ndim {ndim}") for ni in ndim: nps *= ni else: @@ -96,7 +99,7 @@ def project(ps, Youts, centers, types, ncomps, coefs): ndim = Youts.shape nps = 1 if len(ndim) > 1: - print("ndim ", ndim) + logger.debug(f"ndim {ndim}") for ni in ndim: nps *= ni else: @@ -129,7 +132,7 @@ def debugGeomPBC_xsf(centers): atoms, nDim, lvec = io.loadGeometry(fname_ext, parameters=parameters) centers = np.array(atoms[1:4]).transpose().copy() - print("centers \n", centers) + logger.info(f"centers \n{centers}") import sys @@ -141,29 +144,29 @@ def debugGeomPBC_xsf(centers): RFuncs = data[1:, :].copy() rfsh = RFuncs.shape - print("RFunc.shape() ", rfsh) + logger.info(f"RFunc.shape() {rfsh}") fitting.setSplines(zs[1] - zs[0], 5.0, RFuncs) - print("nDim ", nDim) + logger.info(f"nDim {nDim}") fitting.setPBC(lvec[1:], npbc=[1, 1, 1]) types_header = [1, 6, 7] typedict = {k: i for i, k in enumerate(types_header)} types = np.array([typedict[elem] for elem in atoms[0]], dtype=np.int32) - print("types ", types) + logger.info(f"types {types}") ncomps = np.ones(len(types), dtype=np.int32) Yrefs, lvec, nDim, head = io.loadXSF(fname_ext) gridPoss = PPU.getPos_Vec3d(np.array(lvec), nDim) - print("gridPoss.shape, yrefs.shape, centers.shape ", gridPoss.shape, Yrefs.shape, centers.shape) + logger.info(f"gridPoss.shape, yrefs.shape, centers.shape {gridPoss.shape} {Yrefs.shape} {centers.shape}") coefs = np.ones(len(centers)) * 1.2 - print(">>>>>> Yrefs -= project( coefs ) ") + logger.info(">>>>>> Yrefs -= project( coefs ) ") fitting.project(gridPoss, Yrefs, centers, types, ncomps, coefs * -1.0) io.saveXSF("Yresidual.xsf", Yrefs, lvec) exit() - print(" **** ALL DONE *** 
") + logger.info(" **** ALL DONE *** ") diff --git a/ppafm/io.py b/ppafm/io.py index 79763cb1..81c931b2 100644 --- a/ppafm/io.py +++ b/ppafm/io.py @@ -8,11 +8,12 @@ from . import elements from .GridUtils import readNumsUpTo +from .logging_utils import ProgressLogger, get_logger bohrRadius2angstroem = 0.5291772109217 Hartree2eV = 27.211396132 -verbose = 0 +logger = get_logger("io") def loadXYZ(fname): @@ -259,19 +260,15 @@ def loadXSFGeom(fname): ws = f.readline().split() lvec.append([float(ws[0]), float(ws[1]), float(ws[2])]) f.close() - if verbose > 0: - print("nDim+1", nDim) + logger.debug(f"nDim+1 {nDim}") nDim = (nDim[0] - 1, nDim[1] - 1, nDim[2] - 1) - if verbose > 0: - print("lvec", lvec) - if verbose > 0: - print("reading ended") + logger.debug(f"lvec {lvec}") + logger.debug("reading ended") return [e, x, y, z, q], nDim, lvec def loadNPYGeom(fname): - if verbose > 0: - print("loading atoms") + logger.debug("loading atoms") tmp = np.load(fname + "_atoms.npy") e = tmp[0] x = tmp[1] @@ -279,20 +276,15 @@ def loadNPYGeom(fname): z = tmp[3] q = tmp[4] del tmp - if verbose > 0: - print("loading lvec") + logger.debug("loading lvec") lvec = np.load(fname + "_vec.npy") - if verbose > 0: - print("loading nDim") + logger.debug("loading nDim") tmp = np.load(fname + "_z.npy") nDim = tmp.shape del tmp - if verbose > 0: - print("nDim", nDim) - if verbose > 0: - print("lvec", lvec) - if verbose > 0: - print("e,x,y,z", e, x, y, z) + logger.debug(f"nDim {nDim}") + logger.debug(f"lvec {lvec}") + logger.debug(f"e,x,y,z {e}, {x}, {y}, {z}") return [e, x, y, z, q], nDim, lvec @@ -331,8 +323,7 @@ def loadAtomsCUBE(fname): def primcoords2Xsf(iZs, xyzs, lvec): import io as SIO - if verbose > 0: - print("lvec: ", lvec) + logger.debug(f"lvec: {lvec}") sio = SIO.StringIO() sio.write("CRYSTAL\n") sio.write("PRIMVEC\n") @@ -353,7 +344,6 @@ def primcoords2Xsf(iZs, xyzs, lvec): sio.write("some_datagrid\n") sio.write("BEGIN_DATAGRID_3D_whatever\n") s = sio.getvalue() - # print s; exit() 
return s @@ -406,8 +396,7 @@ def loadNCUBE(fname): def loadGeometry(fname=None, format=None, parameters=None): - if verbose > 0: - print("loadGeometry ", fname) + logger.debug(f"loadGeometry {fname}") if fname == None: raise ValueError("Please provide the name of the file with coordinates") if parameters == None: @@ -583,9 +572,8 @@ def _orthoLvec(sh, dd): """ -def saveXSF(fname, data, lvec=None, dd=None, head=XSF_HEAD_DEFAULT, verbose=1): - if verbose > 0: - print("Saving xsf", fname) +def saveXSF(fname, data, lvec=None, dd=None, head=XSF_HEAD_DEFAULT): + logger.debug(f"Saving xsf {fname}") fileout = open(fname, "w") if lvec is None: if dd is None: @@ -608,7 +596,8 @@ def saveXSF(fname, data, lvec=None, dd=None, head=XSF_HEAD_DEFAULT, verbose=1): fileout.write("END_BLOCK_DATAGRID_3D\n") -def loadXSF(fname, xyz_order=False, verbose=True): +def loadXSF(fname, xyz_order=False): + logger.debug(f"Loading xsf {fname}") filein = open(fname) startline, head = _readUpTo(filein, "BEGIN_DATAGRID_3D") # startline - number of the line with DATAGRID_3D_. 
Dinensions are located in the next line nDim = [int(iii) for iii in filein.readline().split()] # reading 1 line with dimensions @@ -616,13 +605,10 @@ def loadXSF(fname, xyz_order=False, verbose=True): nDim = np.array(nDim) lvec = _readmat(filein, 4) # reading 4 lines where 1st line is origin of datagrid and 3 next lines are the cell vectors filein.close() - if verbose: - print("nDim xsf (= nDim + [1,1,1] ):", nDim) - if verbose: - print("io | Load " + fname + " using readNumsUpTo ") + logger.debug(f"nDim xsf (= nDim + [1,1,1] ) {nDim}") + logger.debug(f"io | Load {fname} using readNumsUpTo") F = readNumsUpTo(fname, nDim.astype(np.int32).copy(), startline + 5) - if verbose: - print("io | Done") + logger.debug("io | Done") FF = np.reshape(F, nDim)[:-1, :-1, :-1] if xyz_order: FF = FF.transpose((2, 1, 0)) @@ -651,7 +637,7 @@ def getFromHead_PRIMCOORD(head): # =================== Cube -def loadCUBE(fname, xyz_order=False, verbose=True): +def loadCUBE(fname, xyz_order=False): filein = open(fname) # First two lines of the header are comments filein.readline() @@ -671,14 +657,11 @@ def loadCUBE(fname, xyz_order=False, verbose=True): lvec[2, jj] = float(sth2[jj + 1]) * int(sth2[0]) * bohrRadius2angstroem lvec[3, jj] = float(sth3[jj + 1]) * int(sth3[0]) * bohrRadius2angstroem - if verbose: - print("io | Load " + fname + " using readNumsUpTo") + logger.debug(f"io | Load {fname} using readNumsUpTo") noline = 6 + int(sth0[0]) F = readNumsUpTo(fname, nDim.astype(np.int32).copy(), noline) - if verbose: - print("io | np.shape(F): ", np.shape(F)) - if verbose: - print("io | nDim: ", nDim) + logger.debug(f"io | np.shape(F): {np.shape(F)}") + logger.debug(f"io | nDim: {nDim}") FF = np.reshape(F, nDim) if not xyz_order: @@ -718,8 +701,9 @@ def saveWSxM_3D(prefix, data, extent, slices=None): xs = np.linspace(extent[0], extent[1], nDim[2]) ys = np.linspace(extent[2], extent[3], nDim[1]) Xs, Ys = np.meshgrid(xs, ys) - for i in slices: - print("slice no: ", i) + progress_logger = 
ProgressLogger("saveWSxM_3D", pre_message="slice no: ") + for ii, i in enumerate(slices): + progress_logger.print_message(i, is_last=ii == (len(slices) - 1)) fname = prefix + "_%03d.xyz" % i saveWSxM_2D(fname, data[i], Xs, Ys) @@ -857,7 +841,8 @@ def save_vec_field(fname, data, lvec, data_format="xsf", head=XSF_HEAD_DEFAULT, atomic_info = atomic_info if atomic_info is not None else (np.zeros((4, 1)), lvec) saveVecFieldNpy(fname, data, lvec, atomic_info) else: - print("I cannot save this format!") + # Should this raise an error instead of just a log event? + logger.error(f"Cannot save vector field in format: {data_format}") def load_vec_field(fname, data_format="xsf"): @@ -883,7 +868,7 @@ def load_vec_field(fname, data_format="xsf"): data, lvec, atomic_info_or_head = loadVecFieldNpy(fname) ndim = data.shape else: - print("I cannot load this format!") + raise ValueError(f"Cannot load vector field in format: {data_format}") return data.copy(), lvec, ndim, atomic_info_or_head @@ -908,7 +893,8 @@ def save_scal_field(fname, data, lvec, data_format="xsf", head=XSF_HEAD_DEFAULT, atomic_info = atomic_info if atomic_info is not None else (np.zeros((4, 1)), lvec) saveNpy(fname, data, lvec, atomic_info) else: - print("I cannot save this format!") + # Should this raise an error instead of just a log event? 
+ logger.error(f"Cannot save scalar field in format: {data_format}") def load_scal_field(fname, data_format="xsf"): @@ -935,7 +921,7 @@ def load_scal_field(fname, data_format="xsf"): elif data_format == "cube": data, lvec, ndim, atomic_info_or_head = loadCUBE(fname + ".cube") else: - print("I cannot load this format!") + raise ValueError(f"Cannot load scalar field in format: {data_format}") return data.copy(), lvec, ndim, atomic_info_or_head diff --git a/ppafm/logging_utils.py b/ppafm/logging_utils.py new file mode 100644 index 00000000..896a55d7 --- /dev/null +++ b/ppafm/logging_utils.py @@ -0,0 +1,181 @@ +import logging +import os +import sys +from typing import Any, Optional, Union + +# Setup the root logger for the ppafm package. All other loggers will be derived from this one. +_root_logger = logging.getLogger("ppafm") +_log_format = "%(message)s" +_log_handler = None +_log_path = None + +# Setup another logger for performance benchmarking +_perf_logger = logging.getLogger("ppafm.perf") +_perf_logger.propagate = False +_perf_log_format = "[%(asctime)s - %(name)s] %(message)s" +_perf_log_enabled = False + + +class _DefaultFormatter(logging.Formatter): + + def __init__(self): + super().__init__() + self.info_formatter = logging.Formatter("%(message)s") + self.other_formatter = logging.Formatter("%(levelname)s: %(message)s") + + def format(self, record): + if record.levelno == logging.INFO: + formatter = self.info_formatter + else: + formatter = self.other_formatter + return formatter.format(record) + + +def configure_logging( + level: Optional[Union[int, str]] = None, + format: Optional[str] = None, + log_path: Optional[str] = None, + log_performance: Optional[bool] = None, +): + """Configure options for the ppafm logger. + + Arguments: + level: Logging level to use. See https://docs.python.org/3/library/logging.html#levels. + If None, the level is determined from the PPAFM_LOG_LEVEL environment variable or + defaults to logging.INFO. 
+ format: Format to use for logging messages. See https://docs.python.org/3/library/logging.html#logrecord-attributes. + If None, the format is determined from the PPAFM_LOG_FORMAT environment variable or + defaults to "[%(asctime)s - %(name)s - %(levelname)s] %(message)s" if logging level is DEBUG, + or otherwise '%(message)s' for INFO and `%(levelname)s: %(message)s` for more severe levels. + log_path: Path where log will be written. If None, the path is determined from the PPAFM_LOG_PATH + environment variable, or defaults to stdout. + log_performance: Whether to enable performance logging. If None, is True if the PPAFM_LOG_PERFORMANCE + environment variable is set, otherwise False. + """ + global _root_logger + global _log_handler + global _log_format + global _log_path + global _perf_log_enabled + + if level is None: + try: + level = os.environ["PPAFM_LOG_LEVEL"] + except KeyError: + level = logging.INFO + _root_logger.setLevel(level) + + if log_path is None: + try: + log_path = os.environ["PPAFM_LOG_PATH"] + except KeyError: + pass + _log_path = log_path + + if _log_path is None: + _log_handler = logging.StreamHandler(sys.stdout) + else: + _log_handler = logging.FileHandler(_log_path) + + for handler in _root_logger.handlers: + _root_logger.removeHandler(handler) + _root_logger.addHandler(_log_handler) + + if log_performance is None: + log_performance = "PPAFM_LOG_PERFORMANCE" in os.environ + if log_performance: + if _log_path is None: + perf_log_handler = logging.StreamHandler(sys.stdout) + else: + perf_log_handler = logging.FileHandler(_log_path) + perf_log_handler.setFormatter(logging.Formatter(fmt=_perf_log_format)) + for handler in _perf_logger.handlers: + _perf_logger.removeHandler(handler) + _perf_logger.addHandler(perf_log_handler) + _perf_logger.setLevel(logging.INFO) + _perf_log_enabled = True + else: + _perf_logger.setLevel(logging.CRITICAL) + _perf_log_enabled = False + + if format is None: + try: + _log_format = os.environ["PPAFM_LOG_FORMAT"] 
+ formatter = logging.Formatter(fmt=_log_format) + except KeyError: + if _root_logger.level == logging.DEBUG: + _log_format = "[%(asctime)s - %(name)s - %(levelname)s] %(message)s" + formatter = logging.Formatter(fmt=_log_format) + else: + _log_format = "%(message)s" + formatter = _DefaultFormatter() + else: + _log_format = format + formatter = logging.Formatter(fmt=_log_format) + _log_handler.setFormatter(formatter) + + +def get_logger(name: str) -> logging.Logger: + return _root_logger.getChild(name) + + +def get_perf_logger(name: str) -> logging.Logger: + return _perf_logger.getChild(name) + + +def perf_log_enabled() -> bool: + return _perf_log_enabled + + +class ProgressLogger: + """Print gradual progress messages.""" + + def __init__(self, logger_name: str = "progress", pre_message: str = ""): + + self.logger = get_logger(logger_name) + self.logger.propagate = False # So that the parent handlers don't also print stuff + + # Remove any existing handlers if this logger name was used before + for handler in self.logger.handlers: + self.logger.removeHandler(handler) + + if _log_path is None: + # We are printing to terminal. Setup a handler that prints all message to the same line. + handler = logging.StreamHandler(sys.stdout) + formatter = logging.Formatter("\r" + _log_format) # Carriage return deletes the previous message + handler.terminator = "" # Don't print a new line + self._percent_increment = 1 + self._print_to_terminal = True + else: + # We are printing to a file. Print normally to subsequent lines. 
+ handler = logging.FileHandler(_log_path) + formatter = logging.Formatter(_log_format) + self._percent_increment = 20 # Don't print as often, so we don't spam the log file too much + self._print_to_terminal = False + handler.setFormatter(formatter) + self.logger.addHandler(handler) + + self.pre_message = pre_message + self._previous_percent = -self._percent_increment + + def print_percent(self, block_num: int, block_size: int, total_size: int): + if total_size == -1: + return + current_size = block_num * block_size + percent = int(current_size / total_size * 100) + if percent < (self._previous_percent + self._percent_increment): + return + self._previous_percent = percent + if current_size < total_size: + self.logger.info(f"{self.pre_message}{percent:2d}%") + else: + msg = f"{self.pre_message}Done" + if self._print_to_terminal: + msg += "\n" + self.logger.info(msg) + + def print_message(self, message: Any, is_last: bool): + message = f"{self.pre_message}{message}" + if is_last and self._print_to_terminal: + message += "\n" + self.logger.info(message) diff --git a/ppafm/ml/CorrectionLoop.py b/ppafm/ml/CorrectionLoop.py index 41d96fff..e7c4ef49 100644 --- a/ppafm/ml/CorrectionLoop.py +++ b/ppafm/ml/CorrectionLoop.py @@ -15,14 +15,12 @@ """ -import matplotlib import numpy as np -import pyopencl as cl -from .. import atomicUtils as au from .. import common as PPU -from .. import elements, io +from .. import io from ..dev import SimplePot as sp +from ..logging_utils import get_logger from ..ocl import AFMulator from ..ocl import field as FFcl from ..ocl import oclUtils as oclu @@ -30,8 +28,7 @@ from . 
import AuxMap, Generator from .Corrector import Corrector, Molecule, Mutator -verbose = 0 -bRunTime = False +logger = get_logger("CorrectionLoop") # ======================================================================== @@ -106,8 +103,7 @@ def generatePair(self): def __getitem__(self, index): self.index = index - if verbose > 0: - print("index ", index) + logger.debug(f"index {index}") return next(self) def __iter__(self): @@ -386,7 +382,7 @@ def Job_trainCorrector(simulator, geom_fname="input.xyz", nstep=10): scan_center = np.array([sw[1][0] + sw[0][0], sw[1][1] + sw[0][1]]) / 2 xyzs[:, :2] += scan_center - xyzs[:, :2].mean(axis=0) xyzs[:, 2] += (sw[1][2] - 9.0) - xyzs[:, 2].max() - print("xyzs ", xyzs) + logger.debug(f"xyzs {xyzs}") mol = Molecule(xyzs, Zs, qs) trainer.start(mol) @@ -450,7 +446,7 @@ def Job_CorrectionLoop(simulator, atoms, bonds, geom_fname="input.xyz", nstep=10 scan_center = np.array([sw[1][0] + sw[0][0], sw[1][1] + sw[0][1]]) / 2 xyzs[:, :2] += scan_center - xyzs[:, :2].mean(axis=0) xyzs[:, 2] += (sw[1][2] - 9.0) - xyzs[:, 2].max() - print("xyzs ", xyzs) + logger.debug(f"xyzs {xyzs}") xyzqs = np.concatenate([xyzs, qs[:, None]], axis=1) np.save("./Atoms.npy", atoms(xyzqs, Zs)) @@ -461,9 +457,9 @@ def Job_CorrectionLoop(simulator, atoms, bonds, geom_fname="input.xyz", nstep=10 looper.startLoop(molecule, atomMap, bondMap, lvecMap, AFMRef) ErrConv = 0.1 - print("# ------ To Loop ") + logger.info("# ------ To Loop ") for itr in range(nstep): - print("# ======= CorrectionLoop[ %i ] ", itr) + logger.info(f"# ======= CorrectionLoop[ {itr} ] ") Err = looper.iteration(itr=itr) if Err < ErrConv: break @@ -481,27 +477,27 @@ def Job_CorrectionLoop(simulator, atoms, bonds, geom_fname="input.xyz", nstep=10 parser.add_option("-j", "--job", action="store", type="string", help="[train/loop]") (options, args) = parser.parse_args() - print(" UNIT_TEST START : CorrectionLoop ... ") + logger.info(" UNIT_TEST START : CorrectionLoop ... 
") - print("# ------ Init Generator ") + logger.info("# ------ Init Generator ") i_platform = 0 env = oclu.OCLEnvironment(i_platform=i_platform) FFcl.init(env) oclr.init(env) - # fmt: off afmulator = AFMulator.AFMulator( pixPerAngstrome=10, - lvec=np.array([ - [0.0, 0.0, 0.0], - [20.0, 0.0, 0.0], - [0.0, 20.0, 0.0], - [0.0, 0.0, 5.0] - ]), + lvec=np.array( + [ + [0.0, 0.0, 0.0], + [20.0, 0.0, 0.0], + [0.0, 20.0, 0.0], + [0.0, 0.0, 5.0], + ] + ), scan_window=((2.0, 2.0, 5.0), (18.0, 18.0, 8.0)), ) - # fmt: on atoms = AuxMap.AtomRfunc(scan_dim=(128, 128), scan_window=((2, 2), (18, 18))) bonds = AuxMap.Bonds(scan_dim=(128, 128), scan_window=((2, 2), (18, 18))) @@ -511,6 +507,6 @@ def Job_CorrectionLoop(simulator, atoms, bonds, geom_fname="input.xyz", nstep=10 elif options.job == "train": Job_trainCorrector(afmulator, geom_fname="pos_out3.xyz", nstep=10) else: - print("ERROR : invalid job ", options.job) + logger.error(f"invalid job {options.job}") - print(" UNIT_TEST CorrectionLoop DONE !!! ") + logger.info(" UNIT_TEST CorrectionLoop DONE !!! ") diff --git a/ppafm/ml/Corrector.py b/ppafm/ml/Corrector.py index 6c468a84..f1c06d09 100644 --- a/ppafm/ml/Corrector.py +++ b/ppafm/ml/Corrector.py @@ -1,11 +1,10 @@ -import matplotlib import numpy as np -import pyopencl as cl -from .. import atomicUtils as au -from .. import common as PPU -from .. import elements, io +from .. 
import io from ..dev import SimplePot as pot +from ..logging_utils import get_logger + +logger = get_logger("Corrector") # ====================================================== # ================== Class Molecule @@ -253,11 +252,11 @@ def debug_prob_map(self, molIn): ps = ps[mask] Ws = Ws[mask] nps = len(ps) - print("na0,nps", na0, nps) + logger.debug(f"na0,nps {na0} {nps}") Zs2 = np.concatenate([molIn.Zs, np.ones(nps, dtype=np.int32)]) Rs = np.concatenate([np.ones(na0), Ws]) xyzs2 = np.concatenate([molIn.xyzs, ps], axis=0) - print("xyzs2.shape ", xyzs2.shape) + logger.debug(f"xyzs2.shape {xyzs2.shape}") _saveXYZDebug(Zs2, xyzs2, "debug_genAtomWs.xyz", qs=([0.0] * (na0 + nps)), Rs=Rs) def try_improve(self, molIn, AFMs, AFMRef, span, itr=0): diff --git a/ppafm/ml/Generator.py b/ppafm/ml/Generator.py index ef8a3cde..c0c84f86 100755 --- a/ppafm/ml/Generator.py +++ b/ppafm/ml/Generator.py @@ -5,8 +5,11 @@ from .. import common as PPU from .. import io +from ..logging_utils import get_perf_logger from ..ocl import field as FFcl +perf_logger = get_perf_logger("Generator") + class InverseAFMtrainer: """ @@ -35,9 +38,6 @@ class InverseAFMtrainer: QZS: list of arrays of length 4. Positions of tip charges. 
""" - # Print timings during excecution - bRuntime = False - def __init__( self, afmulator, @@ -75,12 +75,10 @@ def __next__(self): Ys = [[] for _ in range(len(self.aux_maps))] batch_size = min(self.batch_size, len(self.molecules) - self.counter) - if self.bRuntime: - batch_start = time.time() + batch_start = time.perf_counter() for s in range(batch_size): - if self.bRuntime: - sample_start = time.time() + sample_start = time.perf_counter() # Load molecule mol = self.molecules[self.counter] @@ -114,11 +112,9 @@ def __next__(self): self.on_afm_start() # Evaluate AFM - if self.bRuntime: - afm_start = time.time() + afm_start = time.perf_counter() Xs[i].append(self.afmulator(self.xyzs, self.Zs, self.qs, REAs=self.REAs)) - if self.bRuntime: - print(f"AFM {i} runtime [s]: {time.time() - afm_start}") + perf_logger.info(f"AFM {i} runtime [s]: {time.perf_counter() - afm_start}") self.Xs = Xs[i][-1] # Callback @@ -126,15 +122,12 @@ def __next__(self): # Get AuxMaps for i, aux_map in enumerate(self.aux_maps): - if self.bRuntime: - aux_start = time.time() + aux_start = time.perf_counter() xyzqs = np.concatenate([self.xyzs, self.qs[:, None]], axis=1) Ys[i].append(aux_map(xyzqs, self.Zs)) - if self.bRuntime: - print(f"AuxMap {i} runtime [s]: {time.time() - aux_start}") + perf_logger.info(f"AuxMap {i} runtime [s]: {time.perf_counter() - aux_start}") - if self.bRuntime: - print(f"Sample {s} runtime [s]: {time.time() - sample_start}") + perf_logger.info(f"Sample {s} runtime [s]: {time.perf_counter() - sample_start}") self.counter += 1 for i in range(len(self.iZPPs)): @@ -143,8 +136,7 @@ def __next__(self): for i in range(len(self.aux_maps)): Ys[i] = np.stack(Ys[i], axis=0) - if self.bRuntime: - print(f"Batch runtime [s]: {time.time() - batch_start}") + perf_logger.info(f"Batch runtime [s]: {time.perf_counter() - batch_start}") else: raise StopIteration @@ -351,9 +343,6 @@ class GeneratorAFMtrainer: Ignored when sim_type is not ``'FDBM'``. 
""" - bRuntime = False - """Print timings during execution.""" - def __init__( self, afmulator, @@ -446,12 +435,10 @@ def __next__(self): Ys = [] sws = [] - if self.bRuntime: - batch_start = time.perf_counter() + batch_start = time.perf_counter() for s in range(self.batch_size): - if self.bRuntime: - sample_start = time.perf_counter() + sample_start = time.perf_counter() Xs_ = [] Ys_ = [] @@ -481,8 +468,7 @@ def __next__(self): # Callback self.on_sample_start() - if self.bRuntime: - print(f"Sample {s} preparation time [s]: {time.perf_counter() - sample_start}") + perf_logger.info(f"Sample {s} preparation time [s]: {time.perf_counter() - sample_start}") # Get AFM for i, (iZPP, rho_tip, rho_tip_delta, fft_tip, fft_tip_delta, Qs, QZs) in enumerate( @@ -508,11 +494,9 @@ def __next__(self): self.on_afm_start() # Evaluate AFM - if self.bRuntime: - afm_start = time.perf_counter() + afm_start = time.perf_counter() Xs_.append(self.afmulator(**self.sample_dict)) - if self.bRuntime: - print(f"AFM {i} runtime [s]: {time.perf_counter() - afm_start}") + perf_logger.info(f"AFM {i} runtime [s]: {time.perf_counter() - afm_start}") sws_.append(np.array(self.scan_window)) @@ -528,18 +512,15 @@ def __next__(self): pot = None xyzqs = np.concatenate([xyzs, qs[:, None]], axis=1) for i, aux_map in enumerate(self.aux_maps): - if self.bRuntime: - aux_start = time.perf_counter() + aux_start = time.perf_counter() Ys_.append(aux_map(xyzqs, Zs, pot, rot)) - if self.bRuntime: - print(f"AuxMap {i} runtime [s]: {time.perf_counter() - aux_start}") + perf_logger.info(f"AuxMap {i} runtime [s]: {time.perf_counter() - aux_start}") Xs.append(Xs_) Ys.append(Ys_) sws.append(sws_) - if self.bRuntime: - print(f"Sample {s} runtime [s]: {time.perf_counter() - sample_start}") + perf_logger.info(f"Sample {s} runtime [s]: {time.perf_counter() - sample_start}") if len(mols) == 0: # Sample iterator was empty raise StopIteration @@ -548,8 +529,7 @@ def __next__(self): Ys = np.array(Ys) sws = np.array(sws) - if 
self.bRuntime: - print(f"Batch runtime [s]: {time.perf_counter() - batch_start}") + perf_logger.info(f"Batch runtime [s]: {time.perf_counter() - batch_start}") return Xs, Ys, mols, sws diff --git a/ppafm/ocl/AFMulator.py b/ppafm/ocl/AFMulator.py index 1942fc46..d363b89a 100644 --- a/ppafm/ocl/AFMulator.py +++ b/ppafm/ocl/AFMulator.py @@ -7,6 +7,7 @@ import numpy as np from .. import common, elements, io +from ..logging_utils import get_logger, get_perf_logger, perf_log_enabled from ..PPPlot import plotImages from . import field as FFcl from . import oclUtils as oclu @@ -15,6 +16,9 @@ VALID_SIZES = np.array([16, 32, 64, 128, 192, 256, 384, 512, 768, 1024, 1536, 2048]) +logger = get_logger("AFMulator") +perf_logger = get_perf_logger("AFMulator") + class AFMulator: """ @@ -67,14 +71,12 @@ class AFMulator: """ bMergeConv = False # Should we use merged kernel relaxStrokesTilted_convZ or two separated kernells ( relaxStrokesTilted, convolveZ ) - bRuntime = False # Print timings during execution # --- Relaxation relaxParams = [0.5, 0.1, 0.1 * 0.2, 0.1 * 5.0] # (dt,damp, .., .. ) parameters of relaxation, in the moment just dt and damp are used # --- Output bSaveFF = False # Save debug ForceField as .xsf - verbose = 0 # Print information during excecution # ==================== Methods ===================== @@ -161,15 +163,13 @@ def eval(self, xyzs, Zs, qs, rho_sample=None, sample_lvec=None, rot=np.eye(3), r Returns: X: np.ndarray. Output AFM images. If input X is not None, this is the same array object as X with values overwritten. 
""" - if self.bRuntime: - t0 = time.perf_counter() + t0 = time.perf_counter() self.prepareFF(xyzs, Zs, qs, rho_sample, sample_lvec, rot, rot_center, REAs) self.prepareScanner() X = self.evalAFM(X) if plot_to_dir: self.plot_images(X, outdir=plot_to_dir) - if self.bRuntime: - print("runtime(AFMulator.eval) [s]: ", time.perf_counter() - t0) + perf_logger.info(f"eval [s]: {time.perf_counter() - t0}") return X def __call__(self, *args, **kwargs): @@ -186,8 +186,7 @@ def eval_(self, mol): def setLvec(self, lvec=None, pixPerAngstrome=None): """Set forcefield lattice vectors. If lvec is not given it is inferred from the scan window.""" - if self.bRuntime: - t0 = time.perf_counter() + t0 = time.perf_counter() if pixPerAngstrome is not None: self.pixPerAngstrome = pixPerAngstrome @@ -207,14 +206,12 @@ def setLvec(self, lvec=None, pixPerAngstrome=None): FEin_shape = self.forcefield.nDim if (self._old_nDim != self.forcefield.nDim).any() else None self.scanner.prepareBuffers(lvec=self.lvec, FEin_shape=FEin_shape) - if self.bRuntime: - print("runtime(AFMulator.setLvec) [s]: ", time.perf_counter() - t0) + perf_logger.info(f"setLvec [s]: {time.perf_counter() - t0}") def setScanWindow(self, scan_window=None, scan_dim=None, df_steps=None): """Set scanner scan window.""" - if self.bRuntime: - t0 = time.perf_counter() + t0 = time.perf_counter() if scan_window is not None: self.scan_window = scan_window @@ -241,8 +238,7 @@ def setScanWindow(self, scan_window=None, scan_dim=None, df_steps=None): self.scanner.updateBuffers(WZconv=self.dfWeight) self.scanner.preparePosBasis(start=self.scan_window[0][:2], end=self.scan_window[1][:2]) - if self.bRuntime: - print("runtime(AFMulator.setScanWindow) [s]: ", time.perf_counter() - t0) + perf_logger.info(f"setScanWindow [s]: {time.perf_counter() - t0}") def setRho(self, rho=None, sigma=0.71, B_pauli=1.0): """Set tip charge distribution. @@ -252,8 +248,7 @@ def setRho(self, rho=None, sigma=0.71, B_pauli=1.0): sigma: float. 
Tip charge density distribution when rho is a dict. B_pauli: float. Pauli repulsion exponent for tip density when using FDBM. """ - if self.bRuntime: - t0 = time.perf_counter() + t0 = time.perf_counter() if rho is not None: self.sigma = sigma self.B_pauli = B_pauli @@ -270,8 +265,7 @@ def setRho(self, rho=None, sigma=0.71, B_pauli=1.0): if not isinstance(rho, TipDensity): raise ValueError(f"rho should of type `TipDensity`, but got `{type(rho)}`") self.rho = rho - if self.verbose > 0: - print("AFMulator.setRho: Preparing buffers") + logger.debug("setRho: Preparing buffers") if not np.allclose(B_pauli, 1.0): rho_power = self.rho.power_positive(p=self.B_pauli, in_place=False) if self.minimize_memory: @@ -282,13 +276,12 @@ def setRho(self, rho=None, sigma=0.71, B_pauli=1.0): self._rho = None self.rho = None if self.forcefield.rho is not None: - if self.verbose > 0: - print("AFMulator.setRho: Releasing buffers") + logger.debug("setRho: Releasing buffers") self.forcefield.rho.release() self.forcefield.rho = None - if self.bRuntime: + if perf_log_enabled(): self.forcefield.queue.finish() - print("runtime(AFMulator.setRho) [s]: ", time.perf_counter() - t0) + perf_logger.info(f"setRho [s]: {time.perf_counter() - t0}") def setBPauli(self, B_pauli=1.0): """Set Pauli repulsion exponent used in FDBM.""" @@ -300,22 +293,18 @@ def setRhoDelta(self, rho_delta=None): Arguments: rho_delta: :class:`.TipDensity` or None. Tip electron delta-density. If None, the existing density is deleted. 
""" - if self.bRuntime: - t0 = time.perf_counter() + t0 = time.perf_counter() self.rho_delta = rho_delta if self.rho_delta is not None: if not isinstance(rho_delta, TipDensity): raise ValueError(f"rho_delta should of type `TipDensity`, but got `{type(rho_delta)}`") - if self.verbose > 0: - print("AFMulator.setRhoDelta: Preparing buffers") + logger.debug("setRhoDelta: Preparing buffers") self.forcefield.prepareBuffers(rho_delta=self.rho_delta, bDirect=True, minimize_memory=self.minimize_memory) elif self.forcefield.rho_delta is not None: - if self.verbose > 0: - print("AFMulator.setRhoDelta: Releasing buffers") + logger.debug("setRhoDelta: Releasing buffers") self.forcefield.rho_delta.release() self.forcefield.rho_delta = None - if self.bRuntime: - print("runtime(AFMulator.setRhoDelta) [s]: ", time.perf_counter() - t0) + perf_logger.info(f"setRhoDelta [s]: {time.perf_counter() - t0}") def setQs(self, Qs, QZs): """Set tip point charges.""" @@ -348,18 +337,14 @@ def prepareFF(self, xyzs, Zs, qs, rho_sample=None, sample_lvec=None, rot=np.eye( rot_center: np.ndarray of shape (3,). Center for rotation. Defaults to center of atom coordinates. REAs: np.ndarray of shape (num_atoms, 4). Lennard Jones interaction parameters. Calculated automatically if None. """ - if self.bRuntime: - t0 = time.perf_counter() + t0 = time.perf_counter() # Check if the scan window extends over any non-periodic boundaries and issue a warning if it does self.check_scan_window() # (Re)initialize force field if the size of the grid changed since last run. if (self._old_nDim != self.forcefield.nDim).any(): - if self.verbose > 0: - print("(Re)initializing force field buffers.") - if self.verbose > 1: - print(f"old nDim: {self._old_nDim}, new nDim: {self.forcefield.nDim}") + logger.debug(f"(Re)initializing force field buffers. 
Old nDim: {self._old_nDim}, new nDim: {self.forcefield.nDim}") self.forcefield.tryReleaseBuffers() if self._rho is not None: # The grid size changed so we need to recompute/reinterpolate the tip density grid @@ -441,14 +426,12 @@ def prepareFF(self, xyzs, Zs, qs, rho_sample=None, sample_lvec=None, rot=np.eye( if self.bSaveFF: self.saveFF() - if self.bRuntime: - print("runtime(AFMulator.prepareFF) [s]: ", time.perf_counter() - t0) + perf_logger.info(f"prepareFF [s]: {time.perf_counter() - t0}") def prepareScanner(self): """Prepare scanner. Run after preparing force field.""" - if self.bRuntime: - t0 = time.perf_counter() + t0 = time.perf_counter() # Copy forcefield array to scanner buffer self.scanner.updateFEin(self.forcefield.cl_FE) @@ -459,8 +442,7 @@ def prepareScanner(self): # Prepare tip position array self.scanner.setScanRot(self.pos0, rot=np.eye(3), tipR0=self.tipR0) - if self.bRuntime: - print("runtime(AFMulator.prepareScanner) [s]: ", time.perf_counter() - t0) + perf_logger.info(f"prepareScanner [s]: {time.perf_counter() - t0}") def evalAFM(self, X=None): """ @@ -474,8 +456,7 @@ def evalAFM(self, X=None): X: np.ndarray. Output AFM images. If input X is not None, this is the same array object as X with values overwritten. 
""" - if self.bRuntime: - t0 = time.perf_counter() + t0 = time.perf_counter() if self.bMergeConv: FEout = self.scanner.run_relaxStrokesTilted_convZ() @@ -493,8 +474,7 @@ def evalAFM(self, X=None): ) X[:, :, :] = FEout[:, :, :, 2] - if self.bRuntime: - print("runtime(AFMulator.evalAFM) [s]: ", time.perf_counter() - t0) + perf_logger.info(f"evalAFM [s]: {time.perf_counter() - t0}") return X @@ -597,15 +577,13 @@ def saveFF(self): FFx.flat[mask] *= (Fbound / Fr).flat[mask] FFy.flat[mask] *= (Fbound / Fr).flat[mask] FFz.flat[mask] *= (Fbound / Fr).flat[mask] - if self.verbose > 0: - print("FF.shape ", FF.shape) + logger.debug(f"FF.shape {FF.shape}") self.saveDebugXSF_FF(self.saveFFpre + "FF_x.xsf", FFx) self.saveDebugXSF_FF(self.saveFFpre + "FF_y.xsf", FFy) self.saveDebugXSF_FF(self.saveFFpre + "FF_z.xsf", FFz) def saveDebugXSF_FF(self, fname, F): - if self.verbose > 0: - print("saveDebugXSF : ", fname) + logger.debug(f"saveDebugXSF: {fname}") io.saveXSF(fname, F, self.lvec) def check_scan_window(self): @@ -621,8 +599,8 @@ def check_scan_window(self): scan_start -= self.tipR0[2] scan_end -= self.tipR0[2] if (scan_start < lvec_start) or (scan_end > lvec_end): - print( - f"Warning: The edge of the scan window in {dim} dimension is very close or extends over " + logger.warning( + f"The edge of the scan window in {dim} dimension is very close or extends over " f"the boundary of the force-field grid which is not periodic in {dim} dimension. " "If you get artifacts in the images, please check the boundary conditions and " "the size of the scan window and the force field grid." 
diff --git a/ppafm/ocl/field.py b/ppafm/ocl/field.py index 917210cd..587ab7ef 100644 --- a/ppafm/ocl/field.py +++ b/ppafm/ocl/field.py @@ -12,6 +12,7 @@ from ..defaults import d3 from ..fieldFFT import getProbeDensity from ..HighLevel import _getAtomsWhichTouchPBCcell, subtractCoreDensities +from ..logging_utils import get_logger, get_perf_logger, perf_log_enabled try: from reikna import cluda @@ -36,8 +37,8 @@ def init(env): oclu = env -verbose = 0 -bRuntime = False +logger = get_logger("ocl.field") +perf_logger = get_perf_logger("ocl.field") def makeDivisibleUp(num, divisor): @@ -175,8 +176,7 @@ def cl_array(self): if self._cl_array is None: self._cl_array = cl.Buffer(self.ctx, cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR, hostbuf=self.array) self.nbytes += 4 * np.prod(self.shape) - if verbose > 0: - print(f"DataGrid.nbytes {self.nbytes}") + logger.debug(f"DataGrid.nbytes {self.nbytes}") return self._cl_array def update_array(self, array, lvec): @@ -189,8 +189,7 @@ def update_array(self, array, lvec): if self._cl_array is not None: current_size = np.prod(self.shape) if array.size > current_size: - if verbose > 0: - print(f"Reallocating buffers. Old size = {current_size}, new size = {array.size}") + logger.debug(f"Reallocating buffers. 
Old size = {current_size}, new size = {array.size}") self._cl_array = cl.Buffer(self.ctx, cl.mem_flags.READ_ONLY, 4 * array.size) self.nbytes += 4 * (array.size - current_size) self._enqueue_event = cl.enqueue_copy(oclu.queue, self._cl_array, array, is_blocking=False) @@ -227,10 +226,10 @@ def from_file(cls, file_path, scale=1.0): file_path = str(file_path) if file_path.endswith(".cube"): - data, lvec, _, _ = io.loadCUBE(file_path, xyz_order=True, verbose=False) + data, lvec, _, _ = io.loadCUBE(file_path, xyz_order=True) Zs, x, y, z, _ = io.loadAtomsCUBE(file_path) elif file_path.endswith(".xsf"): - data, lvec, _, _ = io.loadXSF(file_path, xyz_order=True, verbose=False) + data, lvec, _, _ = io.loadXSF(file_path, xyz_order=True) try: (Zs, x, y, z, _), _, _ = io.loadXSFGeom(file_path) except ValueError: @@ -395,7 +394,10 @@ def power_positive(self, p=1.2, normalize=True, in_place=True, local_size=(32,), grid_out: Same type as self. New data grid with result. """ - if bRuntime: + queue = queue or oclu.queue + + if perf_log_enabled(): + queue.finish() t0 = time.perf_counter() array_in = self.cl_array @@ -409,7 +411,6 @@ def power_positive(self, p=1.2, normalize=True, in_place=True, local_size=(32,), else: scale = np.float32(1.0) - queue = queue or oclu.queue global_size = [int(np.ceil(n / local_size[0]) * local_size[0])] # fmt: off @@ -422,8 +423,9 @@ def power_positive(self, p=1.2, normalize=True, in_place=True, local_size=(32,), ) # fmt: on - if bRuntime: - print("runtime(DataGrid.power_positive) [s]: ", time.perf_counter() - t0) + if perf_log_enabled(): + queue.finish() + perf_logger.info(f"power_positive [s]: {time.perf_counter() - t0}") return grid_out @@ -681,8 +683,7 @@ def __init__(self, lvec, shape, center=[0, 0, 0], sigma=0.71, multipole={"dz2": super().__init__(array, lvec, ctx) def _make_tip_density(self, lvec, shape, center, sigma, multipole, tilt): - if bRuntime: - t0 = time.perf_counter() + t0 = time.perf_counter() lvec_len = np.linalg.norm(lvec, axis=1) 
center = np.array(center) @@ -699,8 +700,7 @@ def _make_tip_density(self, lvec, shape, center, sigma, multipole, tilt): step = lvec_len / shape rho = getProbeDensity(lvec, X, Y, Z, step, sigma=sigma, multipole_dict=multipole, tilt=tilt) - if bRuntime: - print("runtime(MultipoleTipDensity._make_tip_density) [s]: ", time.perf_counter() - t0) + perf_logger.info(f"_make_tip_density [s]: {time.perf_counter() - t0}") return rho.astype(np.float32) @@ -726,8 +726,7 @@ def __init__(self, rho, queue=None): self._make_transforms() self._make_fft() self._set_rho(rho) - if verbose > 0: - print(f"FFTCrossCorrelation.nbytes {self.nbytes}") + logger.debug(f"FFTCrossCorrelation.nbytes {self.nbytes}") # https://github.com/fjarri/reikna/issues/57 def _make_transforms(self): @@ -771,7 +770,9 @@ def _make_transforms(self): # fmt: on def _make_fft(self): - if bRuntime: + + if perf_log_enabled(): + self.queue.finish() t0 = time.perf_counter() thr = ocl_api().Thread(self.queue) @@ -789,8 +790,9 @@ def _make_fft(self): fft_i.parameter.output.connect(self.c2r, self.c2r.input, new_output=self.c2r.output, scale=self.c2r.scale) self.fft_i = fft_i.compile(thr) - if bRuntime: - print("runtime(FFTCrossCorrelation._make_fft) [s]: ", time.perf_counter() - t0) + if perf_log_enabled(): + self.queue.finish() + perf_logger.info(f"_make_fft [s]: {time.perf_counter() - t0}") def _set_rho(self, rho): self.rho = rho @@ -811,7 +813,8 @@ def correlate(self, array, E=None, scale=1): E: :class:`DataGrid`. Result of cross-correlation. 
""" - if bRuntime: + if perf_log_enabled(): + self.queue.finish() t0 = time.perf_counter() if isinstance(array, DataGrid): @@ -825,17 +828,17 @@ def correlate(self, array, E=None, scale=1): assert E.shape == self.shape, "E data grid shape does not match" E_cl = E.cl_array - if bRuntime: + if perf_log_enabled(): self.queue.finish() - print("runtime(FFTCrossCorrelation.correlate.pre) [s]: ", time.perf_counter() - t0) + perf_logger.info(f"correlate.pre [s]: {time.perf_counter() - t0}") # Do cross-correlation self.fft_f(output=self.pot_hat_cl, new_input=array, inverse=0) self.fft_i(new_output=E_cl, new_input1=self.rho_hat_cl, new_input2=self.pot_hat_cl, scale=scale * self.rho.cell_vol, inverse=1) - if bRuntime: + if perf_log_enabled(): self.queue.finish() - print("runtime(FFTCrossCorrelation.correlate) [s]: ", time.perf_counter() - t0) + perf_logger.info(f"correlate [s]: {time.perf_counter() - t0}") return E @@ -843,8 +846,6 @@ def correlate(self, array, E=None, scale=1): class ForceField_LJC: """Evaluate Lennard-Jones based force fields on an OpenCL device.""" - verbose = 0 - def __init__(self): self.ctx = oclu.ctx self.queue = oclu.queue @@ -912,7 +913,7 @@ def prepareBuffers( ): """Allocate all necessary buffers in device memory.""" - if bRuntime: + if perf_log_enabled(): self.queue.finish() t0 = time.perf_counter() @@ -946,8 +947,7 @@ def prepareBuffers( nb = self.nDim[0] * self.nDim[1] * self.nDim[2] * 4 * nb_float self.cl_FE = cl.Buffer(self.ctx, mf.WRITE_ONLY, nb) nbytes += nb - if self.verbose > 0: - print(" forcefield.prepareBuffers() : self.cl_FE ", self.cl_FE) + logger.debug(f" forcefield.prepareBuffers() : self.cl_FE {self.cl_FE}") if pot is not None: assert isinstance(pot, HartreePotential), "pot should be a HartreePotential object" self.pot = pot @@ -985,25 +985,22 @@ def prepareBuffers( self.rho_sample = rho_sample self.rho_sample.cl_array - if self.verbose > 0: - print("ForceField_LJC.prepareBuffers.nbytes", nbytes) + 
logger.debug(f"ForceField_LJC.prepareBuffers.nbytes {nbytes}") - if bRuntime: + if perf_log_enabled(): self.queue.finish() - print("runtime(ForceField_LJC.prepareBuffers) [s]: ", time.perf_counter() - t0) + perf_logger.info(f"prepareBuffers [s]: {time.perf_counter() - t0}") def updateBuffers(self, atoms=None, cLJs=None, poss=None): """Update the content of device buffers.""" - if self.verbose > 0: - print(" ForceField_LJC.updateBuffers ") + logger.debug("ForceField_LJC.updateBuffers") oclu.updateBuffer(atoms, self.cl_atoms) oclu.updateBuffer(cLJs, self.cl_cLJs) oclu.updateBuffer(poss, self.cl_poss) def tryReleaseBuffers(self): """Release all device buffers.""" - if self.verbose > 0: - print(" ForceField_LJC.tryReleaseBuffers ") + logger.debug("ForceField_LJC.tryReleaseBuffers") try: self.cl_atoms.release() self.cl_atoms = None @@ -1076,8 +1073,7 @@ def downloadFF(self, FE=None): else: FE = np.empty((self.nDim[3],) + tuple(self.nDim[:3]), dtype=np.float32, order="F") - if self.verbose: - print("FE.shape ", FE.shape) + logger.debug(f"FE.shape {FE.shape}") # Copy from device to host cl.enqueue_copy(self.queue, FE, self.cl_FE) @@ -1114,7 +1110,8 @@ def run_evalLJ_noPos(self, FE=None, local_size=(32,), bCopy=True, bFinish=True): FE: np.ndarray if bCopy == True or None otherwise. Calculated force field and energy. 
""" - if bRuntime: + if perf_log_enabled(): + self.queue.finish() t0 = time.perf_counter() global_size = [int(np.ceil(np.prod(self.nDim[:3]) / local_size[0]) * local_size[0])] @@ -1124,27 +1121,28 @@ def run_evalLJ_noPos(self, FE=None, local_size=(32,), bCopy=True, bFinish=True): if bCopy: FE = self.downloadFF(FE) - if bFinish: + if bFinish or perf_log_enabled(): self.queue.finish() - if bRuntime: - print("runtime(ForceField_LJC.run_evalLJ_noPos) [s]: ", time.perf_counter() - t0) + if perf_log_enabled(): + perf_logger.info(f"run_evalLJ_noPos [s]: {time.perf_counter() - t0}") return FE def run_evalLJC_QZs_noPos(self, FE=None, local_size=(32,), bCopy=True, bFinish=True): """Compute Lennard-Jones force field with several point-charges separated on the z-axis.""" - if bRuntime: - t0 = time.time() + if perf_log_enabled(): + self.queue.finish() + t0 = time.perf_counter() if bCopy and (FE is None): ns = tuple(self.nDim[:3]) + (4,) FE = np.zeros(ns, dtype=np.float32) - if self.verbose > 0: - print("FE.shape", FE.shape, self.nDim) + logger.debug(f"FE.shape {FE.shape} {self.nDim}") ntot = self.nDim[0] * self.nDim[1] * self.nDim[2] ntot = makeDivisibleUp(ntot, local_size[0]) # TODO: - we should make sure it does not overflow global_size = (ntot,) # TODO make sure divisible by local_size - if bRuntime: - print("runtime(ForceField_LJC.run_evalLJC_QZs_noPos.pre) [s]: ", time.time() - t0) + if perf_log_enabled(): + self.queue.finish() + perf_logger.info(f"run_evalLJC_QZs_noPos.pre [s]: {time.perf_counter() - t0}") # fmt: off cl_program.evalLJC_QZs_noPos(self.queue, global_size, local_size, self.nAtoms, @@ -1162,10 +1160,10 @@ def run_evalLJC_QZs_noPos(self, FE=None, local_size=(32,), bCopy=True, bFinish=T # fmt: on if bCopy: cl.enqueue_copy(self.queue, FE, self.cl_FE) - if bFinish: + if bFinish or perf_log_enabled(): self.queue.finish() - if bRuntime: - print("runtime(ForceField_LJC.run_evalLJC_QZs_noPos) [s]: ", time.time() - t0) + if perf_log_enabled(): + 
perf_logger.info(f"run_evalLJC_QZs_noPos [s]: {time.perf_counter() - t0}") return FE def run_evalLJC_Hartree(self, FE=None, local_size=(32,), bCopy=True, bFinish=True): @@ -1184,13 +1182,15 @@ def run_evalLJC_Hartree(self, FE=None, local_size=(32,), bCopy=True, bFinish=Tru FE: np.ndarray if bCopy == True or None otherwise. Calculated force field and energy. """ - if bRuntime: + if perf_log_enabled(): + self.queue.finish() t0 = time.perf_counter() T = np.append(np.linalg.inv(self.dlvec[:, :3]).T.copy(), np.zeros((3, 1)), axis=1).astype(np.float32) - if bRuntime: - print("runtime(ForceField_LJC.run_evalLJC_Hartree.pre) [s]: ", time.perf_counter() - t0) + if perf_log_enabled(): + self.queue.finish() + perf_logger.info(f"run_evalLJC_Hartree.pre [s]: {time.perf_counter() - t0}") global_size = [int(np.ceil(np.prod(self.nDim[:3]) / local_size[0]) * local_size[0])] cl_program.evalLJC_Hartree( @@ -1216,10 +1216,10 @@ def run_evalLJC_Hartree(self, FE=None, local_size=(32,), bCopy=True, bFinish=Tru if bCopy: FE = self.downloadFF(FE) - if bFinish: + if bFinish or perf_log_enabled(): self.queue.finish() - if bRuntime: - print("runtime(ForceField_LJC.run_evalLJC_Hartree) [s]: ", time.perf_counter() - t0) + if perf_log_enabled(): + perf_logger.info(f"run_evalLJC_Hartree [s]: {time.perf_counter() - t0}") return FE @@ -1243,7 +1243,8 @@ def run_gradPotentialGrid(self, pot=None, E_field=None, h=None, local_size=(32,) E_field: np.ndarray if bCopy == True or None otherwise. Calculated electric field and potential. 
""" - if bRuntime: + if perf_log_enabled(): + self.queue.finish() t0 = time.perf_counter() if pot: @@ -1269,11 +1270,11 @@ def run_gradPotentialGrid(self, pot=None, E_field=None, h=None, local_size=(32,) and (np.abs(np.diag(self.pot.step) * self.pot.shape - np.diag(self.lvec[:, :3])) < 1e-3).all() and (self.pot.step == np.diag(np.diag(self.pot.step))).all() ) - if self.verbose > 0: - print("Matching grid:", matching_grid) + logger.debug(f"Matching grid: {matching_grid}") - if bRuntime: - print("runtime(ForceField_LJC.run_gradPotentialGrid.pre) [s]: ", time.perf_counter() - t0) + if perf_log_enabled(): + self.queue.finish() + perf_logger.info(f"run_gradPotentialGrid.pre [s]: {time.perf_counter() - t0}") global_size = [int(np.ceil(np.prod(self.nDim[:3]) / local_size[0]) * local_size[0])] if matching_grid: @@ -1309,10 +1310,10 @@ def run_gradPotentialGrid(self, pot=None, E_field=None, h=None, local_size=(32,) if bCopy: cl.enqueue_copy(self.queue, E_field, self.cl_Efield) - if bFinish: + if bFinish or perf_log_enabled(): self.queue.finish() - if bRuntime: - print("runtime(ForceField_LJC.run_gradPotentialGrid) [s]: ", time.perf_counter() - t0) + if perf_log_enabled(): + perf_logger.info(f"run_gradPotentialGrid [s]: {time.perf_counter() - t0}") return E_field @@ -1417,13 +1418,14 @@ def add_dftd3(self, params="PBE", local_size=(64,)): functional name or a dict with manually specified parameters. local_size: tuple of a single int. Size of local work group on device. """ - if bRuntime: + if perf_log_enabled(): + self.queue.finish() t0 = time.perf_counter() local_size = (min(local_size[0], 64),) # The kernel uses shared memory arrays with size 64. Let's not overflow. 
self._get_dftd3_params(params, local_size) - if bRuntime: + if perf_log_enabled(): self.queue.finish() - print("runtime(ForceField_LJC.add_dftd3.get_params) [s]: ", time.perf_counter() - t0) + perf_logger.info(f"add_dftd3.get_params [s]: {time.perf_counter() - t0}") global_size = [int(np.ceil(np.prod(self.nDim[:3]) / local_size[0]) * local_size[0])] # fmt: off cl_program.addDFTD3_BJ(self.queue, global_size, local_size, @@ -1438,9 +1440,9 @@ def add_dftd3(self, params="PBE", local_size=(64,)): self.dlvec[2] ) # fmt: on - if bRuntime: + if perf_log_enabled(): self.queue.finish() - print("runtime(ForceField_LJC.add_dftd3) [s]: ", time.perf_counter() - t0) + perf_logger.info(f"add_dftd3 [s]: {time.perf_counter() - t0}") def calc_force_hartree(self, FE=None, rot=np.eye(3), rot_center=np.zeros(3), local_size=(32,), bCopy=True, bFinish=True): """ @@ -1459,7 +1461,8 @@ def calc_force_hartree(self, FE=None, rot=np.eye(3), rot_center=np.zeros(3), loc FE: np.ndarray if bCopy==True or None otherwise. Calculated force field and energy. 
""" - if bRuntime: + if perf_log_enabled(): + self.queue.finish() t0 = time.perf_counter() if (self.lvec[:, :3] != np.diag(np.diag(self.lvec[:, :3]))).any(): @@ -1471,32 +1474,33 @@ def calc_force_hartree(self, FE=None, rot=np.eye(3), rot_center=np.zeros(3), loc lvec = np.concatenate([self.lvec0[None, :3], self.lvec[:, :3]], axis=0) pot = self.pot.interp_at(lvec, self.nDim[:3], rot=rot, rot_center=rot_center, local_size=local_size, queue=self.queue) - if bRuntime: - print("runtime(ForceField_LJC.calc_force_hartree.interpolate) [s]: ", time.perf_counter() - t0) + if perf_log_enabled(): + self.queue.finish() + perf_logger.info(f"calc_force_hartree.interpolate [s]: {time.perf_counter() - t0}") # Cross-correlate Hartree potential and tip charge density E = self.fft_corr.correlate(pot) - if bRuntime: + if perf_log_enabled(): self.queue.finish() - print("runtime(ForceField_LJC.calc_force_hartree.cross-correlation) [s]: ", time.perf_counter() - t0) + perf_logger.info(f"calc_force_hartree.cross-correlation [s]: {time.perf_counter() - t0}") # Take gradient to get electrostatic force field E.grad(scale=[-1.0, -1.0, -1.0, 1.0], array_out=self.cl_FE, order="F", local_size=local_size, queue=self.queue) - if bRuntime: + if perf_log_enabled(): self.queue.finish() - print("runtime(ForceField_LJC.calc_force_hartree.gradient) [s]: ", time.perf_counter() - t0) + perf_logger.info(f"calc_force_hartree.gradient [s]: {time.perf_counter() - t0}") # Add Lennard-Jones force self.addLJ(local_size=local_size) if bCopy: FE = self.downloadFF(FE) - if bFinish or bRuntime: + if bFinish or perf_log_enabled(): self.queue.finish() - if bRuntime: - print("runtime(ForceField_LJC.calc_force_hartree) [s]: ", time.perf_counter() - t0) + if perf_log_enabled(): + perf_logger.info(f"calc_force_hartree [s]: {time.perf_counter() - t0}") return FE @@ -1527,7 +1531,8 @@ def calc_force_fdbm( FE: np.ndarray if ``bCopy==True`` or ``None`` otherwise. Calculated force field and energy. 
""" - if bRuntime: + if perf_log_enabled(): + self.queue.finish() t0 = time.perf_counter() if (self.lvec[:, :3] != np.diag(np.diag(self.lvec[:, :3]))).any(): @@ -1548,9 +1553,9 @@ def calc_force_fdbm( else: rho_sample = self.rho_sample - if bRuntime: + if perf_log_enabled(): self.queue.finish() - print("runtime(ForceField_LJC.calc_force_fdbm.interpolate) [s]: ", time.perf_counter() - t0) + perf_logger.info(f"calc_force_fdbm.interpolate [s]: {time.perf_counter() - t0}") # Cross-correlate Hartree potential and tip electron delta density for electrostatic energy E_es = self.fft_corr_delta.correlate(pot, scale=-1.0) # scale=-1.0, because the electron density has positive sign. @@ -1564,25 +1569,25 @@ def calc_force_fdbm( if not rho_sample_lvec_same: rho_sample.release(keep_on_host=False) - if bRuntime: + if perf_log_enabled(): self.queue.finish() - print("runtime(ForceField_LJC.calc_force_fdbm.cross-correlation) [s]: ", time.perf_counter() - t0) + perf_logger.info(f"calc_force_fdbm.cross-correlation [s]: {time.perf_counter() - t0}") # Add the two energy contributions together E = E_es.add_mult(E_pauli, scale=A, in_place=True, local_size=local_size, queue=self.queue) E_pauli.release(keep_on_host=False) - if bRuntime: + if perf_log_enabled(): self.queue.finish() - print("runtime(ForceField_LJC.calc_force_fdbm.add_mult) [s]: ", time.perf_counter() - t0) + perf_logger.info(f"calc_force_fdbm.add_mult [s]: {time.perf_counter() - t0}") # Take gradient to get the force field E.grad(scale=[-1.0, -1.0, -1.0, 1.0], array_out=self.cl_FE, order="F", local_size=local_size, queue=self.queue) E.release(keep_on_host=False) - if bRuntime: + if perf_log_enabled(): self.queue.finish() - print("runtime(ForceField_LJC.calc_force_fdbm.gradient) [s]: ", time.perf_counter() - t0) + perf_logger.info(f"calc_force_fdbm.gradient [s]: {time.perf_counter() - t0}") # Add vdW force if vdw_type == "D3": @@ -1594,10 +1599,10 @@ def calc_force_fdbm( if bCopy: FE = self.downloadFF(FE) - if bFinish or 
bRuntime: + if bFinish or perf_log_enabled(): self.queue.finish() - if bRuntime: - print("runtime(ForceField_LJC.calc_force_fdbm) [s]: ", time.perf_counter() - t0) + if perf_log_enabled(): + perf_logger.info(f"calc_force_fdbm [s]: {time.perf_counter() - t0}") return FE @@ -1679,7 +1684,8 @@ def makeFF( FE: np.ndarray if ``bCopy==True`` or ``None`` otherwise. Calculated force field and energy. """ - if bRuntime: + if perf_log_enabled(): + self.queue.finish() t0 = time.perf_counter() if not hasattr(self, "nDim") or not hasattr(self, "lvec"): @@ -1697,8 +1703,9 @@ def makeFF( qs = np.zeros(len(xyzs)) self.atoms = np.concatenate([xyzs, qs[:, None]], axis=1) self.prepareBuffers(self.atoms, cLJs, REAs=REAs, Zs=Zs, pot=pot, rho=rho, rho_delta=rho_delta, rho_sample=rho_sample) - if bRuntime: - print("runtime(ForceField_LJC.makeFF.pre) [s]: ", time.perf_counter() - t0) + if perf_log_enabled(): + self.queue.finish() + perf_logger.info(f"makeFF.pre [s]: {time.perf_counter() - t0}") if method == "point-charge": if np.allclose(self.atoms[:, -1], 0): # No charges @@ -1734,8 +1741,9 @@ def makeFF( if bRelease: self.tryReleaseBuffers() - if bRuntime: - print("runtime(ForceField_LJC.makeFF.tot) [s]: ", time.perf_counter() - t0) + if perf_log_enabled(): + self.queue.finish() + perf_logger.info(f"makeFF.tot [s]: {time.perf_counter() - t0}") return FF @@ -1771,23 +1779,20 @@ def makeCoefsZR(self, Zs, ELEMENTS): """ na = len(Zs) coefs = np.zeros((na, 4), dtype=np.float32) - if verbose > 0: - print("Zs", Zs) + logger.debug(f"Zs {Zs}") for i, ie in enumerate(Zs): coefs[i, 0] = 1.0 coefs[i, 1] = ie coefs[i, 2] = ELEMENTS[ie - 1][6] coefs[i, 3] = ELEMENTS[ie - 1][7] - if verbose > 0: - print("coefs[:,2]", coefs[:, 2]) + logger.debug(f"coefs[:,2] {coefs[:, 2]}") return coefs def prepareBuffers(self, atoms, prj_dim, coefs=None, bonds2atoms=None, Rfunc=None, elem_channels=None): """ allocate GPU buffers """ - if verbose > 0: - print("AtomProjection.prepareBuffers prj_dim", prj_dim) + 
logger.debug(f"AtomProjection.prepareBuffers prj_dim {prj_dim}") self.prj_dim = prj_dim nbytes = 0 self.nAtoms = np.int32(len(atoms)) @@ -1836,8 +1841,7 @@ def prepareBuffers(self, atoms, prj_dim, coefs=None, bonds2atoms=None, Rfunc=Non self.cl_elem_channels = cl.Buffer(self.ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=elem_channels) nbytes += elem_channels.nbytes - if verbose > 0: - print("AtomProjection.prepareBuffers.nbytes ", nbytes) + logger.debug(f"AtomProjection.prepareBuffers.nbytes {nbytes}") def updateBuffers(self, atoms=None, coefs=None, poss=None): """ @@ -1865,19 +1869,16 @@ def releaseBuffers(self): """ deallocated all GPU buffers """ - if verbose > 0: - print(" AtomProjection.releaseBuffers ") + logger.debug("AtomProjection.releaseBuffers") self.cl_atoms.release() self.cl_coefs.release() self.cl_poss.release() - self.cl_FE.release() def tryReleaseBuffers(self): """ deallocated all GPU buffers (those which exists) """ - if verbose > 0: - print(" AtomProjection.releaseBuffers ") + logger.debug("AtomProjection.releaseBuffers") try: self.cl_atoms.release() except: @@ -1890,10 +1891,6 @@ def tryReleaseBuffers(self): self.cl_poss.release() except: pass - try: - self.cl_FE.release() - except: - pass def run_evalLorenz(self, poss=None, Eout=None, local_size=(32,)): """ @@ -1901,11 +1898,9 @@ def run_evalLorenz(self, poss=None, Eout=None, local_size=(32,)): """ if Eout is None: Eout = np.zeros(self.prj_dim, dtype=np.float32) - if verbose > 0: - print("FE.shape", Eout.shape, self.nDim) + logger.debug(f"FE.shape {Eout.shape}") if poss is not None: - if verbose > 0: - print("poss.shape ", poss.shape, self.prj_dim, poss.nbytes, poss.dtype) + logger.debug(f"poss.shape {poss.shape} {self.prj_dim} {poss.nbytes} {poss.dtype}") oclu.updateBuffer(poss, self.cl_poss) ntot = self.prj_dim[0] * self.prj_dim[1] ntot = makeDivisibleUp(ntot, local_size[0]) # TODO: - we should make sure it does not overflow @@ -1931,11 +1926,9 @@ def run_evaldisks(self, poss=None, Eout=None, 
tipRot=None, offset=0.0, local_siz self.tipRot = tipRot if Eout is None: Eout = np.zeros(self.prj_dim, dtype=np.float32) - if verbose > 0: - print("FE.shape", Eout.shape, self.nDim) + logger.debug(f"FE.shape {Eout.shape}") if poss is not None: - if verbose > 0: - print("poss.shape ", poss.shape, self.prj_dim, poss.nbytes, poss.dtype) + logger.debug(f"poss.shape {poss.shape} {self.prj_dim} {poss.nbytes} {poss.dtype}") oclu.updateBuffer(poss, self.cl_poss) ntot = self.prj_dim[0] * self.prj_dim[1] ntot = makeDivisibleUp(ntot, local_size[0]) # TODO: - we should make sure it does not overflow @@ -1968,11 +1961,9 @@ def run_evaldisks_occlusion(self, poss=None, Eout=None, tipRot=None, local_size= self.tipRot = tipRot if Eout is None: Eout = np.zeros(self.prj_dim, dtype=np.float32) - if verbose > 0: - print("FE.shape", Eout.shape, self.nDim) + logger.debug(f"FE.shape {Eout.shape}") if poss is not None: - if verbose > 0: - print("poss.shape ", poss.shape, self.prj_dim, poss.nbytes, poss.dtype) + logger.debug(f"poss.shape {poss.shape} {self.prj_dim} {poss.nbytes} {poss.dtype}") oclu.updateBuffer(poss, self.cl_poss) ntot = self.prj_dim[0] * self.prj_dim[1] ntot = makeDivisibleUp(ntot, local_size[0]) # TODO: - we should make sure it does not overflow @@ -2005,11 +1996,9 @@ def run_evalSpheres(self, poss=None, Eout=None, tipRot=None, local_size=(32,)): self.tipRot = tipRot if Eout is None: Eout = np.zeros(self.prj_dim, dtype=np.float32) - if verbose > 0: - print("FE.shape", Eout.shape, self.nDim) + logger.debug(f"FE.shape {Eout.shape}") if poss is not None: - if verbose > 0: - print("poss.shape ", poss.shape, self.prj_dim, poss.nbytes, poss.dtype) + logger.debug(f"poss.shape {poss.shape} {self.prj_dim} {poss.nbytes} {poss.dtype}") oclu.updateBuffer(poss, self.cl_poss) ntot = self.prj_dim[0] * self.prj_dim[1] ntot = makeDivisibleUp(ntot, local_size[0]) # TODO: - we should make sure it does not overflow @@ -2040,11 +2029,9 @@ def run_evalSphereCaps(self, poss=None, Eout=None, 
tipRot=None, local_size=(32,) self.tipRot = tipRot if Eout is None: Eout = np.zeros(self.prj_dim, dtype=np.float32) - if verbose > 0: - print("FE.shape", Eout.shape, self.nDim) + logger.debug(f"FE.shape {Eout.shape}") if poss is not None: - if verbose > 0: - print("poss.shape ", poss.shape, self.prj_dim, poss.nbytes, poss.dtype) + logger.debug(f"poss.shape {poss.shape} {self.prj_dim} {poss.nbytes} {poss.dtype}") oclu.updateBuffer(poss, self.cl_poss) ntot = self.prj_dim[0] * self.prj_dim[1] ntot = makeDivisibleUp(ntot, local_size[0]) # TODO: - we should make sure it does not overflow @@ -2077,11 +2064,9 @@ def run_evalQdisks(self, poss=None, Eout=None, tipRot=None, local_size=(32,)): self.tipRot = tipRot if Eout is None: Eout = np.zeros(self.prj_dim, dtype=np.float32) - if verbose > 0: - print("FE.shape", Eout.shape, self.nDim) + logger.debug(f"FE.shape {Eout.shape}") if poss is not None: - if verbose > 0: - print("poss.shape ", poss.shape, self.prj_dim, poss.nbytes, poss.dtype) + logger.debug(f"poss.shape {poss.shape} {self.prj_dim} {poss.nbytes} {poss.dtype}") oclu.updateBuffer(poss, self.cl_poss) ntot = self.prj_dim[0] * self.prj_dim[1] ntot = makeDivisibleUp(ntot, local_size[0]) # TODO: - we should make sure it does not overflow @@ -2111,8 +2096,7 @@ def run_evalMultiMapSpheres(self, poss=None, Eout=None, tipRot=None, bOccl=0, Rm if Eout is None: Eout = np.zeros(self.prj_dim, dtype=np.float32) if poss is not None: - if verbose > 0: - print("poss.shape ", poss.shape, self.prj_dim, poss.nbytes, poss.dtype) + logger.debug(f"poss.shape {poss.shape} {self.prj_dim} {poss.nbytes} {poss.dtype}") oclu.updateBuffer(poss, self.cl_poss) ntot = self.prj_dim[0] * self.prj_dim[1] ntot = makeDivisibleUp(ntot, local_size[0]) # TODO: - we should make sure it does not overflow @@ -2148,8 +2132,7 @@ def run_evalMultiMapSpheresElements(self, poss=None, Eout=None, tipRot=None, bOc if Eout is None: Eout = np.zeros(self.prj_dim, dtype=np.float32) if poss is not None: - if verbose > 0: 
- print("poss.shape ", poss.shape, self.prj_dim, poss.nbytes, poss.dtype) + logger.debug(f"poss.shape {poss.shape} {self.prj_dim} {poss.nbytes} {poss.dtype}") oclu.updateBuffer(poss, self.cl_poss) ntot = self.prj_dim[0] * self.prj_dim[1] ntot = makeDivisibleUp(ntot, local_size[0]) # TODO: - we should make sure it does not overflow @@ -2184,8 +2167,7 @@ def run_evalSpheresType(self, poss=None, Eout=None, tipRot=None, bOccl=0, local_ if Eout is None: Eout = np.zeros(self.prj_dim, dtype=np.float32) if poss is not None: - if verbose > 0: - print("poss.shape ", poss.shape, self.prj_dim, poss.nbytes, poss.dtype) + logger.debug(f"poss.shape {poss.shape} {self.prj_dim} {poss.nbytes} {poss.dtype}") oclu.updateBuffer(poss, self.cl_poss) ntot = self.prj_dim[0] * self.prj_dim[1] ntot = makeDivisibleUp(ntot, local_size[0]) # TODO: - we should make sure it does not overflow @@ -2220,8 +2202,7 @@ def run_evalBondEllipses(self, poss=None, Eout=None, tipRot=None, local_size=(32 if Eout is None: Eout = np.zeros(self.prj_dim, dtype=np.float32) if poss is not None: - if verbose > 0: - print("poss.shape ", poss.shape, self.prj_dim, poss.nbytes, poss.dtype) + logger.debug(f"poss.shape {poss.shape} {self.prj_dim} {poss.nbytes} {poss.dtype}") oclu.updateBuffer(poss, self.cl_poss) ntot = self.prj_dim[0] * self.prj_dim[1] ntot = makeDivisibleUp(ntot, local_size[0]) # TODO: - we should make sure it does not overflow @@ -2255,8 +2236,7 @@ def run_evalAtomRfunc(self, poss=None, Eout=None, tipRot=None, local_size=(32,)) if Eout is None: Eout = np.zeros(self.prj_dim, dtype=np.float32) if poss is not None: - if verbose > 0: - print("poss.shape ", poss.shape, self.prj_dim, poss.nbytes, poss.dtype) + logger.debug(f"poss.shape {poss.shape} {self.prj_dim} {poss.nbytes} {poss.dtype}") oclu.updateBuffer(poss, self.cl_poss) ntot = self.prj_dim[0] * self.prj_dim[1] ntot = makeDivisibleUp(ntot, local_size[0]) # TODO: - we should make sure it does not overflow @@ -2287,11 +2267,9 @@ def 
run_evalCoulomb(self, poss=None, Eout=None, local_size=(32,)): """ if Eout is None: Eout = np.zeros(self.prj_dim, dtype=np.float32) - if verbose > 0: - print("FE.shape", Eout.shape, self.nDim) + logger.debug(f"FE.shape {Eout.shape}") if poss is not None: - if verbose > 0: - print("poss.shape ", poss.shape, self.prj_dim, poss.nbytes, poss.dtype) + logger.debug(f"poss.shape {poss.shape} {self.prj_dim} {poss.nbytes} {poss.dtype}") oclu.updateBuffer(poss, self.cl_poss) ntot = self.prj_dim[0] * self.prj_dim[1] ntot = makeDivisibleUp(ntot, local_size[0]) # TODO: - we should make sure it does not overflow @@ -2325,11 +2303,9 @@ def run_evalHartreeGradient(self, pot, poss=None, Eout=None, h=None, rot=np.eye( if Eout is None: Eout = np.zeros(self.prj_dim[:2], dtype=np.float32) - if verbose > 0: - print("FE.shape", Eout.shape, self.nDim) + logger.debug(f"FE.shape {Eout.shape}") if poss is not None: - if verbose > 0: - print("poss.shape ", poss.shape, self.prj_dim, poss.nbytes, poss.dtype) + logger.debug(f"poss.shape {poss.shape} {self.prj_dim} {poss.nbytes} {poss.dtype}") oclu.updateBuffer(poss, self.cl_poss) global_size = (int(np.ceil(np.prod(self.prj_dim[:2]) / local_size[0]) * local_size[0]),) diff --git a/ppafm/ocl/oclUtils.py b/ppafm/ocl/oclUtils.py index c2281b79..f668cf77 100644 --- a/ppafm/ocl/oclUtils.py +++ b/ppafm/ocl/oclUtils.py @@ -1,17 +1,19 @@ from pathlib import Path -import numpy as np import pyopencl as cl +from ..logging_utils import get_logger from . import field as FFcl from . 
import relax as oclr +logger = get_logger("oclUtils") + class OCLEnvironment: def __init__(self, i_platform=0): platforms = get_platforms() self.platform = platforms[i_platform] - print(f"Initializing an OpenCL environment on {self.platform.name}") + logger.info(f"Initializing an OpenCL environment on {self.platform.name}") self.PACKAGE_PATH = Path(__file__).resolve().parent self.CL_PATH = self.PACKAGE_PATH / "cl" @@ -39,30 +41,30 @@ def updateBuffer(self, buff, cl_buff, access=cl.mem_flags): def printInfo(self): # fmt: off - print("======= DEVICES\n", self.ctx.get_info(cl.context_info.DEVICES)) - print("======= PROPERTIES\n", self.ctx.get_info(cl.context_info.PROPERTIES)) - print("======= REFERENCE_COUNT\n", self.ctx.get_info(cl.context_info.REFERENCE_COUNT)) + logger.info("======= DEVICES\n" + str(self.ctx.get_info(cl.context_info.DEVICES))) + logger.info("======= PROPERTIES\n" + str(self.ctx.get_info(cl.context_info.PROPERTIES))) + logger.info("======= REFERENCE_COUNT\n" + str(self.ctx.get_info(cl.context_info.REFERENCE_COUNT))) # fmt: on def printPlatformInfo(self): # fmt: off platform = self.platform - print("===============================================================") - print(" Platform name:", platform.name) - print(" Platform profile:", platform.profile) - print(" Platform vendor:", platform.vendor) - print(" Platform version:", platform.version) + logger.info("===============================================================") + logger.info(" Platform name: " + platform.name) + logger.info(" Platform profile: " + platform.profile) + logger.info(" Platform vendor: " + platform.vendor) + logger.info(" Platform version: " + platform.version) for device in platform.get_devices(): - print("---------------------------------------------------------------") - print(" Device name:", device.name) - print(" type:", cl.device_type.to_string(device.type)) - print(" memory: ", device.global_mem_size // 1024 // 1024, 'MB') - print(" max clock speed:", 
device.max_clock_frequency, 'MHz') - print(" compute units:", device.max_compute_units) - print(" GLOBAL_MEM_SIZE = ", device.get_info( cl.device_info.GLOBAL_MEM_SIZE ) / 4, " float32") - print(" LOCAL_MEM_SIZE = ", device.get_info( cl.device_info.LOCAL_MEM_SIZE ) / 4, " float32") - print(" MAX_CONSTANT_BUFFER_SIZE = ", device.get_info( cl.device_info.MAX_CONSTANT_BUFFER_SIZE ) / 4, " float32") - print(" MAX_WORK_GROUP_SIZE = ", device.get_info( cl.device_info.MAX_WORK_GROUP_SIZE )) + logger.info("---------------------------------------------------------------") + logger.info(" Device name: " + device.name) + logger.info(" type: " + cl.device_type.to_string(device.type)) + logger.info(" memory: " + f"{device.global_mem_size // 1024 // 1024} MB") + logger.info(" max clock speed: " + f"{device.max_clock_frequency} MHz") + logger.info(" compute units: " + str(device.max_compute_units)) + logger.info(f" GLOBAL_MEM_SIZE = {device.get_info( cl.device_info.GLOBAL_MEM_SIZE ) / 4} float32") + logger.info(f" LOCAL_MEM_SIZE = {device.get_info( cl.device_info.LOCAL_MEM_SIZE ) / 4} float32") + logger.info(f" MAX_CONSTANT_BUFFER_SIZE = {device.get_info( cl.device_info.MAX_CONSTANT_BUFFER_SIZE ) / 4} float32") + logger.info(f" MAX_WORK_GROUP_SIZE = {device.get_info( cl.device_info.MAX_WORK_GROUP_SIZE )}") # fmt: on @@ -83,4 +85,4 @@ def init_env(i_platform=0): def print_platforms(): platforms = get_platforms() for i, plat in enumerate(platforms): - print(f"Platform {i}: {plat.name}") + logger.info(f"Platform {i}: {plat.name}") diff --git a/ppafm/ocl/relax.py b/ppafm/ocl/relax.py index fd17fc56..d0d73ec8 100644 --- a/ppafm/ocl/relax.py +++ b/ppafm/ocl/relax.py @@ -4,7 +4,9 @@ import numpy as np import pyopencl as cl -from .. 
import common as PPU +from ..logging_utils import get_logger + +logger = get_logger("ocl.relax") # ========== Globals @@ -18,8 +20,6 @@ DEFAULT_relax_params = np.array( [ 0.5 , 0.1 , 0.02, 0.5 ], dtype=np.float32 ) # fmt: on -verbose = 0 - # ========== Functions @@ -43,8 +43,7 @@ def mat3x3to4f(M): def getInvCell(lvec): cell = lvec[1:4, 0:3] invCell = np.transpose(np.linalg.inv(cell)) - if verbose > 0: - print(invCell) + logger.debug(f"invCell: {invCell}") return mat3x3to4f(invCell) @@ -92,7 +91,6 @@ def rotTip(rot, zstep, tipR0=[0.0, 0.0, 4.0]): class RelaxedScanner: - verbose = 0 def __init__(self): self.queue = oclu.queue @@ -112,8 +110,7 @@ def __init__(self): self.cl_feMap = None def updateFEin(self, FEin_cl, bFinish=False): - if verbose > 0: - print(" updateFEin ", FEin_cl, self.cl_ImgIn, self.FEin_shape) + logger.debug(f"updateFEin {FEin_cl} {self.cl_ImgIn} {self.FEin_shape}") if bFinish: self.queue.finish() cl.enqueue_copy(queue=self.queue, src=FEin_cl, dest=self.cl_ImgIn, offset=0, origin=(0, 0, 0), region=self.FEin_shape[:3]) @@ -148,8 +145,7 @@ def prepareAuxMapBuffers(self, bZMap=False, bFEmap=False, atoms=None): self.nAtoms = np.int32(len(atoms)) self.cl_atoms = cl.Buffer(self.ctx, cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR, hostbuf=atoms) nbytes += atoms.nbytes - if self.verbose > 0: - print("prepareAuxMapBuffers.nbytes: ", nbytes) + logger.debug(f"prepareAuxMapBuffers.nbytes: {nbytes}") def prepareBuffers(self, FEin_np=None, lvec=None, FEin_cl=None, FEin_shape=None, scan_dim=None, nDimConv=None, nDimConvOut=None, bZMap=False, bFEmap=False, atoms=None): nbytes = 0 @@ -161,15 +157,13 @@ def prepareBuffers(self, FEin_np=None, lvec=None, FEin_cl=None, FEin_shape=None, if FEin_np is not None: self.cl_ImgIn = cl.image_from_array(self.ctx, FEin_np, num_channels=4, mode="r") nbytes += FEin_np.nbytes # TODO make this re-uploadable - if self.verbose > 0: - print("prepareBuffers made self.cl_ImgIn ", self.cl_ImgIn) + logger.debug(f"prepareBuffers made 
self.cl_ImgIn {self.cl_ImgIn}") else: if FEin_shape is not None: self.FEin_shape = FEin_shape self.image_format = cl.ImageFormat(cl.channel_order.RGBA, cl.channel_type.FLOAT) self.cl_ImgIn = cl.Image(self.ctx, mf.READ_ONLY, self.image_format, shape=FEin_shape[:3], pitches=None, hostbuf=None, is_array=False, buffer=None) - if self.verbose > 0: - print("prepareBuffers made self.cl_ImgIn ", self.cl_ImgIn) + logger.debug(f"prepareBuffers made self.cl_ImgIn {self.cl_ImgIn}") if FEin_cl is not None: self.updateFEin(FEin_cl) self.FEin_cl = FEin_cl @@ -206,12 +200,10 @@ def prepareBuffers(self, FEin_np=None, lvec=None, FEin_cl=None, FEin_shape=None, self.updateAtoms(atoms) nbytes += atoms.nbytes - if self.verbose > 0: - print("prepareBuffers.nbytes: ", nbytes) + logger.debug(f"prepareBuffers.nbytes: {nbytes}") def releaseBuffers(self): - if self.verbose > 0: - print("tryReleaseBuffers self.cl_ImgIn ", self.cl_ImgIn) + logger.debug(f"tryReleaseBuffers self.cl_ImgIn {self.cl_ImgIn}") self.cl_ImgIn.release() self.cl_poss.release() self.cl_FEout.release() @@ -223,8 +215,7 @@ def releaseBuffers(self): self.cl_atoms.release() def tryReleaseBuffers(self): - if self.verbose > 0: - print("tryReleaseBuffers self.cl_ImgIn ", self.cl_ImgIn) + logger.debug(f"tryReleaseBuffers self.cl_ImgIn {self.cl_ImgIn}") try: self.cl_ImgIn.release() except: @@ -283,8 +274,7 @@ def updateBuffers(self, FEin=None, lvec=None, WZconv=None): if FEin is not None: region = FEin.shape[:3] region = region[::-1] - if self.verbose > 0: - print("region : ", region) + logger.debug(f"region : {region}") cl.enqueue_copy(self.queue, self.cl_ImgIn, FEin, origin=(0, 0, 0), region=region) if WZconv is not None: cl.enqueue_copy(self.queue, self.cl_WZconv, WZconv) @@ -300,8 +290,7 @@ def downloadPaths(self): # Make numpy array. Last axis is bigger by one because OCL aligns to multiples of 4 floats. 
paths = np.empty(self.scan_dim + (4,), dtype=np.float32, order="C") - if self.verbose: - print("paths.shape ", paths.shape) + logger.debug(f"paths.shape {paths.shape}") # Copy from device to host cl.enqueue_copy(self.queue, paths, self.cl_paths)