Dataset columns (name, dtype, observed range):

| column | dtype | range / length |
|---|---|---|
| id | int64 | 0 – 328k |
| repository_name | string | 7 – 58 chars |
| file_path | string | 9 – 302 chars |
| class_name | string | 5 – 256 chars |
| human_written_code | string | 16 – 2.16M chars |
| class_skeleton | string (nullable) | 18 – 1.49M chars |
| total_program_units | int64 | 1 – 1.76k |
| total_doc_str | int64 | 0 – 771 |
| AvgCountLine | float64 | 0 – 7.89k |
| AvgCountLineBlank | float64 | 0 – 297 |
| AvgCountLineCode | float64 | 0 – 7.89k |
| AvgCountLineComment | float64 | 0 – 7.89k |
| AvgCyclomatic | float64 | 0 – 130 |
| CommentToCodeRatio | float64 | 0 – 168 |
| CountClassBase | float64 | 0 – 40 |
| CountClassCoupled | float64 | 0 – 583 |
| CountClassCoupledModified | float64 | 0 – 575 |
| CountClassDerived | float64 | 0 – 5.35k |
| CountDeclInstanceMethod | float64 | 0 – 529 |
| CountDeclInstanceVariable | float64 | 0 – 296 |
| CountDeclMethod | float64 | 0 – 599 |
| CountDeclMethodAll | float64 | 0 – 1.12k |
| CountLine | float64 | 1 – 40.4k |
| CountLineBlank | float64 | 0 – 8.16k |
| CountLineCode | float64 | 1 – 25.7k |
| CountLineCodeDecl | float64 | 1 – 8.15k |
| CountLineCodeExe | float64 | 0 – 24.2k |
| CountLineComment | float64 | 0 – 16.5k |
| CountStmt | float64 | 1 – 9.71k |
| CountStmtDecl | float64 | 1 – 8.15k |
| CountStmtExe | float64 | 0 – 9.69k |
| MaxCyclomatic | float64 | 0 – 759 |
| MaxInheritanceTree | float64 | 0 – 16 |
| MaxNesting | float64 | 0 – 34 |
| SumCyclomatic | float64 | 0 – 2.9k |
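The code and skeleton columns are plain strings and the metrics are scalar columns, so a row can be inspected with standard tooling. A minimal loading sketch, assuming the table is available as a local Parquet file (the file name `class_level_dataset.parquet` is a placeholder, not the dataset's real identifier):

```python
# Hypothetical loading sketch; the file name is a placeholder.
from datasets import load_dataset

ds = load_dataset("parquet", data_files="class_level_dataset.parquet", split="train")

row = ds[0]
print(row["class_name"])                           # e.g. maicos.modules.DiporderPlanar.DiporderPlanar
print(row["CountLineCode"], row["SumCyclomatic"])  # scalar code metrics
```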
Row 328,300

- repository_name: maicos-devel/maicos
- file_path: /Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/maicos-devel_maicos/src/maicos/modules/DiporderPlanar.py
- class_name: maicos.modules.DiporderPlanar.DiporderPlanar
- human_written_code:

```python
import logging
import MDAnalysis as mda
from ..core import ProfilePlanarBase
from ..lib.weights import diporder_weights
from ..lib.util import render_docs, unit_vectors_planar


@render_docs
class DiporderPlanar(ProfilePlanarBase):
    """Cartesian dipolar order parameters.
    ${DIPORDER_DESCRIPTION}
    ${CORRELATION_INFO_PLANAR}
    Parameters
    ----------
    ${ATOMGROUP_PARAMETER}
    ${ORDER_PARAMETER_PARAMETER}
    ${PDIM_PLANAR_PARAMETER}
    ${PROFILE_PLANAR_CLASS_PARAMETERS}
    ${OUTPUT_PARAMETER}
    Attributes
    ----------
    ${PROFILE_PLANAR_CLASS_ATTRIBUTES}
    """

    def __init__(self, atomgroup: mda.AtomGroup, order_parameter: str='P0', pdim: int=2, dim: int=2, zmin: float | None=None, zmax: float | None=None, bin_width: float=1, bin_method: str='com', grouping: str='residues', sym: bool=False, refgroup: mda.AtomGroup | None=None, unwrap: bool=True, pack: bool=True, jitter: float=0.0, concfreq: int=0, output: str='diporder_planar.dat') -> None:
        self._locals = locals()
        normalization = 'volume' if order_parameter == 'P0' else 'number'

        def get_unit_vectors(atomgroup: mda.AtomGroup, grouping: str):
            return unit_vectors_planar(atomgroup=atomgroup, grouping=grouping, pdim=pdim)

        super().__init__(atomgroup=atomgroup, unwrap=unwrap, pack=pack, refgroup=refgroup, jitter=jitter, concfreq=concfreq, dim=dim, zmin=zmin, zmax=zmax, bin_width=bin_width, sym=sym, sym_odd=True, grouping=grouping, bin_method=bin_method, output=output, weighting_function=diporder_weights, weighting_function_kwargs={'order_parameter': order_parameter, 'get_unit_vectors': get_unit_vectors}, normalization=normalization)

    def _prepare(self):
        logging.info('Analysis of the cartesian dipolar order parameters.')
        super()._prepare()
```

- class_skeleton:

```python
@render_docs
class DiporderPlanar(ProfilePlanarBase):
    '''Cartesian dipolar order parameters.
    ${DIPORDER_DESCRIPTION}
    ${CORRELATION_INFO_PLANAR}
    Parameters
    ----------
    ${ATOMGROUP_PARAMETER}
    ${ORDER_PARAMETER_PARAMETER}
    ${PDIM_PLANAR_PARAMETER}
    ${PROFILE_PLANAR_CLASS_PARAMETERS}
    ${OUTPUT_PARAMETER}
    Attributes
    ----------
    ${PROFILE_PLANAR_CLASS_ATTRIBUTES}
    '''

    def __init__(self, atomgroup: mda.AtomGroup, order_parameter: str='P0', pdim: int=2, dim: int=2, zmin: float | None=None, zmax: float | None=None, bin_width: float=1, bin_method: str='com', grouping: str='residues', sym: bool=False, refgroup: mda.AtomGroup | None=None, unwrap: bool=True, pack: bool=True, jitter: float=0.0, concfreq: int=0, output: str='diporder_planar.dat') -> None:
        pass

    def get_unit_vectors(atomgroup: mda.AtomGroup, grouping: str):
        pass

    def _prepare(self):
        pass
```

- metrics: total_program_units=5, total_doc_str=1, AvgCountLine=19, AvgCountLineBlank=1, AvgCountLineCode=18, AvgCountLineComment=0, AvgCyclomatic=1, CommentToCodeRatio=0.27, CountClassBase=1, CountClassCoupled=5, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=1, CountDeclMethod=2, CountDeclMethodAll=31, CountLine=75, CountLineBlank=9, CountLineCode=52, CountLineCodeDecl=24, CountLineCodeExe=30, CountLineComment=14, CountStmt=10, CountStmtDecl=6, CountStmtExe=6, MaxCyclomatic=2, MaxInheritanceTree=4, MaxNesting=0, SumCyclomatic=4
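A minimal usage sketch for the class in this row (not part of the dataset): the topology/trajectory files and the water selection are placeholders, and `run()`/`save()` are assumed from the MDAnalysis-style analysis machinery of `ProfilePlanarBase`.

```python
import MDAnalysis as mda
from maicos.modules.DiporderPlanar import DiporderPlanar

u = mda.Universe("topol.tpr", "traj.trr")  # placeholder files
water = u.select_atoms("resname SOL")      # placeholder selection

# Profile of the P0 order parameter along z (pdim=2).
ana = DiporderPlanar(water, order_parameter="P0", pdim=2).run()
ana.save()  # writes 'diporder_planar.dat' by default
```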
Row 328,301

- repository_name: maicos-devel/maicos
- file_path: /Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/maicos-devel_maicos/src/maicos/modules/DiporderSphere.py
- class_name: maicos.modules.DiporderSphere.DiporderSphere
- human_written_code:

```python
from ..lib.weights import diporder_weights
from ..core import ProfileSphereBase
import MDAnalysis as mda
from ..lib.util import render_docs, unit_vectors_sphere
import logging


@render_docs
class DiporderSphere(ProfileSphereBase):
    """Spherical dipolar order parameters.
    ${DIPORDER_DESCRIPTION}
    ${CORRELATION_INFO_RADIAL}
    Parameters
    ----------
    ${ATOMGROUP_PARAMETER}
    ${ORDER_PARAMETER_PARAMETER}
    ${PROFILE_SPHERE_CLASS_PARAMETERS}
    ${OUTPUT_PARAMETER}
    Attributes
    ----------
    ${PROFILE_SPHERE_CLASS_ATTRIBUTES}
    """

    def __init__(self, atomgroup: mda.AtomGroup, order_parameter: str='P0', rmin: float=0, rmax: float | None=None, bin_width: float=1, bin_method: str='com', grouping: str='residues', refgroup: mda.AtomGroup | None=None, unwrap: bool=True, pack: bool=True, jitter: float=0.0, concfreq: int=0, output: str='diporder_sphere.dat') -> None:
        normalization = 'volume' if order_parameter == 'P0' else 'number'

        def get_unit_vectors(atomgroup: mda.AtomGroup, grouping: str):
            return unit_vectors_sphere(atomgroup=atomgroup, grouping=grouping, bin_method=bin_method)

        super().__init__(atomgroup=atomgroup, unwrap=unwrap, pack=pack, jitter=jitter, refgroup=refgroup, concfreq=concfreq, rmin=rmin, rmax=rmax, bin_width=bin_width, grouping=grouping, bin_method=bin_method, output=output, weighting_function=diporder_weights, weighting_function_kwargs={'order_parameter': order_parameter, 'get_unit_vectors': get_unit_vectors}, normalization=normalization)

    def _prepare(self):
        logging.info('Analysis of the spherical dipolar order parameters.')
        super()._prepare()
```

- class_skeleton:

```python
@render_docs
class DiporderSphere(ProfileSphereBase):
    '''Spherical dipolar order parameters.
    ${DIPORDER_DESCRIPTION}
    ${CORRELATION_INFO_RADIAL}
    Parameters
    ----------
    ${ATOMGROUP_PARAMETER}
    ${ORDER_PARAMETER_PARAMETER}
    ${PROFILE_SPHERE_CLASS_PARAMETERS}
    ${OUTPUT_PARAMETER}
    Attributes
    ----------
    ${PROFILE_SPHERE_CLASS_ATTRIBUTES}
    '''

    def __init__(self, atomgroup: mda.AtomGroup, order_parameter: str='P0', rmin: float=0, rmax: float | None=None, bin_width: float=1, bin_method: str='com', grouping: str='residues', refgroup: mda.AtomGroup | None=None, unwrap: bool=True, pack: bool=True, jitter: float=0.0, concfreq: int=0, output: str='diporder_sphere.dat') -> None:
        pass

    def get_unit_vectors(atomgroup: mda.AtomGroup, grouping: str):
        pass

    def _prepare(self):
        pass
```

- metrics: total_program_units=5, total_doc_str=1, AvgCountLine=17, AvgCountLineBlank=1, AvgCountLineCode=16, AvgCountLineComment=0, AvgCyclomatic=1, CommentToCodeRatio=0.29, CountClassBase=1, CountClassCoupled=5, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=0, CountDeclMethod=2, CountDeclMethodAll=30, CountLine=67, CountLineBlank=9, CountLineCode=45, CountLineCodeDecl=20, CountLineCodeExe=26, CountLineComment=13, CountStmt=9, CountStmtDecl=5, CountStmtExe=5, MaxCyclomatic=2, MaxInheritanceTree=4, MaxNesting=0, SumCyclomatic=4
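An analogous sketch for the spherical variant; `order_parameter="cos_theta"` is one of the weightings handled by `diporder_weights` (it also appears in the structure-factor module below), and the file names and selection are placeholders.

```python
import MDAnalysis as mda
from maicos.modules.DiporderSphere import DiporderSphere

u = mda.Universe("droplet.tpr", "droplet.trr")  # placeholder files
water = u.select_atoms("resname SOL")           # placeholder selection

# Radial profile of the dipole orientation relative to the spherical unit vectors.
ana = DiporderSphere(water, order_parameter="cos_theta", bin_width=0.5).run()
ana.save()  # writes 'diporder_sphere.dat' by default
```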
Row 328,302

- repository_name: maicos-devel/maicos
- file_path: /Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/maicos-devel_maicos/src/maicos/modules/DiporderStructureFactor.py
- class_name: maicos.modules.DiporderStructureFactor.DiporderStructureFactor
- human_written_code:

```python
from ..lib.weights import diporder_weights
from ..lib.math import structure_factor
from ..core import AnalysisBase
from ..lib.util import get_center, render_docs, unit_vectors_planar
import numpy as np
import MDAnalysis as mda
import logging


@render_docs
class DiporderStructureFactor(AnalysisBase):
    """Structure factor for dipoles.
    Extension of the standard structure factor :math:`S(q)`, weighted by the
    normalized dipole moment :math:`\\hat{\\boldsymbol{\\mu}}` of a ``group`` according
    to
    .. math::
        S(q)_{\\hat{\\boldsymbol{\\mu}} \\hat{\\boldsymbol{\\mu}}} = \\left \\langle
        \\frac{1}{N} \\sum_{i,j=1}^N \\hat \\mu_i \\hat \\mu_j \\, \\exp(-i\\boldsymbol q\\cdot
        [\\boldsymbol r_i - \\boldsymbol r_j]) \\right \\rangle
    For the correlation time estimation the module will use the value of the structure
    factor with the smallest possible :math:`q` value.
    For a detailed example of the usage refer to the :ref:`how-to on dipolar
    correlation functions <howto-spatial-dipole-dipole-correlations>`. For general
    details on the theory behind the structure factor refer to :ref:`saxs-explanations`.
    Parameters
    ----------
    ${ATOMGROUP_PARAMETER}
    ${Q_SPACE_PARAMETERS}
    ${BIN_METHOD_PARAMETER}
    ${GROUPING_PARAMETER}
    ${BASE_CLASS_PARAMETERS}
    ${OUTPUT_PARAMETER}
    Attributes
    ----------
    results.scattering_vectors : numpy.ndarray
        Lengths of the binned scattering (q) vectors
    results.structure_factors : numpy.ndarray
        Structure factors
    """

    def __init__(self, atomgroup: mda.AtomGroup, qmin: float=0, qmax: float=6, dq: float=0.01, bin_method: str='com', grouping: str='molecules', refgroup: mda.AtomGroup | None=None, unwrap: bool=True, pack: bool=True, jitter: float=0.0, concfreq: int=0, output: str='sq.dat') -> None:
        self._locals = locals()
        super().__init__(atomgroup, unwrap=unwrap, pack=pack, refgroup=refgroup, jitter=jitter, wrap_compound=grouping, concfreq=concfreq)
        self.bin_method = str(bin_method).lower()
        self.qmin = qmin
        self.qmax = qmax
        self.dq = dq
        self.output = output

    def _prepare(self) -> None:
        logging.info('Analysis of the structure factor of dipoles.')
        self.n_bins = int(np.ceil((self.qmax - self.qmin) / self.dq))

    def _single_frame(self) -> float:
        box = np.diag(mda.lib.mdamath.triclinic_vectors(self._ts.dimensions))
        positions = get_center(atomgroup=self.atomgroup, bin_method=self.bin_method, compound=self.wrap_compound)
        self._obs.structure_factors = np.zeros(self.n_bins)
        # Sum the three Cartesian components; pdim is bound via the default argument.
        for pdim in range(3):
            def get_unit_vectors(atomgroup: mda.AtomGroup, grouping: str, pdim: int=pdim):
                return unit_vectors_planar(atomgroup=atomgroup, grouping=grouping, pdim=pdim)

            weights = diporder_weights(atomgroup=self.atomgroup, grouping=self.wrap_compound, order_parameter='cos_theta', get_unit_vectors=get_unit_vectors)
            scattering_vectors, structure_factors = structure_factor(np.double(positions), np.double(box), self.qmin, self.qmax, 0, np.pi, weights)
            scattering_vectors = scattering_vectors.flatten()
            structure_factors = structure_factors.flatten()
            nonzeros = np.where(structure_factors != 0)[0]
            scattering_vectors = scattering_vectors[nonzeros]
            structure_factors = structure_factors[nonzeros]
            histogram_kwargs = dict(a=scattering_vectors, bins=self.n_bins, range=(self.qmin, self.qmax))
            structure_factors_binned, _ = np.histogram(weights=structure_factors, **histogram_kwargs)
            bincount, _ = np.histogram(weights=None, **histogram_kwargs)
            with np.errstate(invalid='ignore'):
                structure_factors_binned /= bincount
            self._obs.structure_factors += np.nan_to_num(structure_factors_binned)
        self._obs.structure_factors /= len(positions)
        return self._obs.structure_factors[-1]

    def _conclude(self) -> None:
        scattering_vectors = np.arange(self.qmin, self.qmax, self.dq) + 0.5 * self.dq
        nonzeros = np.where(self.means.structure_factors != 0)[0]
        structure_factors = self.means.structure_factors[nonzeros]
        self.results.scattering_vectors = scattering_vectors[nonzeros]
        self.results.structure_factors = structure_factors

    @render_docs
    def save(self) -> None:
        """${SAVE_METHOD_DESCRIPTION}"""
        self.savetxt(self.output, np.vstack([self.results.scattering_vectors, self.results.structure_factors]).T, columns=['q (1/Å)', 'S(q) (arb. units)'])
```

- class_skeleton:

```python
@render_docs
class DiporderStructureFactor(AnalysisBase):
    '''Structure factor for dipoles.
    Extension of the standard structure factor :math:`S(q)`, weighted by the
    normalized dipole moment :math:`\hat{\boldsymbol{\mu}}` of a ``group`` according
    to
    .. math::
        S(q)_{\hat{\boldsymbol{\mu}} \hat{\boldsymbol{\mu}}} = \left \langle
        \frac{1}{N} \sum_{i,j=1}^N \hat \mu_i \hat \mu_j \, \exp(-i\boldsymbol q\cdot
        [\boldsymbol r_i - \boldsymbol r_j]) \right \rangle
    For the correlation time estimation the module will use the value of the structure
    factor with the smallest possible :math:`q` value.
    For a detailed example of the usage refer to the :ref:`how-to on dipolar
    correlation functions <howto-spatial-dipole-dipole-correlations>`. For general
    details on the theory behind the structure factor refer to :ref:`saxs-explanations`.
    Parameters
    ----------
    ${ATOMGROUP_PARAMETER}
    ${Q_SPACE_PARAMETERS}
    ${BIN_METHOD_PARAMETER}
    ${GROUPING_PARAMETER}
    ${BASE_CLASS_PARAMETERS}
    ${OUTPUT_PARAMETER}
    Attributes
    ----------
    results.scattering_vectors : numpy.ndarray
        Lengths of the binned scattering (q) vectors
    results.structure_factors : numpy.ndarray
        Structure factors
    '''

    def __init__(self, atomgroup: mda.AtomGroup, qmin: float=0, qmax: float=6, dq: float=0.01, bin_method: str='com', grouping: str='molecules', refgroup: mda.AtomGroup | None=None, unwrap: bool=True, pack: bool=True, jitter: float=0.0, concfreq: int=0, output: str='sq.dat') -> None:
        pass

    def _prepare(self) -> None:
        pass

    def _single_frame(self) -> float:
        pass

    def get_unit_vectors(atomgroup: mda.AtomGroup, grouping: str, pdim: int=pdim):
        pass

    def _conclude(self) -> None:
        pass

    @render_docs
    def save(self) -> None:
        '''${SAVE_METHOD_DESCRIPTION}'''
        pass
```

- metrics: total_program_units=9, total_doc_str=2, AvgCountLine=20, AvgCountLineBlank=2, AvgCountLineCode=17, AvgCountLineComment=1, AvgCyclomatic=1, CommentToCodeRatio=0.32, CountClassBase=1, CountClassCoupled=8, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=5, CountDeclInstanceVariable=9, CountDeclMethod=5, CountDeclMethodAll=17, CountLine=155, CountLineBlank=26, CountLineCode=98, CountLineCodeDecl=45, CountLineCodeExe=74, CountLineComment=31, CountStmt=42, CountStmtDecl=26, CountStmtExe=35, MaxCyclomatic=2, MaxInheritanceTree=2, MaxNesting=2, SumCyclomatic=7
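A usage sketch for this row; `save()` is defined in the class itself, `run()` is assumed from the `AnalysisBase` loop, and file names and the selection are placeholders.

```python
import MDAnalysis as mda
from maicos.modules.DiporderStructureFactor import DiporderStructureFactor

u = mda.Universe("topol.tpr", "traj.trr")  # placeholder files
water = u.select_atoms("resname SOL")      # placeholder selection

ana = DiporderStructureFactor(water, qmin=0.0, qmax=6.0, dq=0.05).run()
q = ana.results.scattering_vectors   # binned |q| values (1/Å)
s_q = ana.results.structure_factors  # dipole-weighted S(q)
ana.save()                           # writes 'sq.dat' by default
```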
Row 328,303

- repository_name: maicos-devel/maicos
- file_path: /Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/maicos-devel_maicos/src/maicos/modules/KineticEnergy.py
- class_name: maicos.modules.KineticEnergy.KineticEnergy
- human_written_code:

```python
from ..lib.util import get_compound, render_docs
import numpy as np
from ..core import AnalysisBase
import MDAnalysis as mda
import logging


@render_docs
class KineticEnergy(AnalysisBase):
    """Kinetic energy timeseries.
    The kinetic energy function computes the translational and rotational kinetic
    energy with respect to the molecular center (center of mass or center of charge)
    of a molecular dynamics simulation trajectory.
    The analysis can be applied to study the dynamics of water molecules during an
    excitation pulse. For more details read
    :footcite:t:`elgabartyEnergyTransferHydrogen2020`.
    Parameters
    ----------
    ${ATOMGROUP_PARAMETER}
    refpoint : str
        reference point for molecular center: center of mass (``"com"``) or center of
        charge (``"coc"``).
    ${BASE_CLASS_PARAMETERS}
    ${OUTPUT_PARAMETER}
    Attributes
    ----------
    results.t : numpy.ndarray
        time (ps).
    results.trans : numpy.ndarray
        translational kinetic energy (kJ/mol).
    results.rot : numpy.ndarray
        rotational kinetic energy (kJ/mol).
    """

    def __init__(self, atomgroup: mda.AtomGroup, refpoint: str='com', refgroup: mda.AtomGroup | None=None, unwrap: bool=False, pack: bool=True, jitter: float=0.0, concfreq: int=0, output: str='ke.dat') -> None:
        self._locals = locals()
        self.comp = get_compound(atomgroup)
        super().__init__(atomgroup, unwrap=unwrap, pack=pack, refgroup=refgroup, jitter=jitter, concfreq=concfreq, wrap_compound=self.comp)
        self.output = output
        self.refpoint = refpoint.lower()

    def _prepare(self) -> None:
        """Set things up before the analysis loop begins."""
        logging.info('Analysis of the kinetic energy timeseries.')
        if self.refpoint not in ['com', 'coc']:
            raise ValueError(f"Invalid choice for refpoint: {self.refpoint} (choose from 'com' or 'coc')")
        self.masses = self.atomgroup.accumulate(self.atomgroup.masses, compound=self.comp)
        self.abscharges = self.atomgroup.accumulate(np.abs(self.atomgroup.charges), compound=self.comp)
        self.E_kin = np.zeros(self.n_frames)
        self.E_center = np.zeros(self.n_frames)

    def _single_frame(self) -> None:
        self.E_kin[self._frame_index] = np.dot(self.atomgroup.masses, np.linalg.norm(self.atomgroup.velocities, axis=1) ** 2)
        if self.refpoint == 'com':
            massvel = self.atomgroup.velocities * self.atomgroup.masses[:, np.newaxis]
            v = self.atomgroup.accumulate(massvel, compound=get_compound(self.atomgroup))
            v /= self.masses[:, np.newaxis]
        elif self.refpoint == 'coc':
            abschargevel = self.atomgroup.velocities * np.abs(self.atomgroup.charges)[:, np.newaxis]
            v = self.atomgroup.accumulate(abschargevel, compound=get_compound(self.atomgroup))
            v /= self.abscharges[:, np.newaxis]
        self.E_center[self._frame_index] = np.dot(self.masses, np.linalg.norm(v, axis=1) ** 2)

    def _conclude(self) -> None:
        self.results.t = self.times
        self.results.trans = self.E_center / 2 / 100
        self.results.rot = (self.E_kin - self.E_center) / 2 / 100

    @render_docs
    def save(self) -> None:
        """${SAVE_METHOD_DESCRIPTION}"""
        self.savetxt(self.output, np.vstack([self.results.t, self.results.trans, self.results.rot]).T, columns=['t', 'E_kin^trans [kJ/mol]', 'E_kin^rot [kJ/mol]'])
```

- class_skeleton:

```python
@render_docs
class KineticEnergy(AnalysisBase):
    '''Kinetic energy timeseries.
    The kinetic energy function computes the translational and rotational kinetic
    energy with respect to the molecular center (center of mass or center of charge)
    of a molecular dynamics simulation trajectory.
    The analysis can be applied to study the dynamics of water molecules during an
    excitation pulse. For more details read
    :footcite:t:`elgabartyEnergyTransferHydrogen2020`.
    Parameters
    ----------
    ${ATOMGROUP_PARAMETER}
    refpoint : str
        reference point for molecular center: center of mass (``"com"``) or center of
        charge (``"coc"``).
    ${BASE_CLASS_PARAMETERS}
    ${OUTPUT_PARAMETER}
    Attributes
    ----------
    results.t : numpy.ndarray
        time (ps).
    results.trans : numpy.ndarray
        translational kinetic energy (kJ/mol).
    results.rot : numpy.ndarray
        rotational kinetic energy (kJ/mol).
    '''

    def __init__(self, atomgroup: mda.AtomGroup, refpoint: str='com', refgroup: mda.AtomGroup | None=None, unwrap: bool=False, pack: bool=True, jitter: float=0.0, concfreq: int=0, output: str='ke.dat') -> None:
        pass

    def _prepare(self) -> None:
        '''Set things up before the analysis loop begins.'''
        pass

    def _single_frame(self) -> None:
        pass

    def _conclude(self) -> None:
        pass

    @render_docs
    def save(self) -> None:
        '''${SAVE_METHOD_DESCRIPTION}'''
        pass
```

- metrics: total_program_units=8, total_doc_str=3, AvgCountLine=16, AvgCountLineBlank=1, AvgCountLineCode=14, AvgCountLineComment=1, AvgCyclomatic=2, CommentToCodeRatio=0.38, CountClassBase=1, CountClassCoupled=6, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=5, CountDeclInstanceVariable=8, CountDeclMethod=5, CountDeclMethodAll=17, CountLine=118, CountLineBlank=17, CountLineCode=73, CountLineCodeDecl=28, CountLineCodeExe=56, CountLineComment=28, CountStmt=31, CountStmtDecl=17, CountStmtExe=25, MaxCyclomatic=3, MaxInheritanceTree=2, MaxNesting=1, SumCyclomatic=8
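A usage sketch for this row; note from `_single_frame` that the trajectory must carry velocities (e.g. a GROMACS .trr file). File names are placeholders and `run()` is assumed from `AnalysisBase`.

```python
import MDAnalysis as mda
from maicos.modules.KineticEnergy import KineticEnergy

u = mda.Universe("topol.tpr", "traj.trr")  # placeholder files; must contain velocities

ana = KineticEnergy(u.atoms, refpoint="com").run()
print(ana.results.t[:3])      # time (ps)
print(ana.results.trans[:3])  # translational kinetic energy (kJ/mol)
print(ana.results.rot[:3])    # rotational kinetic energy (kJ/mol)
```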
Row 328,304

- repository_name: maicos-devel/maicos
- file_path: /Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/maicos-devel_maicos/src/maicos/modules/PDFCylinder.py
- class_name: maicos.modules.PDFCylinder.PDFCylinder
- human_written_code:

```python
import MDAnalysis as mda
import logging
from MDAnalysis.lib.distances import capped_distance
import numpy as np
from ..core import CylinderBase
from ..lib.math import transform_cylinder
from ..lib.util import get_center, get_compound, render_docs


@render_docs
class PDFCylinder(CylinderBase):
    """Shell-wise one-dimensional (cylindrical) pair distribution functions.
    The one-dimensional pair distribution functions :math:`g_{\\text{1d}}(\\phi)`
    and :math:`g_{\\text{1d}}(z)` describe the pair distribution of particles
    which lie on the same cylinder, along the angular and axial directions
    respectively. These functions can be used in cylindrical systems that are
    inhomogeneous along the radial coordinate and homogeneous in the angular and
    axial directions. They give the average number density of :math:`g2` as a
    function of the angular and axial distance, respectively, from a :math:`g1` atom.
    The angular pair distribution function is
    .. math::
        g_{\\text{1d}}(\\phi) = \\left \\langle \\sum_{i}^{N_{g_1}}
        \\sum_{j}^{N_{g2}} \\delta(\\phi - \\phi_{ij}) \\delta(R_{ij}) \\delta(z_{ij})
        \\right \\rangle
    and the axial pair distribution function is
    .. math::
        g_{\\text{1d}}(z) = \\left \\langle \\sum_{i}^{N_{g_1}}
        \\sum_{j}^{N_{g2}} \\delta(z - z_{ij}) \\delta(R_{ij}) \\delta(\\phi_{ij})
        \\right \\rangle
    Even though, for consistency, the results are called pair distribution
    functions, the output is not unitless. The default output has dimensions of
    number/volume in :math:`Å^{-3}`. If ``density`` is set to :py:obj:`True`, the
    output is normalised by the density of :math:`g2`.
    Parameters
    ----------
    ${ATOMGROUP_PARAMETER}
    ${PDF_PARAMETERS}
    bin_width_pdf_z : float
        Binwidth of bins in the histogram of the axial PDF (Å).
    bin_width_pdf_phi : float
        Binwidth of bins in the histogram of the angular PDF (Å).
    drwidth : float
        Radial width of a PDF cylindrical shell (Å), and of the axial or angular
        (arc) slices.
    dmin : float
        Minimum pairwise distance between ``g1`` and ``g2`` (Å).
    dmax : float
        Maximum pairwise distance between ``g1`` and ``g2`` (Å).
    density : bool
        Normalise the PDF by the density of ``g2`` (:math:`Å^{-3}`).
    origin : numpy.ndarray
        Set origin of the cylindrical coordinate system (x,y,z). If :obj:`None` the
        origin will be set according to the ``refgroup`` parameter.
    ${CYLINDER_CLASS_PARAMETERS}
    ${BIN_METHOD_PARAMETER}
    ${BASE_CLASS_PARAMETERS}
    ${OUTPUT_PARAMETER}
    Attributes
    ----------
    ${CYLINDER_CLASS_ATTRIBUTES}
    results.bins_phi : numpy.ndarray
        Angular distances at which the PDF is calculated, shape (`pdf_nbins`,) (Å)
    results.bins_z : numpy.ndarray
        Axial distances at which the PDF is calculated, shape (`pdf_nbins`,) (Å)
    results.pdf_phi : numpy.ndarray
        Angular PDF, shape (`pdf_nbins`, `n_bins`) (:math:`\\text{Å}^{-3}`)
    results.pdf_z : numpy.ndarray
        Axial PDF, shape (`pdf_nbins`, `n_bins`) (:math:`\\text{Å}^{-3}`)
    """

    def __init__(self, g1: mda.AtomGroup, g2: mda.AtomGroup | None=None, bin_width_pdf_z: float=0.3, bin_width_pdf_phi: float=0.1, drwidth: float=0.1, dmin: float | None=None, dmax: float | None=None, density: bool=False, origin: np.ndarray | None=None, dim: int=2, zmin: float | None=None, zmax: float | None=None, rmin: float=0, rmax: float | None=None, bin_width: float=1, bin_method: str='com', refgroup: mda.AtomGroup | None=None, unwrap: bool=False, pack: bool=True, jitter: float=0.0, concfreq: int=0, output: str='pdf.dat') -> None:
        self.comp_1 = get_compound(g1)
        super().__init__(atomgroup=g1, refgroup=refgroup, unwrap=unwrap, pack=pack, concfreq=concfreq, jitter=jitter, dim=dim, rmin=rmin, rmax=rmax, zmin=zmin, zmax=zmax, bin_width=bin_width, wrap_compound=self.comp_1)
        self.g1 = g1
        if g2 is None:
            self.g2 = g1
        else:
            self.g2 = g2
        self.bin_width_pdf_phi = bin_width_pdf_phi
        self.bin_width_pdf_z = bin_width_pdf_z
        self.drwidth = drwidth
        self.bin_width = bin_width
        self.output = output
        self.bin_method = bin_method.lower()
        if origin is not None and origin.shape != (3,):
            raise ValueError(f'Origin has shape {origin.shape} but only (3,) is allowed.')
        self.origin = origin
        self.comp_2 = get_compound(self.g2)
        self.nbins_pdf_phi = 100
        self.nbins_pdf_z = 100
        self.dmin = dmin
        self.dmax = dmax
        self.density = density

    def _prepare(self) -> None:
        super()._prepare()
        logging.info('Analysis of the cylindrical pair distribution function.')
        if self.origin is None:
            self.origin = self.box_center
        if self.dmin is None:
            self.dmin = 0
        if self.dmax is None:
            self.dmax = self.box_center[self.dim]
        elif self.dmax > self.box_center[self.dim]:
            raise ValueError('Axial range of PDF exceeds half of the box size. This will lead to unexpected results.')
        if self.bin_width_pdf_z > 0:
            self.nbins_pdf_z = int(np.ceil((self.dmax - self.dmin) / self.bin_width_pdf_z))
            self.bin_width_pdf_z = (self.dmax - self.dmin) / self.nbins_pdf_z
        else:
            raise ValueError('PDF bin_width must be a positive number.')
        if self.bin_width_pdf_phi > 0:
            self.nbins_pdf_phi = int(np.ceil(np.pi / self.bin_width_pdf_phi))
            self.bin_width_pdf_phi = np.pi / self.nbins_pdf_phi
        else:
            raise ValueError('PDF bin_width must be a positive number.')
        if self.bin_method not in ['cog', 'com', 'coc']:
            raise ValueError(f'{self.bin_method} is an unknown binning method. Use `cog`, `com` or `coc`.')
        logging.info(f'Using {self.nbins_pdf_phi} pdf bins in phi direction and {self.nbins_pdf_z} in z direction.')

    def _single_frame(self) -> None:
        super()._single_frame()
        self._obs.n_g1 = np.zeros((self.n_bins, 1))
        self._obs.n_g2 = np.zeros((self.n_bins, 1))
        self._obs.count_phi = np.zeros((self.n_bins, self.nbins_pdf_phi))
        self._obs.count_z = np.zeros((self.n_bins, self.nbins_pdf_z))
        g1_bin_positions = get_center(atomgroup=self.g1, bin_method=self.bin_method, compound=self.comp_1)
        g2_bin_positions = get_center(atomgroup=self.g2, bin_method=self.bin_method, compound=self.comp_2)
        g1_bin_positions_cyl = transform_cylinder(g1_bin_positions, origin=self.origin, dim=self.dim)
        g2_bin_positions_cyl = transform_cylinder(g2_bin_positions, origin=self.origin, dim=self.dim)
        for r_bin in range(0, self.n_bins):
            g1_in_rbin_positions = g1_bin_positions_cyl[np.logical_and(g1_bin_positions_cyl[:, 0] >= self._obs.bin_edges[r_bin], g1_bin_positions_cyl[:, 0] < self._obs.bin_edges[r_bin + 1])]
            g2_in_rbin_positions = g2_bin_positions_cyl[np.logical_and(g2_bin_positions_cyl[:, 0] >= self._obs.bin_edges[r_bin] - self.drwidth, g2_bin_positions_cyl[:, 0] < self._obs.bin_edges[r_bin + 1] + self.drwidth)]
            self._obs.n_g1[r_bin] = len(g1_in_rbin_positions)
            self._obs.n_g2[r_bin] = len(g2_in_rbin_positions)
            # Pairs close in r, phi and z, respectively; each coordinate is isolated
            # by zeroing the other two components before the distance search.
            r_pairs = capped_distance(g1_in_rbin_positions * [1, 0, 0], g2_in_rbin_positions * [1, 0, 0], self.drwidth, box=None, return_distances=False)
            phi_pairs = capped_distance(g1_in_rbin_positions * [0, 1, 0], g2_in_rbin_positions * [0, 1, 0], self.drwidth / self._obs.bin_pos[r_bin], box=[0, 2 * np.pi, 0, 90, 90, 90], return_distances=False)
            z_pairs = capped_distance(g1_in_rbin_positions * [0, 0, 1], g2_in_rbin_positions * [0, 0, 1], self.drwidth, box=[0, 0, self._universe.dimensions[self.dim], 90, 90, 90], return_distances=False)
            phi_dist_pairs, phi_distances = capped_distance(g1_in_rbin_positions * [0, 1, 0], g2_in_rbin_positions * [0, 1, 0], np.pi, box=[0, 2 * np.pi, 0, 90, 90, 90])
            z_dist_pairs, z_distances = capped_distance(g1_in_rbin_positions * [0, 0, 1], g2_in_rbin_positions * [0, 0, 1], self.dmax, box=[0, 0, self._universe.dimensions[self.dim], 90, 90, 90])
            # Encode each (i, j) pair as a single integer so pair lists can be
            # intersected with np.isin.
            r_pairs_encode = r_pairs[:, 0] + self._obs.n_g2[r_bin] * r_pairs[:, 1]
            phi_pairs_encode = phi_pairs[:, 0] + self._obs.n_g2[r_bin] * phi_pairs[:, 1]
            z_pairs_encode = z_pairs[:, 0] + self._obs.n_g2[r_bin] * z_pairs[:, 1]
            phi_dist_pairs_encode = phi_dist_pairs[:, 0] + self._obs.n_g2[r_bin] * phi_dist_pairs[:, 1]
            z_dist_pairs_encode = z_dist_pairs[:, 0] + self._obs.n_g2[r_bin] * z_dist_pairs[:, 1]
            mask_in_dr_and_dz = np.isin(phi_dist_pairs_encode, r_pairs_encode) * np.isin(phi_dist_pairs_encode, z_pairs_encode)
            mask_in_dr_and_dphi = np.isin(z_dist_pairs_encode, r_pairs_encode) * np.isin(z_dist_pairs_encode, phi_pairs_encode)
            mask_same_atom = phi_distances > 0
            relevant_phi_distances = phi_distances[mask_in_dr_and_dz * mask_same_atom]
            mask_same_atom = z_distances > 0
            relevant_z_distances = z_distances[mask_in_dr_and_dphi * mask_same_atom]
            self._obs.count_phi[r_bin] = np.histogram(relevant_phi_distances, bins=self.nbins_pdf_phi, range=(0, np.pi))[0]
            self._obs.count_z[r_bin] = np.histogram(relevant_z_distances, bins=self.nbins_pdf_z, range=(self.dmin, self.dmax))[0]

    def _conclude(self) -> None:
        super()._conclude()
        g2_density = self.means.n_g2 / self.means.bin_volume if self.density else 1
        phi_norm = np.array([2 * (self.means.bin_edges[1:] + self.means.bin_edges[:-1]) / 2 * self.bin_width_pdf_phi * 2 * self.drwidth * 2 * self.drwidth]).T * g2_density
        z_norm = 2 * self.bin_width_pdf_z * 2 * self.drwidth * 2 * self.drwidth * g2_density
        with np.errstate(invalid='ignore', divide='ignore'):
            pdf_phi = self.means.count_phi / self.means.n_g1 / phi_norm
            self.results.pdf_phi = np.nan_to_num(pdf_phi, nan=0, posinf=0, neginf=0)
        with np.errstate(invalid='ignore', divide='ignore'):
            pdf_z = self.means.count_z / self.means.n_g1 / z_norm
            self.results.pdf_z = np.nan_to_num(pdf_z, nan=0, posinf=0, neginf=0)
        edges_phi = np.histogram([-1], bins=self.nbins_pdf_phi, range=(0, np.pi))[1]
        edges_z = np.histogram([-1], bins=self.nbins_pdf_z, range=(self.dmin, self.dmax))[1]
        self.results.bins_phi = 0.5 * (edges_phi[1:] + edges_phi[:-1])
        self.results.bins_z = 0.5 * (edges_z[1:] + edges_z[:-1])

    @render_docs
    def save(self) -> None:
        """${SAVE_METHOD_DESCRIPTION}"""
        columns = ['r [Å]']
        for r in self.results.bin_pos:
            columns.append(f'pdf at {r:.2f} Å [Å^-3]')
        self.savetxt('phi_' + self.output, np.hstack([self.results.bins_phi[:, np.newaxis], self.results.pdf_phi.T]), columns=columns)
        self.savetxt('z_' + self.output, np.hstack([self.results.bins_z[:, np.newaxis], self.results.pdf_z.T]), columns=columns)
```

- class_skeleton:

```python
@render_docs
class PDFCylinder(CylinderBase):
    '''Shell-wise one-dimensional (cylindrical) pair distribution functions.
    The one-dimensional pair distribution functions :math:`g_{\text{1d}}(\phi)`
    and :math:`g_{\text{1d}}(z)` describe the pair distribution of particles
    which lie on the same cylinder, along the angular and axial directions
    respectively. These functions can be used in cylindrical systems that are
    inhomogeneous along the radial coordinate and homogeneous in the angular and
    axial directions. They give the average number density of :math:`g2` as a
    function of the angular and axial distance, respectively, from a :math:`g1` atom.
    The angular pair distribution function is
    .. math::
        g_{\text{1d}}(\phi) = \left \langle \sum_{i}^{N_{g_1}}
        \sum_{j}^{N_{g2}} \delta(\phi - \phi_{ij}) \delta(R_{ij}) \delta(z_{ij})
        \right \rangle
    and the axial pair distribution function is
    .. math::
        g_{\text{1d}}(z) = \left \langle \sum_{i}^{N_{g_1}}
        \sum_{j}^{N_{g2}} \delta(z - z_{ij}) \delta(R_{ij}) \delta(\phi_{ij})
        \right \rangle
    Even though, for consistency, the results are called pair distribution
    functions, the output is not unitless. The default output has dimensions of
    number/volume in :math:`Å^{-3}`. If ``density`` is set to :py:obj:`True`, the
    output is normalised by the density of :math:`g2`.
    Parameters
    ----------
    ${ATOMGROUP_PARAMETER}
    ${PDF_PARAMETERS}
    bin_width_pdf_z : float
        Binwidth of bins in the histogram of the axial PDF (Å).
    bin_width_pdf_phi : float
        Binwidth of bins in the histogram of the angular PDF (Å).
    drwidth : float
        Radial width of a PDF cylindrical shell (Å), and of the axial or angular
        (arc) slices.
    dmin : float
        Minimum pairwise distance between ``g1`` and ``g2`` (Å).
    dmax : float
        Maximum pairwise distance between ``g1`` and ``g2`` (Å).
    density : bool
        Normalise the PDF by the density of ``g2`` (:math:`Å^{-3}`).
    origin : numpy.ndarray
        Set origin of the cylindrical coordinate system (x,y,z). If :obj:`None` the
        origin will be set according to the ``refgroup`` parameter.
    ${CYLINDER_CLASS_PARAMETERS}
    ${BIN_METHOD_PARAMETER}
    ${BASE_CLASS_PARAMETERS}
    ${OUTPUT_PARAMETER}
    Attributes
    ----------
    ${CYLINDER_CLASS_ATTRIBUTES}
    results.bins_phi : numpy.ndarray
        Angular distances at which the PDF is calculated, shape (`pdf_nbins`,) (Å)
    results.bins_z : numpy.ndarray
        Axial distances at which the PDF is calculated, shape (`pdf_nbins`,) (Å)
    results.pdf_phi : numpy.ndarray
        Angular PDF, shape (`pdf_nbins`, `n_bins`) (:math:`\text{Å}^{-3}`)
    results.pdf_z : numpy.ndarray
        Axial PDF, shape (`pdf_nbins`, `n_bins`) (:math:`\text{Å}^{-3}`)
    '''

    def __init__(self, g1: mda.AtomGroup, g2: mda.AtomGroup | None=None, bin_width_pdf_z: float=0.3, bin_width_pdf_phi: float=0.1, drwidth: float=0.1, dmin: float | None=None, dmax: float | None=None, density: bool=False, origin: np.ndarray | None=None, dim: int=2, zmin: float | None=None, zmax: float | None=None, rmin: float=0, rmax: float | None=None, bin_width: float=1, bin_method: str='com', refgroup: mda.AtomGroup | None=None, unwrap: bool=False, pack: bool=True, jitter: float=0.0, concfreq: int=0, output: str='pdf.dat') -> None:
        pass

    def _prepare(self) -> None:
        pass

    def _single_frame(self) -> None:
        pass

    def _conclude(self) -> None:
        pass

    @render_docs
    def save(self) -> None:
        '''${SAVE_METHOD_DESCRIPTION}'''
        pass
```

- metrics: total_program_units=8, total_doc_str=2, AvgCountLine=63, AvgCountLineBlank=7, AvgCountLineCode=51, AvgCountLineComment=6, AvgCyclomatic=3, CommentToCodeRatio=0.34, CountClassBase=1, CountClassCoupled=8, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=5, CountDeclInstanceVariable=16, CountDeclMethod=5, CountDeclMethodAll=28, CountLine=392, CountLineBlank=52, CountLineCode=257, CountLineCodeDecl=78, CountLineCodeExe=226, CountLineComment=87, CountStmt=99, CountStmtDecl=53, CountStmtExe=93, MaxCyclomatic=8, MaxInheritanceTree=4, MaxNesting=2, SumCyclomatic=17
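A usage sketch for the cylindrical PDFs; the pore system and oxygen selection are placeholders, and `run()` is assumed from the analysis base class.

```python
import MDAnalysis as mda
from maicos.modules.PDFCylinder import PDFCylinder

u = mda.Universe("pore.tpr", "pore.trr")  # placeholder files
oxygens = u.select_atoms("name OW")       # placeholder selection

# Angular and axial PDFs in radial shells around the z-axis (dim=2).
ana = PDFCylinder(g1=oxygens, bin_width=2.0, drwidth=0.1).run()
ana.save()  # writes 'phi_pdf.dat' and 'z_pdf.dat' by default
```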
Row 328,305

- repository_name: maicos-devel/maicos
- file_path: /Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/maicos-devel_maicos/src/maicos/modules/PDFPlanar.py
- class_name: maicos.modules.PDFPlanar.PDFPlanar
- human_written_code:

```python
from MDAnalysis.lib.distances import capped_distance
import numpy as np
from ..lib.util import get_center, get_compound, render_docs
import logging
from ..core import PlanarBase
import MDAnalysis as mda


@render_docs
class PDFPlanar(PlanarBase):
    """Slab-wise planar 2D pair distribution functions.
    The pair distribution function :math:`g_\\mathrm{2D}(r)` describes the
    spatial correlation between atoms in :math:`g_1` and atoms in
    :math:`g_2` which lie in the same plane.
    It gives the average number density of :math:`g_2` atoms as a function of lateral
    distance :math:`r` from a centered :math:`g_1` atom.
    PDFPlanar can be used in systems that are inhomogeneous along one axis,
    and homogeneous in a plane.
    In fully homogeneous systems and in the limit of small ``dzheight``
    :math:`\\Delta z`, it is the same as the well-known three-dimensional PDF.
    The planar PDF is defined by
    .. math::
        g_\\mathrm{2D}(r) = \\left \\langle
        \\frac{1}{N_{g1}} \\cdot \\sum_{i}^{N_{g1}} \\sum_{j}^{N_{g2}}
        \\frac{1}{2 \\pi r} \\delta(r - r_{ij}) \\delta(z_{ij})
        \\right \\rangle .
    where the brackets :math:`\\langle \\cdot \\rangle` denote the ensemble
    average. :math:`\\delta(r - r_{ij})` counts the :math:`g_2` atoms at distance
    :math:`r` from atom :math:`i`.
    :math:`\\delta(z_{ij})` ensures that only atoms which lie
    in the same plane :math:`z_i = z_j` are considered for the PDF.
    Discretized for computational purposes the equation reads
    .. math::
        g_\\mathrm{2D}(r) =
        \\frac{1}{N_{g1}} \\cdot \\sum_{i}^{N_{g1}} \\frac{\\mathrm{count}\\; g_2 \\;
        \\mathrm{in}\\; \\Delta V_i(r) }{\\Delta V_i(r)} .
    where :math:`\\Delta V_i(r)` is a ring around atom :math:`i`, with inner
    radius :math:`r - \\frac{\\Delta r}{2}`, outer radius
    :math:`r + \\frac{\\Delta r}{2}` and height :math:`2 \\Delta z`.
    As the density to normalise the PDF with is unknown, the output is in
    the dimension of number/volume in 1/Å^3.
    Functionally, PDFPlanar bins all pairwise :math:`g_1`-:math:`g_2` distances
    for which the z distance is smaller than ``dzheight`` into a histogram.
    For a more detailed explanation refer to
    :ref:`Explanation: PDF<pdfs-explanation>` and
    :ref:`PDFPlanar Derivation<pdfplanar-derivation>`.
    Parameters
    ----------
    ${PDF_PARAMETERS}
    pdf_bin_width : float
        Binwidth of bins in the histogram of the PDF (Å).
    dzheight : float
        dz height of a PDF slab :math:`\\Delta z` (Å). :math:`\\Delta z` is
        introduced to discretize the delta function :math:`\\delta(z_{ij})`.
        It is the maximum :math:`z` distance between atoms which are
        considered to lie in the same plane.
        In the limit of :math:`\\Delta z \\to 0`, PDFPlanar reaches the
        continuous limit. However, if :math:`\\Delta z` is too small, there
        are no atoms in ``g2`` to sample.
        We recommend a choice of :math:`\\Delta z` that is 1/10th of
        a bond length.
    dmin : float
        Minimum pairwise distance between ``g1`` and ``g2`` (Å).
    dmax : float
        Maximum pairwise distance between ``g1`` and ``g2`` (Å).
    ${PLANAR_CLASS_PARAMETERS}
    ${BIN_METHOD_PARAMETER}
    ${BASE_CLASS_PARAMETERS}
    ${OUTPUT_PARAMETER}
    Attributes
    ----------
    ${PLANAR_CLASS_ATTRIBUTES}
    results.bins : numpy.ndarray
        distances at which the PDF is calculated, shape (``pdf_nbins``,) (Å)
    results.pdf : numpy.ndarray
        PDF, shape (``pdf_nbins``, ``n_bins``) (1/Å^3)
    """

    def __init__(self, g1: mda.AtomGroup, g2: mda.AtomGroup | None=None, pdf_bin_width: float=0.3, dzheight: float=0.1, dmin: float=0.0, dmax: float | None=None, dim: int=2, zmin: float | None=None, zmax: float | None=None, bin_width: float=1, bin_method: str='com', refgroup: mda.AtomGroup | None=None, unwrap: bool=False, pack: bool=True, jitter: float=0.0, concfreq: int=0, output: str='pdf.dat') -> None:
        self._locals = locals()
        self.comp_1 = get_compound(g1)
        super().__init__(atomgroup=g1, refgroup=refgroup, unwrap=unwrap, pack=pack, concfreq=concfreq, jitter=jitter, dim=dim, zmin=zmin, zmax=zmax, bin_width=bin_width, wrap_compound=self.comp_1)
        self.g1 = g1
        if g2 is None:
            self.g2 = g1
        else:
            self.g2 = g2
        self.dmin = dmin
        self.dmax = dmax
        self.pdf_bin_width = pdf_bin_width
        self.dzheight = dzheight
        self.output = output
        self.bin_method = bin_method.lower()
        self.comp_2 = get_compound(self.g2)

    def _prepare(self) -> None:
        super()._prepare()
        logging.info('Analysis of the planar pair distribution function.')
        half_of_box_size = min(self.box_center)
        if self.dmax is None:
            self.dmax = min(self.box_center)
            logging.info(f'Setting maximum range of PDF to half the box size ({self.dmax} Å).')
        elif self.dmax > min(self.box_center):
            raise ValueError(f'Range of PDF exceeds half of the box size. Set to smaller than {half_of_box_size} Å.')
        try:
            if self.pdf_bin_width > 0:
                self.pdf_nbins = int(np.ceil((self.dmax - self.dmin) / self.pdf_bin_width))
            else:
                raise ValueError('PDF bin_width must be a positive number.')
        except TypeError as err:
            raise ValueError('PDF bin_width must be a number.') from err
        if self.bin_method not in ['cog', 'com', 'coc']:
            raise ValueError(f'{self.bin_method} is an unknown binning method. Use `cog`, `com` or `coc`.')
        logging.info(f'Using {self.pdf_nbins} pdf bins.')
        self.edges = np.histogram([-1], bins=self.pdf_nbins, range=(self.dmin, self.dmax))[1]
        self.results.bins = 0.5 * (self.edges[:-1] + self.edges[1:])
        self._maxrange = self.dmax

    def _single_frame(self) -> None:
        super()._single_frame()
        self._obs.n_g1 = np.zeros((self.n_bins, 1))
        self._obs.count = np.zeros((self.n_bins, self.pdf_nbins))
        bin_width = (self.zmax - self.zmin) / self.n_bins
        g1_bin_positions = get_center(atomgroup=self.g1, bin_method=self.bin_method, compound=self.comp_1)
        g2_bin_positions = get_center(atomgroup=self.g2, bin_method=self.bin_method, compound=self.comp_2)
        for z_bin in range(0, self.n_bins):
            z_min = self.zmin + bin_width * z_bin
            z_max = self.zmin + bin_width * (z_bin + 1)
            g1_in_zbin_positions = g1_bin_positions[np.logical_and(g1_bin_positions[:, self.dim] >= z_min, g1_bin_positions[:, self.dim] < z_max)]
            g2_in_zbin_positions = g2_bin_positions[np.logical_and(g2_bin_positions[:, self.dim] >= z_min - self.dzheight, g2_bin_positions[:, self.dim] < z_max + self.dzheight)]
            n_g1 = len(g1_in_zbin_positions)
            n_g2 = len(g2_in_zbin_positions)
            self._obs.n_g1[z_bin] = n_g1
            # Project positions onto the slab normal to find pairs close in z.
            z_g1 = np.copy(g1_in_zbin_positions)
            z_g2 = np.copy(g2_in_zbin_positions)
            z_g1[:, self.odims] = 0
            z_g2[:, self.odims] = 0
            z_pairs, _ = capped_distance(z_g1, z_g2, self.dzheight, box=self._universe.dimensions)
            pairs, xy_distances = capped_distance(g1_in_zbin_positions, g2_in_zbin_positions, self._maxrange, box=self._universe.dimensions)
            # Encode each (i, j) pair as a single integer so both pair lists can be
            # intersected with np.isin.
            z_pairs_encode = z_pairs[:, 0] + n_g2 * z_pairs[:, 1]
            pairs_encode = pairs[:, 0] + n_g2 * pairs[:, 1]
            mask_in_dz = np.isin(pairs_encode, z_pairs_encode)
            mask_different_atoms = np.where(xy_distances > 0, True, False)
            relevant_xy_distances = xy_distances[mask_in_dz * mask_different_atoms]
            self._obs.count[z_bin] = np.histogram(relevant_xy_distances, bins=self.pdf_nbins, range=(self.dmin, self.dmax))[0]

    def _conclude(self) -> None:
        super()._conclude()
        # Volume of a ring with thickness dr and height 2 * dzheight around each g1 atom.
        ring_volumes = np.pi * (self.edges[1:] ** 2 - self.edges[:-1] ** 2) * 2 * self.dzheight
        ring_volumes = np.expand_dims(ring_volumes, axis=0)
        self.results.pdf = self.means.count / self.means.n_g1 / ring_volumes
        self.results.pdf = np.nan_to_num(self.results.pdf.T, nan=0)

    @render_docs
    def save(self) -> None:
        """${SAVE_METHOD_DESCRIPTION}"""
        columns = ['r [Å]']
        for z in self.results.bin_pos:
            columns.append(f'pdf at {z:.2f} Å [Å^-3]')
        self.savetxt(self.output, np.hstack([self.results.bins[:, np.newaxis], self.results.pdf]), columns=columns)
```

- class_skeleton:

```python
@render_docs
class PDFPlanar(PlanarBase):
    '''Slab-wise planar 2D pair distribution functions.
    The pair distribution function :math:`g_\mathrm{2D}(r)` describes the
    spatial correlation between atoms in :math:`g_1` and atoms in
    :math:`g_2` which lie in the same plane.
    It gives the average number density of :math:`g_2` atoms as a function of lateral
    distance :math:`r` from a centered :math:`g_1` atom.
    PDFPlanar can be used in systems that are inhomogeneous along one axis,
    and homogeneous in a plane.
    In fully homogeneous systems and in the limit of small ``dzheight``
    :math:`\Delta z`, it is the same as the well-known three-dimensional PDF.
    The planar PDF is defined by
    .. math::
        g_\mathrm{2D}(r) = \left \langle
        \frac{1}{N_{g1}} \cdot \sum_{i}^{N_{g1}} \sum_{j}^{N_{g2}}
        \frac{1}{2 \pi r} \delta(r - r_{ij}) \delta(z_{ij})
        \right \rangle .
    where the brackets :math:`\langle \cdot \rangle` denote the ensemble
    average. :math:`\delta(r - r_{ij})` counts the :math:`g_2` atoms at distance
    :math:`r` from atom :math:`i`.
    :math:`\delta(z_{ij})` ensures that only atoms which lie
    in the same plane :math:`z_i = z_j` are considered for the PDF.
    Discretized for computational purposes the equation reads
    .. math::
        g_\mathrm{2D}(r) =
        \frac{1}{N_{g1}} \cdot \sum_{i}^{N_{g1}} \frac{\mathrm{count}\; g_2 \;
        \mathrm{in}\; \Delta V_i(r) }{\Delta V_i(r)} .
    where :math:`\Delta V_i(r)` is a ring around atom :math:`i`, with inner
    radius :math:`r - \frac{\Delta r}{2}`, outer radius
    :math:`r + \frac{\Delta r}{2}` and height :math:`2 \Delta z`.
    As the density to normalise the PDF with is unknown, the output is in
    the dimension of number/volume in 1/Å^3.
    Functionally, PDFPlanar bins all pairwise :math:`g_1`-:math:`g_2` distances
    for which the z distance is smaller than ``dzheight`` into a histogram.
    For a more detailed explanation refer to
    :ref:`Explanation: PDF<pdfs-explanation>` and
    :ref:`PDFPlanar Derivation<pdfplanar-derivation>`.
    Parameters
    ----------
    ${PDF_PARAMETERS}
    pdf_bin_width : float
        Binwidth of bins in the histogram of the PDF (Å).
    dzheight : float
        dz height of a PDF slab :math:`\Delta z` (Å). :math:`\Delta z` is
        introduced to discretize the delta function :math:`\delta(z_{ij})`.
        It is the maximum :math:`z` distance between atoms which are
        considered to lie in the same plane.
        In the limit of :math:`\Delta z \to 0`, PDFPlanar reaches the
        continuous limit. However, if :math:`\Delta z` is too small, there
        are no atoms in ``g2`` to sample.
        We recommend a choice of :math:`\Delta z` that is 1/10th of
        a bond length.
    dmin : float
        Minimum pairwise distance between ``g1`` and ``g2`` (Å).
    dmax : float
        Maximum pairwise distance between ``g1`` and ``g2`` (Å).
    ${PLANAR_CLASS_PARAMETERS}
    ${BIN_METHOD_PARAMETER}
    ${BASE_CLASS_PARAMETERS}
    ${OUTPUT_PARAMETER}
    Attributes
    ----------
    ${PLANAR_CLASS_ATTRIBUTES}
    results.bins : numpy.ndarray
        distances at which the PDF is calculated, shape (``pdf_nbins``,) (Å)
    results.pdf : numpy.ndarray
        PDF, shape (``pdf_nbins``, ``n_bins``) (1/Å^3)
    '''

    def __init__(self, g1: mda.AtomGroup, g2: mda.AtomGroup | None=None, pdf_bin_width: float=0.3, dzheight: float=0.1, dmin: float=0.0, dmax: float | None=None, dim: int=2, zmin: float | None=None, zmax: float | None=None, bin_width: float=1, bin_method: str='com', refgroup: mda.AtomGroup | None=None, unwrap: bool=False, pack: bool=True, jitter: float=0.0, concfreq: int=0, output: str='pdf.dat') -> None:
        pass

    def _prepare(self) -> None:
        pass

    def _single_frame(self) -> None:
        pass

    def _conclude(self) -> None:
        pass

    @render_docs
    def save(self) -> None:
        '''${SAVE_METHOD_DESCRIPTION}'''
        pass
```

- metrics: total_program_units=8, total_doc_str=2, AvgCountLine=37, AvgCountLineBlank=4, AvgCountLineCode=30, AvgCountLineComment=3, AvgCyclomatic=3, CommentToCodeRatio=0.53, CountClassBase=1, CountClassCoupled=8, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=5, CountDeclInstanceVariable=14, CountDeclMethod=5, CountDeclMethodAll=23, CountLine=273, CountLineBlank=42, CountLineCode=151, CountLineCodeDecl=64, CountLineCodeExe=125, CountLineComment=80, CountStmt=75, CountStmtDecl=43, CountStmtExe=69, MaxCyclomatic=6, MaxInheritanceTree=3, MaxNesting=2, SumCyclomatic=13
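A usage sketch for this row; as the docstring recommends, `dzheight` is roughly a tenth of a bond length. File names and the selection are placeholders, and `run()` is assumed from the analysis base class.

```python
import MDAnalysis as mda
from maicos.modules.PDFPlanar import PDFPlanar

u = mda.Universe("slab.tpr", "slab.trr")  # placeholder files
oxygens = u.select_atoms("name OW")       # placeholder selection

ana = PDFPlanar(g1=oxygens, pdf_bin_width=0.3, dzheight=0.1).run()
r = ana.results.bins   # shape (pdf_nbins,)
pdf = ana.results.pdf  # shape (pdf_nbins, n_bins), one column per slab
ana.save()             # writes 'pdf.dat' by default
```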
Row 328,306

- repository_name: maicos-devel/maicos
- file_path: /Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/maicos-devel_maicos/src/maicos/modules/RDFDiporder.py
- class_name: maicos.modules.RDFDiporder.RDFDiporder
- human_written_code:

```python
import logging
import numpy as np
import MDAnalysis as mda
from ..core import AnalysisBase
from ..lib.util import get_center, render_docs
from MDAnalysis.lib import distances
from ..lib.weights import diporder_pair_weights


@render_docs
class RDFDiporder(AnalysisBase):
    """Spherical radial distribution function between dipoles.
    The implementation is heavily inspired by :class:`MDAnalysis.analysis.rdf.InterRDF`
    and is, according to :footcite:t:`zhang_dipolar_2014`, given by
    .. math::
        g_\\mathrm{\\hat{\\boldsymbol{\\mu}}, \\hat{\\boldsymbol{\\mu}}}(r) = \\frac{1}{N}
        \\left\\langle \\sum_i \\frac{1}{n_i(r)} \\sum_{j=1}^{n_i(r)}
        (\\hat{\\boldsymbol{\\mu}}_i \\cdot \\hat{\\boldsymbol{\\mu}}_j) \\right \\rangle
    where :math:`\\hat{\\boldsymbol{\\mu}}` is the normalized dipole moment of a
    ``grouping`` and :math:`n_i(r)` is the number of dipoles within a spherical shell of
    distance :math:`r` and :math:`r + \\delta r` from dipole :math:`i`.
    For the correlation time estimation the module will use the value of the RDF with
    the largest possible :math:`r` value.
    For a detailed example of the usage refer to the :ref:`how-to on dipolar
    correlation functions <howto-spatial-dipole-dipole-correlations>`.
    Parameters
    ----------
    ${PDF_PARAMETERS}
    norm : str, {'rdf', 'density', 'none'}
        For 'rdf' calculate :math:`g_{ab}(r)`. For 'density' the single group
        density :math:`n_{ab}(r)` is computed. 'none' computes the number of
        particle occurrences in each spherical shell.
    ${RADIAL_CLASS_PARAMETERS}
    ${BIN_WIDTH_PARAMETER}
    ${BIN_METHOD_PARAMETER}
    ${GROUPING_PARAMETER}
    ${BASE_CLASS_PARAMETERS}
    ${OUTPUT_PARAMETER}
    Attributes
    ----------
    results.bins : numpy.ndarray
        radial distances at which the RDF is calculated, shape (``rdf_nbins``,) (Å)
    results.rdf : numpy.ndarray
        RDF either in :math:`\\text{eÅ}^{-2}` if norm is ``"rdf"`` or ``"density"`` or
        :math:`\\text{eÅ}` if norm is ``"none"``.
    """

    def __init__(self, g1: mda.AtomGroup, g2: mda.AtomGroup | None=None, norm: str='rdf', rmin: float=0.0, rmax: float=15.0, bin_width: float=0.1, bin_method: str='com', grouping: str='residues', refgroup: mda.AtomGroup | None=None, unwrap: bool=True, pack: bool=True, jitter: float=0.0, concfreq: int=0, output: str='diporderrdf.dat') -> None:
        self._locals = locals()
        super().__init__(g1, unwrap=unwrap, pack=pack, refgroup=refgroup, jitter=jitter, wrap_compound=grouping, concfreq=concfreq)
        self.g1 = g1
        if g2 is None:
            self.g2 = g1
        else:
            self.g2 = g2
        self.bin_width = bin_width
        self.rmin = rmin
        self.rmax = rmax
        self.bin_method = str(bin_method).lower()
        self.norm = norm
        self.output = output

    def _prepare(self):
        logging.info('Analysis of the spherical radial distribution function for dipoles.')
        self.n_bins = int(np.ceil((self.rmax - self.rmin) / self.bin_width))
        supported_norms = ['rdf', 'density', 'none']
        if self.norm not in supported_norms:
            raise ValueError(f"'{self.norm}' is an invalid `norm`. Choose from: {', '.join(supported_norms)}")

    def _single_frame(self):
        if self.unwrap:
            self.g1.unwrap(compound=self.wrap_compound)
            self.g2.unwrap(compound=self.wrap_compound)
        pos_1 = get_center(self.g1, bin_method=self.bin_method, compound=self.wrap_compound)
        pos_2 = get_center(self.g2, bin_method=self.bin_method, compound=self.wrap_compound)
        pairs, dist = distances.capped_distance(pos_1, pos_2, min_cutoff=self.rmin, max_cutoff=self.rmax, box=self._ts.dimensions)
        weights = diporder_pair_weights(self.g1, self.g2, compound=self.wrap_compound)
        weights_sel = np.array([weights[ix[0], ix[1]] for ix in pairs])
        self._obs.profile, _ = np.histogram(a=dist, bins=self.n_bins, range=(self.rmin, self.rmax), weights=weights_sel)
        if self.norm == 'rdf':
            self._obs.volume = self._ts.volume
        return self._obs.profile[-1]

    def _conclude(self):
        _, edges = np.histogram(a=[-1], bins=self.n_bins, range=(self.rmin, self.rmax))
        self.results.bins = 0.5 * (edges[:-1] + edges[1:])
        norm = 1
        if self.norm in ['rdf', 'density']:
            # Volumes of the spherical shells between consecutive bin edges.
            vols = np.power(edges, 3)
            norm *= 4 / 3 * np.pi * np.diff(vols)
        if self.norm == 'rdf':
            if self.wrap_compound != 'molecules':
                nA = getattr(self.g1, f'n_{self.wrap_compound}')
                nB = getattr(self.g2, f'n_{self.wrap_compound}')
            else:
                nA = len(np.unique(self.g1.molnums))
                nB = len(np.unique(self.g2.molnums))
            N = nA * nB
            norm *= N / self.means.volume
        self.results.rdf = self.means.profile / norm

    @render_docs
    def save(self) -> None:
        """${SAVE_METHOD_DESCRIPTION}"""
        columns = ['r (Å)', 'rdf']
        if self.norm in ['rdf', 'density']:
            columns[1] += ' (Å^3)'
        self.savetxt(self.output, np.vstack([self.results.bins, self.results.rdf]).T, columns=columns)
```

- class_skeleton:

```python
@render_docs
class RDFDiporder(AnalysisBase):
    '''Spherical radial distribution function between dipoles.
    The implementation is heavily inspired by :class:`MDAnalysis.analysis.rdf.InterRDF`
    and is, according to :footcite:t:`zhang_dipolar_2014`, given by
    .. math::
        g_\mathrm{\hat{\boldsymbol{\mu}}, \hat{\boldsymbol{\mu}}}(r) = \frac{1}{N}
        \left\langle \sum_i \frac{1}{n_i(r)} \sum_{j=1}^{n_i(r)}
        (\hat{\boldsymbol{\mu}}_i \cdot \hat{\boldsymbol{\mu}}_j) \right \rangle
    where :math:`\hat{\boldsymbol{\mu}}` is the normalized dipole moment of a
    ``grouping`` and :math:`n_i(r)` is the number of dipoles within a spherical shell of
    distance :math:`r` and :math:`r + \delta r` from dipole :math:`i`.
    For the correlation time estimation the module will use the value of the RDF with
    the largest possible :math:`r` value.
    For a detailed example of the usage refer to the :ref:`how-to on dipolar
    correlation functions <howto-spatial-dipole-dipole-correlations>`.
    Parameters
    ----------
    ${PDF_PARAMETERS}
    norm : str, {'rdf', 'density', 'none'}
        For 'rdf' calculate :math:`g_{ab}(r)`. For 'density' the single group
        density :math:`n_{ab}(r)` is computed. 'none' computes the number of
        particle occurrences in each spherical shell.
    ${RADIAL_CLASS_PARAMETERS}
    ${BIN_WIDTH_PARAMETER}
    ${BIN_METHOD_PARAMETER}
    ${GROUPING_PARAMETER}
    ${BASE_CLASS_PARAMETERS}
    ${OUTPUT_PARAMETER}
    Attributes
    ----------
    results.bins : numpy.ndarray
        radial distances at which the RDF is calculated, shape (``rdf_nbins``,) (Å)
    results.rdf : numpy.ndarray
        RDF either in :math:`\text{eÅ}^{-2}` if norm is ``"rdf"`` or ``"density"`` or
        :math:`\text{eÅ}` if norm is ``"none"``.
    '''

    def __init__(self, g1: mda.AtomGroup, g2: mda.AtomGroup | None=None, norm: str='rdf', rmin: float=0.0, rmax: float=15.0, bin_width: float=0.1, bin_method: str='com', grouping: str='residues', refgroup: mda.AtomGroup | None=None, unwrap: bool=True, pack: bool=True, jitter: float=0.0, concfreq: int=0, output: str='diporderrdf.dat') -> None:
        pass

    def _prepare(self):
        pass

    def _single_frame(self):
        pass

    def _conclude(self):
        pass

    @render_docs
    def save(self) -> None:
        '''${SAVE_METHOD_DESCRIPTION}'''
        pass
```

- metrics: total_program_units=8, total_doc_str=2, AvgCountLine=25, AvgCountLineBlank=3, AvgCountLineCode=21, AvgCountLineComment=1, AvgCyclomatic=3, CommentToCodeRatio=0.37, CountClassBase=1, CountClassCoupled=6, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=5, CountDeclInstanceVariable=10, CountDeclMethod=5, CountDeclMethodAll=17, CountLine=173, CountLineBlank=29, CountLineCode=105, CountLineCodeDecl=47, CountLineCodeExe=82, CountLineComment=39, CountStmt=54, CountStmtDecl=30, CountStmtExe=48, MaxCyclomatic=4, MaxInheritanceTree=2, MaxNesting=2, SumCyclomatic=13
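A usage sketch for this row; with `norm="rdf"` the profile is divided by the shell volumes and the average pair density, as in `_conclude` above. File names and the selection are placeholders.

```python
import MDAnalysis as mda
from maicos.modules.RDFDiporder import RDFDiporder

u = mda.Universe("topol.tpr", "traj.trr")  # placeholder files
water = u.select_atoms("resname SOL")      # placeholder selection

ana = RDFDiporder(water, rmax=15.0, bin_width=0.1, norm="rdf").run()
r, g_mu = ana.results.bins, ana.results.rdf
ana.save()  # writes 'diporderrdf.dat' by default
```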
328,307
|
maicos-devel/maicos
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/maicos-devel_maicos/src/maicos/modules/Saxs.py
|
maicos.modules.Saxs.Saxs
|
from ..lib.util import render_docs
import numpy as np
from ..lib.math import atomic_form_factor, structure_factor
import logging
import MDAnalysis as mda
from ..core import AnalysisBase
@render_docs
class Saxs(AnalysisBase):
"""Small angle X-Ray scattering intensities (SAXS).
This module computes the structure factor :math:`S(q)`, the scattering intensity
(sometimes also called scattering factor) :math:`I(q)` and their corresponding
scattering vectors :math:`q`. For a system containing only one element the structure
factor and the scattering intensity are connected via the atomic form factor
:math:`f(q)`
.. math::
I(q) = [f(q)]^2 S(q)
For more details on the theory behind this module see :ref:`saxs-explanations`.
By default the scattering vectors :math:`\\boldsymbol{q}` are binned according to
their length :math:`q` using a bin width given by ``dq``. Setting the option
``bin_spectrum=False``, also the raw scattering vectors and their corresponding
Miller indices can be saved. Saving the scattering vectors and Miller indices is
only possible when the box vectors are constant in the whole trajectory (NVT) since
for changing cells the same Miller indices correspond to different scattering
vectors.
.. warning::
Please be aware that in simulations where the box vectors change, the q-vectors
will differ between frames. Artifacts can arise when the data contains poorly
sampled q-vectors.
Analyzed scattering vectors :math:`q` can be restricted by a minimal and maximal
angle with the z-axis. For ``0`` and ``180``, all possible vectors are taken into
account. To obtain the scattering intensities, the structure factor is normalized by
an element-specific atomic form factor based on Cromer-Mann parameters
:footcite:t:`princeInternationalTablesCrystallography2004`.
For the correlation time estimation the module will use the value of the scattering
intensity with the largest possible :math:`q` value.
For an example on the usage refer to :ref:`How-to: SAXS<howto-saxs>`.
Parameters
----------
${ATOMGROUP_PARAMETER}
bin_spectrum : bool
Bin the spectrum. If :py:obj:`False` Miller indices of q-vector are returned.
Only works for NVT simulations.
${Q_SPACE_PARAMETERS}
thetamin : float
Minimal angle (°) between the q vectors and the z-axis.
thetamax : float
Maximal angle (°) between the q vectors and the z-axis.
${BASE_CLASS_PARAMETERS}
${OUTPUT_PARAMETER}
Attributes
----------
results.scattering_vectors : numpy.ndarray
Length of the binned scattering vectors.
results.miller_indices : numpy.ndarray
Miller indices of q-vector (only available if ``bin_spectrum==False``).
results.struture_factors : numpy.ndarray
structure factors :math:`S(q)`
results.scattering_intensities : numpy.ndarray
scattering intensities :math:`I(q)`
results.dstruture_factors : numpy.ndarray
standard error of the structure factors :math:`S(q)`
(only available if ``bin_spectrum==True``).
structure factors :math:`S(q)`
results.dscattering_intensities : numpy.ndarray
standard error of the scattering intensities :math:`I(q)`
(only available if ``bin_spectrum==True``).
"""
def __init__(self, atomgroup: mda.AtomGroup, bin_spectrum: bool=True, qmin: float=0, qmax: float=6, dq: float=0.1, thetamin: float=0, thetamax: float=180, refgroup: mda.AtomGroup | None=None, unwrap: bool=False, pack: bool=True, jitter: float=0.0, concfreq: int=0, output: str='sq.dat') -> None:
self._locals = locals()
super().__init__(atomgroup, unwrap=unwrap, pack=pack, refgroup=refgroup, jitter=jitter, concfreq=concfreq, wrap_compound='atoms')
self.bin_spectrum = bin_spectrum
self.qmin = qmin
self.qmax = qmax
self.dq = dq
self.thetamin = thetamin
self.thetamax = thetamax
self.output = output
def _prepare(self) -> None:
logging.info('Analysis of small angle X-ray scattering intensities (SAXS).')
self.thetamin = min(self.thetamin, self.thetamax)
self.thetamax = max(self.thetamin, self.thetamax)
if self.thetamin < 0 or self.thetamin > 180:
raise ValueError(f'thetamin ({self.thetamin}°) has to between 0 and 180°.')
if self.thetamax < 0 or self.thetamax > 180:
raise ValueError(f'thetamax ({self.thetamax}°) has to between 0 and 180°.')
if self.thetamin > self.thetamax:
raise ValueError(f'thetamin ({self.thetamin}°) larger than thetamax ({self.thetamax}°).')
self.thetamin *= np.pi / 180
self.thetamax *= np.pi / 180
self.groups = []
self.weights = []
self.elements = []
for element in np.unique(self.atomgroup.elements):
group = self.atomgroup.select_atoms(f'element {element}')
self.groups.append(group)
self.weights.append(np.ones(group.n_atoms))
self.elements.append(element)
if self.bin_spectrum:
self.n_bins = int(np.ceil((self.qmax - self.qmin) / self.dq))
else:
self.box = np.diag(mda.lib.mdamath.triclinic_vectors(self._universe.dimensions))
self.scattering_vector_factors = 2 * np.pi / self.box
self.max_n = np.ceil(self.qmax / self.scattering_vector_factors).astype(int)
def _single_frame(self) -> float:
box = np.diag(mda.lib.mdamath.triclinic_vectors(self._ts.dimensions))
if self.bin_spectrum:
self._obs.structure_factors = np.zeros(self.n_bins)
self._obs.scattering_intensities = np.zeros(self.n_bins)
else:
if not np.all(box == self.box):
raise ValueError(f'Dimensions in frame {self.frame_index} are different from initial dimenions. Can not use `bin_spectrum=False`.')
self._obs.structure_factors = np.zeros(self.max_n)
self._obs.scattering_intensities = np.zeros(self.max_n)
for i_group, group in enumerate(self.groups):
positions = group.atoms.positions - box * np.round(group.atoms.positions / box)
scattering_vectors, structure_factors = structure_factor(np.double(positions), np.double(box), self.qmin, self.qmax, self.thetamin, self.thetamax, self.weights[i_group])
scattering_intensities = atomic_form_factor(scattering_vectors, self.elements[i_group]) ** 2 * structure_factors
if self.bin_spectrum:
scattering_vectors = scattering_vectors.flatten()
structure_factors = structure_factors.flatten()
scattering_intensities = scattering_intensities.flatten()
nonzeros = np.where(structure_factors != 0)[0]
scattering_vectors = scattering_vectors[nonzeros]
structure_factors = structure_factors[nonzeros]
scattering_intensities = scattering_intensities[nonzeros]
histogram_kwargs = dict(a=scattering_vectors, bins=self.n_bins, range=(self.qmin, self.qmax))
structure_factors, _ = np.histogram(weights=structure_factors, **histogram_kwargs)
scattering_intensities, _ = np.histogram(weights=scattering_intensities, **histogram_kwargs)
self._obs.bincount, _ = np.histogram(weights=None, **histogram_kwargs)
self._obs.structure_factors += structure_factors
self._obs.scattering_intensities += scattering_intensities
else:
self._obs.structure_factors += structure_factors
self._obs.scattering_intensities += scattering_intensities
return structure_factors.flatten()[-1]
def _conclude(self) -> None:
if self.bin_spectrum:
scattering_vectors = np.arange(self.qmin, self.qmax, self.dq) + 0.5 * self.dq
structure_factors = self.sums.structure_factors / self.sums.bincount
scattering_intensities = self.sums.scattering_intensities / self.sums.bincount
dstructure_factors = self.sems.structure_factors
dscattering_intensities = self.sems.scattering_intensities
else:
miller_indices = np.array(list(np.ndindex(tuple(self.max_n))))
scattering_vectors = np.linalg.norm(miller_indices * self.scattering_vector_factors[np.newaxis, :], axis=1)
structure_factors = self.means.structure_factors
scattering_intensities = self.means.scattering_intensities
structure_factors = structure_factors.flatten()
scattering_intensities = scattering_intensities.flatten()
argsort = np.argsort(scattering_vectors)
scattering_vectors = scattering_vectors[argsort]
miller_indices = miller_indices[argsort]
structure_factors = structure_factors[argsort]
scattering_intensities = scattering_intensities[argsort]
nonzeros = np.invert(np.isnan(structure_factors))
scattering_vectors = scattering_vectors[nonzeros]
structure_factors = structure_factors[nonzeros]
scattering_intensities = scattering_intensities[nonzeros]
if self.bin_spectrum:
dstructure_factors = dstructure_factors[nonzeros]
dscattering_intensities = dscattering_intensities[nonzeros]
structure_factors /= self.atomgroup.n_atoms
scattering_intensities /= self.atomgroup.n_atoms
if self.bin_spectrum:
dstructure_factors /= self.atomgroup.n_atoms
dscattering_intensities /= self.atomgroup.n_atoms
self.results.scattering_vectors = scattering_vectors
self.results.structure_factors = structure_factors
self.results.scattering_intensities = scattering_intensities
if self.bin_spectrum:
self.results.dstructure_factors = dstructure_factors
self.results.dscattering_intensities = dscattering_intensities
if not self.bin_spectrum:
self.results.miller_indices = miller_indices[nonzeros]
@render_docs
def save(self) -> None:
"""${SAVE_METHOD_DESCRIPTION}"""
if self.bin_spectrum:
self.savetxt(self.output, np.vstack([self.results.scattering_vectors, self.results.structure_factors, self.results.scattering_intensities, self.results.dstructure_factors, self.results.dscattering_intensities]).T, columns=['q (1/Å)', 'S(q) (arb. units)', 'I(q) (arb. units)', 'ΔS(q)', 'ΔI(q)'])
else:
out = np.hstack([self.results.scattering_vectors[:, np.newaxis], self.results.miller_indices, self.results.structure_factors[:, np.newaxis], self.results.scattering_intensities[:, np.newaxis]])
boxinfo = 'box_x = {:.3f} Å, box_y = {:.3f} Å, box_z = {:.3f} Å\n'.format(*self.box)
self.savetxt(self.output, out, columns=[boxinfo, 'q (1/Å)', 'q_i', 'q_j', 'q_k', 'S(q) (arb. units)', 'I(q) (arb. units)'])
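# Minimal usage sketch: the file names below are placeholders and run() is
# assumed from the MDAnalysis AnalysisBase API on which this class builds;
# save() is the method defined above and writes "sq.dat" by default.
if __name__ == "__main__":
    import MDAnalysis as mda

    u = mda.Universe("topol.tpr", "traj.trr")  # hypothetical inputs
    saxs = Saxs(u.atoms, qmin=0.1, qmax=6.0, dq=0.05)  # topology needs an `elements` attribute
    saxs.run()
    print(saxs.results.scattering_vectors[:5])  # binned |q| values in 1/Å
    saxs.save()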
|
@render_docs
class Saxs(AnalysisBase):
'''Small angle X-Ray scattering intensities (SAXS).
This module computes the structure factor :math:`S(q)`, the scattering intensity
(sometimes also called scattering factor) :math:`I(q)` and their corresponding
scattering vectors :math:`q`. For a system containing only one element the structure
factor and the scattering intensity are connected via the atomic form factor
:math:`f(q)`
.. math::
I(q) = [f(q)]^2 S(q)
For more details on the theory behind this module see :ref:`saxs-explanations`.
By default the scattering vectors :math:`\boldsymbol{q}` are binned according to
their length :math:`q` using a bin width given by ``dq``. With the option
``bin_spectrum=False``, the raw scattering vectors and their corresponding
Miller indices can also be saved. Saving the scattering vectors and Miller indices is
only possible when the box vectors are constant in the whole trajectory (NVT) since
for changing cells the same Miller indices correspond to different scattering
vectors.
.. warning::
Please be aware that in simulations where the box vectors change, the q-vectors
will differ between frames. Artifacts can arise when the data contains poorly
sampled q-vectors.
Analyzed scattering vectors :math:`q` can be restricted by a minimal and maximal
angle with the z-axis. For ``0`` and ``180``, all possible vectors are taken into
account. To obtain the scattering intensities, the structure factor is normalized by
an element-specific atomic form factor based on Cromer-Mann parameters
:footcite:t:`princeInternationalTablesCrystallography2004`.
For the correlation time estimation the module will use the value of the scattering
intensity with the largest possible :math:`q` value.
For an example on the usage refer to :ref:`How-to: SAXS<howto-saxs>`.
Parameters
----------
${ATOMGROUP_PARAMETER}
bin_spectrum : bool
Bin the spectrum. If :py:obj:`False` Miller indices of q-vector are returned.
Only works for NVT simulations.
${Q_SPACE_PARAMETERS}
thetamin : float
Minimal angle (°) between the q vectors and the z-axis.
thetamax : float
Maximal angle (°) between the q vectors and the z-axis.
${BASE_CLASS_PARAMETERS}
${OUTPUT_PARAMETER}
Attributes
----------
results.scattering_vectors : numpy.ndarray
Length of the binned scattering vectors.
results.miller_indices : numpy.ndarray
Miller indices of q-vector (only available if ``bin_spectrum==False``).
results.structure_factors : numpy.ndarray
structure factors :math:`S(q)`
results.scattering_intensities : numpy.ndarray
scattering intensities :math:`I(q)`
results.dstructure_factors : numpy.ndarray
standard error of the structure factors :math:`S(q)`
(only available if ``bin_spectrum==True``).
results.dscattering_intensities : numpy.ndarray
standard error of the scattering intensities :math:`I(q)`
(only available if ``bin_spectrum==True``).
'''
def __init__(self, atomgroup: mda.AtomGroup, bin_spectrum: bool=True, qmin: float=0, qmax: float=6, dq: float=0.1, thetamin: float=0, thetamax: float=180, refgroup: mda.AtomGroup | None=None, unwrap: bool=False, pack: bool=True, jitter: float=0.0, concfreq: int=0, output: str='sq.dat') -> None:
pass
def _prepare(self) -> None:
pass
def _single_frame(self) -> float:
pass
def _conclude(self) -> None:
pass
@render_docs
def save(self) -> None:
'''${SAVE_METHOD_DESCRIPTION}'''
pass
| 8 | 2 | 49 | 6 | 42 | 2 | 4 | 0.33 | 1 | 11 | 0 | 0 | 5 | 15 | 5 | 17 | 326 | 46 | 212 | 57 | 190 | 71 | 110 | 41 | 104 | 6 | 2 | 2 | 20
|
328308
|
maicos-devel/maicos
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/maicos-devel_maicos/src/maicos/modules/TemperaturePlanar.py
|
maicos.modules.TemperaturePlanar.TemperaturePlanar
|
import MDAnalysis as mda
from ..lib.util import render_docs
import logging
from ..lib.weights import temperature_weights
from ..core import ProfilePlanarBase
@render_docs
class TemperaturePlanar(ProfilePlanarBase):
"""Temperature profiles in a cartesian geometry.
Currently only atomistic temperature profiles are supported. Therefore grouping per
molecule, segment, residue, or fragment is not possible.
${CORRELATION_INFO_PLANAR}
Parameters
----------
${ATOMGROUP_PARAMETER}
${PROFILE_PLANAR_CLASS_PARAMETERS}
${OUTPUT_PARAMETER}
Attributes
----------
${PROFILE_PLANAR_CLASS_ATTRIBUTES}
"""
def __init__(self, atomgroup: mda.AtomGroup, dim: int=2, zmin: float | None=None, zmax: float | None=None, bin_width: float=1, bin_method: str='com', grouping: str='atoms', sym: bool=False, refgroup: mda.AtomGroup | None=None, unwrap: bool=True, pack: bool=True, jitter: float=0.0, concfreq: int=0, output: str='temperature.dat') -> None:
self._locals = locals()
if grouping != 'atoms':
raise ValueError('Invalid choice of grouping, must use atoms')
super().__init__(atomgroup=atomgroup, unwrap=unwrap, pack=pack, jitter=jitter, concfreq=concfreq, dim=dim, zmin=zmin, zmax=zmax, bin_width=bin_width, refgroup=refgroup, sym=sym, sym_odd=False, grouping=grouping, bin_method=bin_method, output=output, weighting_function=temperature_weights, weighting_function_kwargs=None, normalization='number')
def _prepare(self):
logging.info('Analysis of temperature profiles.')
super()._prepare()
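# Minimal usage sketch: inputs are placeholders; the trajectory must contain
# velocities, run() and save() are assumed from the shared MAICoS/MDAnalysis
# base classes, and results.bin_pos/results.profile are the attributes
# documented for ProfilePlanarBase (hidden behind the template above).
if __name__ == "__main__":
    import MDAnalysis as mda

    u = mda.Universe("topol.tpr", "traj.trr")  # hypothetical inputs with velocities
    temp = TemperaturePlanar(u.atoms, dim=2, bin_width=2.0)  # grouping must stay "atoms"
    temp.run()
    print(temp.results.bin_pos, temp.results.profile)  # bin position [Å] vs T [K]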
|
@render_docs
class TemperaturePlanar(ProfilePlanarBase):
'''Temperature profiles in a cartesian geometry.
Currently only atomistic temperature profiles are supported. Therefore grouping per
molecule, segment, residue, or fragment is not possible.
${CORRELATION_INFO_PLANAR}
Parameters
----------
${ATOMGROUP_PARAMETER}
${PROFILE_PLANAR_CLASS_PARAMETERS}
${OUTPUT_PARAMETER}
Attributes
----------
${PROFILE_PLANAR_CLASS_ATTRIBUTES}
'''
def __init__(self, atomgroup: mda.AtomGroup, dim: int=2, zmin: float | None=None, zmax: float | None=None, bin_width: float=1, bin_method: str='com', grouping: str='atoms', sym: bool=False, refgroup: mda.AtomGroup | None=None, unwrap: bool=True, pack: bool=True, jitter: float=0.0, concfreq: int=0, output: str='temperature.dat') -> None:
pass
def _prepare(self):
pass
| 4 | 1 | 22 | 1 | 22 | 0 | 2 | 0.3 | 1 | 6 | 0 | 0 | 2 | 1 | 2 | 31 | 65 | 8 | 44 | 20 | 25 | 13 | 9 | 4 | 6 | 2 | 4 | 1 | 3
|
328309
|
maicos-devel/maicos
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/maicos-devel_maicos/src/maicos/modules/VelocityCylinder.py
|
maicos.modules.VelocityCylinder.VelocityCylinder
|
import logging
from ..lib.util import render_docs
from ..lib.weights import velocity_weights
from ..core import ProfileCylinderBase
import MDAnalysis as mda
@render_docs
class VelocityCylinder(ProfileCylinderBase):
"""Cartesian velocity profile across a cylinder.
Reads in coordinates and velocities from a trajectory and calculates a velocity
:math:`[\\mathrm{Å/ps}]` or a flux per unit area :math:`[\\mathrm{Å^{-2}\\,ps^{-1}}]`
profile along a given axis.
The ``grouping`` keyword gives you fine control over the velocity profile, e.g. you
can choose atomic or molecular velocities. Note that if atomic velocities are used
for complex compounds, the profile usually picks up an additional contribution from
the vorticity.
${CORRELATION_INFO_RADIAL}
Parameters
----------
${ATOMGROUP_PARAMETER}
${VDIM_PARAMETER}
${FLUX_PARAMETER}
${PROFILE_CYLINDER_CLASS_PARAMETERS}
${OUTPUT_PARAMETER}
Attributes
----------
${PROFILE_CYLINDER_CLASS_ATTRIBUTES}
"""
def __init__(self, atomgroup: mda.AtomGroup, vdim: int=0, flux: bool=False, dim: int=2, zmin: float | None=None, zmax: float | None=None, rmin: float=0, rmax: float | None=None, bin_width: int=1, bin_method: str='com', grouping: str='atoms', refgroup: mda.AtomGroup | None=None, unwrap: bool=True, pack: bool=True, jitter: float=0.0, concfreq: int=0, output: str='velocity.dat') -> None:
self._locals = locals()
if vdim not in [0, 1, 2]:
raise ValueError('Velocity dimension can only be x=0, y=1 or z=2.')
normalization = 'volume' if flux else 'number'
super().__init__(atomgroup=atomgroup, unwrap=unwrap, pack=pack, jitter=jitter, concfreq=concfreq, dim=dim, zmin=zmin, zmax=zmax, bin_width=bin_width, rmin=rmin, rmax=rmax, refgroup=refgroup, grouping=grouping, bin_method=bin_method, output=output, weighting_function=velocity_weights, weighting_function_kwargs={'vdim': vdim}, normalization=normalization)
def _prepare(self):
logging.info('Analysis of the velocity profile.')
super()._prepare()
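# Minimal usage sketch: inputs and the selection are placeholders; run() and
# save() are assumed from the shared MAICoS/MDAnalysis base classes. vdim=2
# measures the z-component of the velocity, binned radially around the axis.
if __name__ == "__main__":
    import MDAnalysis as mda

    u = mda.Universe("topol.tpr", "traj.trr")  # must contain velocities
    vel = VelocityCylinder(u.select_atoms("resname SOL"), vdim=2, bin_width=0.5)
    vel.run()
    vel.save()  # writes the radial v_z profile to "velocity.dat" by default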
|
@render_docs
class VelocityCylinder(ProfileCylinderBase):
'''Cartesian velocity profile across a cylinder.
Reads in coordinates and velocities from a trajectory and calculates a velocity
:math:`[\mathrm{Å/ps}]` or a flux per unit area :math:`[\mathrm{Å^{-2}\,ps^{-1}}]`
profile along a given axis.
The ``grouping`` keyword gives you fine control over the velocity profile, e.g. you
can choose atomic or molecular velocities. Note that if atomic velocities are used
for complex compounds, the profile usually picks up an additional contribution from
the vorticity.
${CORRELATION_INFO_RADIAL}
Parameters
----------
${ATOMGROUP_PARAMETER}
${VDIM_PARAMETER}
${FLUX_PARAMETER}
${PROFILE_CYLINDER_CLASS_PARAMETERS}
${OUTPUT_PARAMETER}
Attributes
----------
${PROFILE_CYLINDER_CLASS_ATTRIBUTES}
'''
def __init__(self, atomgroup: mda.AtomGroup, vdim: int=0, flux: bool=False, dim: int=2, zmin: float | None=None, zmax: float | None=None, rmin: float=0, rmax: float | None=None, bin_width: int=1, bin_method: str='com', grouping: str='atoms', refgroup: mda.AtomGroup | None=None, unwrap: bool=True, pack: bool=True, jitter: float=0.0, concfreq: int=0, output: str='velocity.dat') -> None:
pass
def _prepare(self):
pass
| 4 | 1 | 24 | 1 | 24 | 0 | 2 | 0.42 | 1 | 6 | 0 | 0 | 2 | 1 | 2 | 36 | 77 | 9 | 48 | 24 | 26 | 20 | 10 | 5 | 7 | 3 | 5 | 1 | 4
|
328310
|
maicos-devel/maicos
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/maicos-devel_maicos/src/maicos/modules/VelocityPlanar.py
|
maicos.modules.VelocityPlanar.VelocityPlanar
|
from ..core import ProfilePlanarBase
import logging
from ..lib.util import render_docs
import MDAnalysis as mda
from ..lib.weights import velocity_weights
@render_docs
class VelocityPlanar(ProfilePlanarBase):
"""Velocity profiles in a cartesian geometry.
Reads in coordinates and velocities from a trajectory and calculates a velocity
:math:`[\\mathrm{Å/ps}]` or a flux per unit area :math:`[\\mathrm{Å^{-2}\\,ps^{-1}}]`
profile along a given axis.
The ``grouping`` keyword gives you fine control over the velocity profile, e.g. you
can choose atomic or molecular velocities. Note that if atomic velocities are used
for complex compounds, the profile usually picks up an additional contribution from
the vorticity.
${CORRELATION_INFO_PLANAR}
Parameters
----------
${ATOMGROUP_PARAMETER}
sym_odd : bool
Parity of the profile. If :obj:`False`, the profile will be symmetrized. If
:obj:`True`, the profile is antisymmetrized. Only relevant in combination with
``sym``.
${VDIM_PARAMETER}
${FLUX_PARAMETER}
${PROFILE_PLANAR_CLASS_PARAMETERS}
${OUTPUT_PARAMETER}
Attributes
----------
${PROFILE_PLANAR_CLASS_ATTRIBUTES}
"""
def __init__(self, atomgroup: mda.AtomGroup, sym_odd: bool=False, vdim: int=0, flux: bool=False, dim: int=2, zmin: float | None=None, zmax: float | None=None, bin_width: float=1.0, bin_method: str='com', grouping: str='atoms', sym: bool=False, refgroup: mda.AtomGroup | None=None, unwrap: bool=True, pack: bool=True, jitter: float=0.0, concfreq: int=0, output: str='velocity.dat') -> None:
self._locals = locals()
if vdim not in [0, 1, 2]:
raise ValueError('Velocity dimension can only be x=0, y=1 or z=2.')
normalization = 'volume' if flux else 'number'
super().__init__(atomgroup=atomgroup, unwrap=unwrap, pack=pack, jitter=jitter, concfreq=concfreq, dim=dim, zmin=zmin, zmax=zmax, bin_width=bin_width, refgroup=refgroup, sym=sym, sym_odd=sym_odd, grouping=grouping, bin_method=bin_method, output=output, weighting_function=velocity_weights, weighting_function_kwargs={'vdim': vdim}, normalization=normalization)
def _prepare(self):
logging.info('Analysis of the velocity profile.')
super()._prepare()
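# Minimal usage sketch: inputs are placeholders; run() and save() are assumed
# from the shared MAICoS/MDAnalysis base classes. flux=True switches from a
# velocity to a flux-per-area profile, as described in the docstring above.
if __name__ == "__main__":
    import MDAnalysis as mda

    u = mda.Universe("topol.tpr", "traj.trr")  # must contain velocities
    vel = VelocityPlanar(u.select_atoms("resname SOL"), vdim=0, flux=True, dim=2)
    vel.run()
    vel.save()  # writes the profile to "velocity.dat" by default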
|
@render_docs
class VelocityPlanar(ProfilePlanarBase):
'''Velocity profiles in a cartesian geometry.
Reads in coordinates and velocities from a trajectory and calculates a velocity
:math:`[\mathrm{Å/ps}]` or a flux per unit area :math:`[\mathrm{Å^{-2}\,ps^{-1}}]`
profile along a given axis.
The ``grouping`` keyword gives you fine control over the velocity profile, e.g. you
can choose atomic or molecular velocities. Note that if atomic velocities are used
for complex compounds, the profile usually picks up an additional contribution from
the vorticity.
${CORRELATION_INFO_PLANAR}
Parameters
----------
${ATOMGROUP_PARAMETER}
sym_odd : bool
Parity of the profile. If :obj:`False`, the profile will be symmetrized. If
:obj:`True`, the profile is antisymmetrized. Only relevant in combination with
``sym``.
${VDIM_PARAMETER}
${FLUX_PARAMETER}
${PROFILE_PLANAR_CLASS_PARAMETERS}
${OUTPUT_PARAMETER}
Attributes
----------
${PROFILE_PLANAR_CLASS_ATTRIBUTES}
'''
def __init__(self, atomgroup: mda.AtomGroup, sym_odd: bool=False, vdim: int=0, flux: bool=False, dim: int=2, zmin: float | None=None, zmax: float | None=None, bin_width: float=1.0, bin_method: str='com', grouping: str='atoms', sym: bool=False, refgroup: mda.AtomGroup | None=None, unwrap: bool=True, pack: bool=True, jitter: float=0.0, concfreq: int=0, output: str='velocity.dat') -> None:
pass
def _prepare(self):
pass
| 4 | 1 | 24 | 1 | 24 | 0 | 2 | 0.5 | 1 | 6 | 0 | 0 | 2 | 1 | 2 | 31 | 81 | 9 | 48 | 24 | 26 | 24 | 10 | 5 | 7 | 3 | 4 | 1 | 4
|
328311
|
maicos-devel/maicos
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/maicos-devel_maicos/src/maicos/modules/densitycylinder.py
|
maicos.modules.densitycylinder.DensityCylinder
|
from ..lib.weights import density_weights
from ..lib.util import render_docs
import logging
import MDAnalysis as mda
from ..core import ProfileCylinderBase
@render_docs
class DensityCylinder(ProfileCylinderBase):
"""Cylindrical partial density profiles.
${DENSITY_CYLINDER_DESCRIPTION}
${CORRELATION_INFO_RADIAL}
Parameters
----------
${ATOMGROUP_PARAMETER}
${DENS_PARAMETER}
${PROFILE_CYLINDER_CLASS_PARAMETERS}
${OUTPUT_PARAMETER}
Attributes
----------
${PROFILE_CYLINDER_CLASS_ATTRIBUTES}
"""
def __init__(self, atomgroup: mda.AtomGroup, dens: str='mass', dim: int=2, zmin: float | None=None, zmax: float | None=None, rmin: float=0, rmax: float | None=None, bin_width: float=1, bin_method: str='com', grouping: str='atoms', refgroup: mda.AtomGroup | None=None, unwrap: bool=True, pack: bool=True, concfreq: int=0, jitter: float=0.0, output: str='density.dat') -> None:
self._locals = locals()
super().__init__(atomgroup=atomgroup, unwrap=unwrap, pack=pack, jitter=jitter, concfreq=concfreq, dim=dim, zmin=zmin, zmax=zmax, bin_width=bin_width, rmin=rmin, rmax=rmax, refgroup=refgroup, grouping=grouping, bin_method=bin_method, output=output, weighting_function=density_weights, weighting_function_kwargs={'dens': dens}, normalization='volume')
def _prepare(self):
logging.info(f"Analysis of the {self._locals['dens']} density profile.")
super()._prepare()
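# Minimal usage sketch: inputs and the selection are placeholders; run() and
# save() are assumed from the shared MAICoS/MDAnalysis base classes. The
# default dens="mass" yields a mass density binned radially around the center.
if __name__ == "__main__":
    import MDAnalysis as mda

    u = mda.Universe("topol.tpr", "traj.trr")
    dens = DensityCylinder(u.select_atoms("resname SOL"), dens="mass", bin_width=0.5)
    dens.run()
    dens.save()  # writes the radial profile to "density.dat" by default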
|
@render_docs
class DensityCylinder(ProfileCylinderBase):
'''Cylindrical partial density profiles.
${DENSITY_CYLINDER_DESCRIPTION}
${CORRELATION_INFO_RADIAL}
Parameters
----------
${ATOMGROUP_PARAMETER}
${DENS_PARAMETER}
${PROFILE_CYLINDER_CLASS_PARAMETERS}
${OUTPUT_PARAMETER}
Attributes
----------
${PROFILE_CYLINDER_CLASS_ATTRIBUTES}
'''
def __init__(self, atomgroup: mda.AtomGroup, dens: str='mass', dim: int=2, zmin: float | None=None, zmax: float | None=None, rmin: float=0, rmax: float | None=None, bin_width: float=1, bin_method: str='com', grouping: str='atoms', refgroup: mda.AtomGroup | None=None, unwrap: bool=True, pack: bool=True, concfreq: int=0, jitter: float=0.0, output: str='density.dat') -> None:
pass
def _prepare(self):
pass
| 4 | 1 | 22 | 0 | 22 | 0 | 1 | 0.3 | 1 | 5 | 0 | 0 | 2 | 1 | 2 | 36 | 64 | 7 | 44 | 22 | 23 | 13 | 7 | 4 | 4 | 1 | 5 | 0 | 2
|
328312
|
maicos-devel/maicos
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/maicos-devel_maicos/src/maicos/modules/densityplanar.py
|
maicos.modules.densityplanar.DensityPlanar
|
from ..lib.weights import density_weights
import MDAnalysis as mda
import logging
from ..lib.util import render_docs
from ..core import ProfilePlanarBase
@render_docs
class DensityPlanar(ProfilePlanarBase):
"""Cartesian partial density profiles.
${DENSITY_PLANAR_DESCRIPTION}
${CORRELATION_INFO_PLANAR}
Parameters
----------
${ATOMGROUP_PARAMETER}
${DENS_PARAMETER}
${PROFILE_PLANAR_CLASS_PARAMETERS}
${OUTPUT_PARAMETER}
Attributes
----------
${PROFILE_PLANAR_CLASS_ATTRIBUTES}
Notes
-----
Partial mass density profiles can be used to calculate the ideal component of the
chemical potential. For details, take a look at the corresponding :ref:`How-to
guide<howto-chemical-potential>`.
"""
def __init__(self, atomgroup: mda.AtomGroup, dens: str='mass', dim: int=2, zmin: float | None=None, zmax: float | None=None, bin_width: float=1, bin_method: str='com', grouping: str='atoms', sym: bool=False, refgroup: mda.AtomGroup | None=None, unwrap: bool=True, pack: bool=True, jitter: float=0.0, concfreq: int=0, output: str='density.dat') -> None:
self._locals = locals()
super().__init__(atomgroup=atomgroup, unwrap=unwrap, pack=pack, jitter=jitter, concfreq=concfreq, dim=dim, zmin=zmin, zmax=zmax, bin_width=bin_width, refgroup=refgroup, sym=sym, sym_odd=False, grouping=grouping, bin_method=bin_method, output=output, weighting_function=density_weights, weighting_function_kwargs={'dens': dens}, normalization='volume')
def _prepare(self):
logging.info(f"Analysis of the {self._locals['dens']} density profile.")
super()._prepare()
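# Minimal usage sketch: inputs and the selection are placeholders; run() and
# save() are assumed from the shared MAICoS/MDAnalysis base classes, and
# dens="number" assumes the number-density option of density_weights.
# refgroup recenters the profile on the chosen group.
if __name__ == "__main__":
    import MDAnalysis as mda

    u = mda.Universe("topol.tpr", "traj.trr")
    water = u.select_atoms("resname SOL")  # hypothetical selection
    dens = DensityPlanar(water, dens="number", dim=2, refgroup=water)
    dens.run()
    dens.save()  # writes the profile to "density.dat" by default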
|
@render_docs
class DensityPlanar(ProfilePlanarBase):
'''Cartesian partial density profiles.
${DENSITY_PLANAR_DESCRIPTION}
${CORRELATION_INFO_PLANAR}
Parameters
----------
${ATOMGROUP_PARAMETER}
${DENS_PARAMETER}
${PROFILE_PLANAR_CLASS_PARAMETERS}
${OUTPUT_PARAMETER}
Attributes
----------
${PROFILE_PLANAR_CLASS_ATTRIBUTES}
Notes
-----
Partial mass density profiles can be used to calculate the ideal component of the
chemical potential. For details, take a look at the corresponding :ref:`How-to
guide<howto-chemical-potential>`.
'''
def __init__(self, atomgroup: mda.AtomGroup, dens: str='mass', dim: int=2, zmin: float | None=None, zmax: float | None=None, bin_width: float=1, bin_method: str='com', grouping: str='atoms', sym: bool=False, refgroup: mda.AtomGroup | None=None, unwrap: bool=True, pack: bool=True, jitter: float=0.0, concfreq: int=0, output: str='density.dat') -> None:
pass
def _prepare(self):
pass
| 4 | 1 | 21 | 0 | 21 | 0 | 1 | 0.42 | 1 | 5 | 0 | 0 | 2 | 1 | 2 | 31 | 69 | 8 | 43 | 21 | 23 | 18 | 7 | 4 | 4 | 1 | 4 | 0 | 2
|
328313
|
maicos-devel/maicos
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/maicos-devel_maicos/src/maicos/modules/densitysphere.py
|
maicos.modules.densitysphere.DensitySphere
|
import MDAnalysis as mda
from ..lib.weights import density_weights
from ..core import ProfileSphereBase
import logging
from ..lib.util import render_docs
@render_docs
class DensitySphere(ProfileSphereBase):
"""Spherical partial density profiles.
${DENSITY_SPHERE_DESCRIPTION}
${CORRELATION_INFO_RADIAL}
Parameters
----------
${ATOMGROUP_PARAMETER}
${DENS_PARAMETER}
${PROFILE_SPHERE_CLASS_PARAMETERS}
${OUTPUT_PARAMETER}
Attributes
----------
${PROFILE_SPHERE_CLASS_ATTRIBUTES}
"""
def __init__(self, atomgroup: mda.AtomGroup, dens: str='mass', rmin: float=0, rmax: float | None=None, bin_width: float=1, bin_method: str='com', grouping: str='atoms', refgroup: mda.AtomGroup | None=None, unwrap: bool=True, pack: bool=True, jitter: float=0.0, concfreq: int=0, output: str='density.dat') -> None:
self._locals = locals()
super().__init__(atomgroup=atomgroup, unwrap=unwrap, pack=pack, refgroup=refgroup, jitter=jitter, concfreq=concfreq, rmin=rmin, rmax=rmax, bin_width=bin_width, grouping=grouping, bin_method=bin_method, output=output, weighting_function=density_weights, weighting_function_kwargs={'dens': dens}, normalization='volume')
def _prepare(self):
logging.info(f"Analysis of the {self._locals['dens']} density profile.")
super()._prepare()
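# Minimal usage sketch: inputs and the selection are placeholders; run() and
# save() are assumed from the shared MAICoS/MDAnalysis base classes. Binning is
# radial with respect to the box center, or to the refgroup center if given.
if __name__ == "__main__":
    import MDAnalysis as mda

    u = mda.Universe("topol.tpr", "traj.trr")
    droplet = u.select_atoms("resname SOL")  # hypothetical selection
    dens = DensitySphere(droplet, bin_width=1.0, refgroup=droplet)
    dens.run()
    dens.save()  # writes the radial profile to "density.dat" by default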
|
@render_docs
class DensitySphere(ProfileSphereBase):
'''Spherical partial density profiles.
${DENSITY_SPHERE_DESCRIPTION}
${CORRELATION_INFO_RADIAL}
Parameters
----------
${ATOMGROUP_PARAMETER}
${DENS_PARAMETER}
${PROFILE_SPHERE_CLASS_PARAMETERS}
${OUTPUT_PARAMETER}
Attributes
----------
${PROFILE_SPHERE_CLASS_ATTRIBUTES}
'''
def __init__(self, atomgroup: mda.AtomGroup, dens: str='mass', rmin: float=0, rmax: float | None=None, bin_width: float=1, bin_method: str='com', grouping: str='atoms', refgroup: mda.AtomGroup | None=None, unwrap: bool=True, pack: bool=True, jitter: float=0.0, concfreq: int=0, output: str='density.dat') -> None:
pass
def _prepare(self):
pass
| 4 | 1 | 19 | 0 | 19 | 0 | 1 | 0.34 | 1 | 5 | 0 | 0 | 2 | 1 | 2 | 30 | 58 | 7 | 38 | 19 | 20 | 13 | 7 | 4 | 4 | 1 | 4 | 0 | 2
|
328314
|
maicos-devel/maicos
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/maicos-devel_maicos/src/maicos/modules/dielectriccylinder.py
|
maicos.modules.dielectriccylinder.DielectricCylinder
|
import numpy as np
import logging
import MDAnalysis as mda
from ..lib.util import charge_neutral, citation_reminder, get_compound, render_docs
import scipy.constants
from ..core import CylinderBase
@render_docs
@charge_neutral(filter='error')
class DielectricCylinder(CylinderBase):
"""Cylindrical dielectric profiles.
Computes the axial :math:`\\varepsilon_z(r)` and inverse radial
:math:`\\varepsilon_r^{-1}(r)` components of the cylindrical dielectric tensor
:math:`\\varepsilon`. The components are binned along the radial direction of the
cylinder. The :math:`z`-axis of the cylinder is pointing in the direction given by
the ``dim`` parameter. The center of the cylinder is either located at the center of
the simulation box (default) or at the center of mass of the ``refgroup``, if
provided.
For usage please refer to :ref:`How-to: Dielectric constant<howto-dielectric>` and
for details on the theory see :ref:`dielectric-explanations`.
For correlation analysis, the component along the :math:`z`-axis is used.
${CORRELATION_INFO}
Also, please read and cite :footcite:p:`locheGiantaxialDielectric2019`.
Parameters
----------
${ATOMGROUP_PARAMETER}
${TEMPERATURE_PARAMETER}
vcutwidth : float
Spacing of virtual cuts (bins) along the parallel directions.
single : bool
For a single chain of molecules the average of :math:`M` is zero. This flag sets
:math:`\\langle M \\rangle = 0`.
${CYLINDER_CLASS_PARAMETERS}
${BASE_CLASS_PARAMETERS}
${OUTPUT_PREFIX_PARAMETER}
Attributes
----------
${CYLINDER_CLASS_ATTRIBUTES}
results.eps_z : numpy.ndarray
Reduced axial dielectric profile :math:`(\\varepsilon_z(r) - 1)` of the
selected atomgroup
results.deps_z : numpy.ndarray
Estimated uncertainty of axial dielectric profile
results.eps_r : numpy.ndarray
Reduced inverse radial dielectric profile
:math:`(\\varepsilon^{-1}_r(r) - 1)`
results.deps_r : numpy.ndarray
Estimated uncertainty of inverse radial dielectric profile
"""
def __init__(self, atomgroup: mda.AtomGroup, temperature: float=300, vcutwidth: float=0.1, single: bool=False, dim: int=2, zmin: float | None=None, zmax: float | None=None, rmin: float=0, rmax: float | None=None, bin_width: float=0.1, refgroup: mda.AtomGroup | None=None, unwrap: bool=True, pack: bool=True, jitter: float=0.0, concfreq: int=0, output_prefix: str='eps_cyl') -> None:
self._locals = locals()
self.comp = get_compound(atomgroup)
ix = atomgroup._get_compound_indices(self.comp)
_, self.inverse_ix = np.unique(ix, return_inverse=True)
if zmin is not None or zmax is not None or rmin != 0 or (rmax is not None):
logging.warning('Setting `rmin` and `rmax` (as well as `zmin` and `zmax`) might cut off molecules. This will lead to severe artifacts in the dielectric profiles.')
super().__init__(atomgroup, unwrap=unwrap, pack=pack, refgroup=refgroup, jitter=jitter, concfreq=concfreq, dim=dim, zmin=zmin, zmax=zmax, bin_width=bin_width, rmin=rmin, rmax=rmax, wrap_compound=self.comp)
self.output_prefix = output_prefix
self.temperature = temperature
self.single = single
self.vcutwidth = vcutwidth
def _prepare(self) -> None:
logging.info('Analysis of the axial and inverse radial components of the cylindrical dielectric tensor.')
logging.info(citation_reminder('10.1021/acs.jpcb.9b09269'))
super()._prepare()
def _single_frame(self) -> float:
super()._single_frame()
rbins = np.digitize(self.pos_cyl[:, 0], self._obs.bin_edges[1:-1])
curQ_r = np.bincount(rbins[self.atomgroup.ix], weights=self.atomgroup.charges, minlength=self.n_bins)
self._obs.m_r = -np.cumsum(curQ_r) / 2 / np.pi / self._obs.L / self._obs.bin_pos
curQ_r_tot = np.bincount(rbins, weights=self._universe.atoms.charges, minlength=self.n_bins)
self._obs.m_r_tot = -np.cumsum(curQ_r_tot) / 2 / np.pi / self._obs.L / self._obs.bin_pos
self._obs.M_r = np.sum(self._obs.m_r_tot * self._obs.bin_width)
self._obs.mM_r = self._obs.m_r * self._obs.M_r
nbinsz = np.ceil(self._obs.L / self.vcutwidth).astype(int)
chargepos = self.pos_cyl[self.atomgroup.ix, 0] * np.abs(self.atomgroup.charges)
center = self.atomgroup.accumulate(chargepos, compound=self.comp) / self.atomgroup.accumulate(np.abs(self.atomgroup.charges), compound=self.comp)
testpos = center[self.inverse_ix]
rbins = np.digitize(testpos, self._obs.bin_edges[1:-1])
z = np.arange(nbinsz) * (self._obs.L / nbinsz)
zbins = np.digitize(self.pos_cyl[self.atomgroup.ix, 2], z[1:])
curQz = np.bincount(rbins + self.n_bins * zbins, weights=self.atomgroup.charges, minlength=self.n_bins * nbinsz).reshape(nbinsz, self.n_bins)
curqz = np.cumsum(curQz, axis=0) / self._obs.bin_area[np.newaxis, :]
self._obs.m_z = -curqz.mean(axis=0)
self._obs.M_z = np.dot(self._universe.atoms.charges, self.pos_cyl[:, 2])
self._obs.mM_z = self._obs.m_z * self._obs.M_z
return self._obs.M_z
def _conclude(self) -> None:
super()._conclude()
self._pref = 1 / scipy.constants.epsilon_0
self._pref /= scipy.constants.Boltzmann * self.temperature
self._pref /= scipy.constants.angstrom / scipy.constants.elementary_charge ** 2
if not self.single:
cov_z = self.means.mM_z - self.means.m_z * self.means.M_z
cov_r = self.means.mM_r - self.means.m_r * self.means.M_r
dcov_z = np.sqrt(self.sems.mM_z ** 2 + self.sems.m_z ** 2 * self.means.M_z ** 2 + self.means.m_z ** 2 * self.sems.M_z ** 2)
dcov_r = np.sqrt(self.sems.mM_r ** 2 + self.sems.m_r ** 2 * self.means.M_r ** 2 + self.means.m_r ** 2 * self.sems.M_r ** 2)
else:
cov_z = self.means.mM_z
cov_r = self.means.mM_r
dcov_z = self.sems.mM_z
dcov_r = self.sems.mM_r
self.results.eps_z = self._pref * cov_z
self.results.deps_z = self._pref * dcov_z
self.results.eps_r = -(2 * np.pi * self._obs.L * self._pref * self.results.bin_pos * cov_r)
self.results.deps_r = 2 * np.pi * self._obs.L * self._pref * self.results.bin_pos * dcov_r
@render_docs
def save(self) -> None:
"""${SAVE_METHOD_PREFIX_DESCRIPTION}"""
outdata_z = np.array([self.results.bin_pos, self.results.eps_z, self.results.deps_z]).T
outdata_r = np.array([self.results.bin_pos, self.results.eps_r, self.results.deps_r]).T
columns = ['positions [Å]']
columns += ['ε_z - 1', 'Δε_z']
self.savetxt('{}{}'.format(self.output_prefix, '_z.dat'), outdata_z, columns=columns)
columns = ['positions [Å]']
columns += ['ε^-1_r - 1', 'Δε^-1_r']
self.savetxt('{}{}'.format(self.output_prefix, '_r.dat'), outdata_r, columns=columns)
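# Minimal usage sketch: inputs and the selection are placeholders; run() is
# assumed from the MDAnalysis AnalysisBase API. The system must be charge
# neutral, and molecules should be kept whole (unwrap=True is the default).
if __name__ == "__main__":
    import MDAnalysis as mda

    u = mda.Universe("topol.tpr", "traj.trr")
    eps = DielectricCylinder(u.select_atoms("resname SOL"), temperature=300, bin_width=0.5)
    eps.run()
    eps.save()  # writes "eps_cyl_z.dat" and "eps_cyl_r.dat" (see save() above)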
|
@render_docs
@charge_neutral(filter='error')
class DielectricCylinder(CylinderBase):
'''Cylindrical dielectric profiles.
Computes the axial :math:`\varepsilon_z(r)` and inverse radial
:math:`\varepsilon_r^{-1}(r)` components of the cylindrical dielectric tensor
:math:`\varepsilon`. The components are binned along the radial direction of the
cylinder. The :math:`z`-axis of the cylinder is pointing in the direction given by
the ``dim`` parameter. The center of the cylinder is either located at the center of
the simulation box (default) or at the center of mass of the ``refgroup``, if
provided.
For usage please refer to :ref:`How-to: Dielectric constant<howto-dielectric>` and
for details on the theory see :ref:`dielectric-explanations`.
For correlation analysis, the component along the :math:`z`-axis is used.
${CORRELATION_INFO}
Also, please read and cite :footcite:p:`locheGiantaxialDielectric2019`.
Parameters
----------
${ATOMGROUP_PARAMETER}
${TEMPERATURE_PARAMETER}
vcutwidth : float
Spacing of virtual cuts (bins) along the parallel directions.
single : bool
For a single chain of molecules the average of :math:`M` is zero. This flag sets
:math:`\langle M \rangle = 0`.
${CYLINDER_CLASS_PARAMETERS}
${BASE_CLASS_PARAMETERS}
${OUTPUT_PREFIX_PARAMETER}
Attributes
----------
${CYLINDER_CLASS_ATTRIBUTES}
results.eps_z : numpy.ndarray
Reduced axial dielectric profile :math:`(\varepsilon_z(r) - 1)` of the
selected atomgroup
results.deps_z : numpy.ndarray
Estimated uncertainty of axial dielectric profile
results.eps_r : numpy.ndarray
Reduced inverse radial dielectric profile
:math:`(\varepsilon^{-1}_r(r) - 1)`
results.deps_r : numpy.ndarray
Estimated uncertainty of inverse radial dielectric profile
'''
def __init__(self, atomgroup: mda.AtomGroup, temperature: float=300, vcutwidth: float=0.1, single: bool=False, dim: int=2, zmin: float | None=None, zmax: float | None=None, rmin: float=0, rmax: float | None=None, bin_width: float=0.1, refgroup: mda.AtomGroup | None=None, unwrap: bool=True, pack: bool=True, jitter: float=0.0, concfreq: int=0, output_prefix: str='eps_cyl') -> None:
pass
def _prepare(self) -> None:
pass
def _single_frame(self) -> float:
pass
def _conclude(self) -> None:
pass
@render_docs
def save(self) -> None:
'''${SAVE_METHOD_PREFIX_DESCRIPTION}'''
pass
| 9 | 2 | 39 | 5 | 29 | 5 | 1 | 0.45 | 1 | 5 | 0 | 0 | 5 | 8 | 5 | 28 | 248 | 38 | 145 | 52 | 120 | 65 | 66 | 33 | 60 | 2 | 4 | 1 | 7
|
328315
|
maicos-devel/maicos
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/maicos-devel_maicos/src/maicos/modules/dielectricplanar.py
|
maicos.modules.dielectricplanar.DielectricPlanar
|
from ..core import PlanarBase
import numpy as np
from ..lib.math import symmetrize
from ..lib.util import charge_neutral, citation_reminder, get_compound, render_docs
import MDAnalysis as mda
import scipy.constants
import logging
@render_docs
@charge_neutral(filter='error')
class DielectricPlanar(PlanarBase):
"""Planar dielectric profiles.
Computes the parallel :math:`\\varepsilon_\\parallel(z)` and inverse perpendicular
(:math:`\\varepsilon_\\perp^{-1}(r)`) components of the planar dielectric tensor
:math:`\\varepsilon`. The components are binned along the cartesian :math:`z`
direction yielding the component normal to the surface and defined by the ``dim``
parameter.
For usage please refer to :ref:`How-to: Dielectric constant<howto-dielectric>` and
for details on the theory see :ref:`dielectric-explanations`.
For correlation analysis, the norm of the parallel total dipole moment is used.
${CORRELATION_INFO}
Also, please read and cite
:footcite:t:`schlaichWaterDielectricEffects2016` and Refs.
:footcite:p:`locheUniversalNonuniversalAspects2020`,
:footcite:p:`bonthuisProfileStaticPermittivity2012`.
Parameters
----------
${ATOMGROUP_PARAMETER}
${TEMPERATURE_PARAMETER}
vcutwidth : float
Spacing of virtual cuts (bins) along the parallel directions.
is_3d : bool
Use 3d-periodic boundary conditions, i.e., include the dipole correction for
the interaction between periodic images
:footcite:p:`sternCalculationDielectricPermittivity2003`.
${PLANAR_CLASS_PARAMETERS}
${SYM_PARAMETER}
${BASE_CLASS_PARAMETERS}
${OUTPUT_PREFIX_PARAMETER}
Attributes
----------
${PLANAR_CLASS_ATTRIBUTES}
results.eps_par : numpy.ndarray
Reduced parallel dielectric profile
:math:`(\\varepsilon_\\parallel(z) - 1)` of the selected AtomGroup
results.deps_par : numpy.ndarray
Uncertainty of parallel dielectric profile
results.eps_par_self : numpy.ndarray
Reduced self contribution of parallel dielectric profile
:math:`(\\varepsilon_{\\parallel,\\mathrm{self}}(z) - 1)`
results.eps_par_coll : numpy.ndarray
Reduced collective contribution of parallel dielectric profile
:math:`(\\varepsilon_{\\parallel,\\mathrm{coll}}(z) - 1)`
results.eps_perp : numpy.ndarray
Reduced inverse perpendicular dielectric profile
:math:`(\\varepsilon^{-1}_\\perp(z) - 1)`
results.deps_perp : numpy.ndarray
Uncertainty of inverse perpendicular dielectric profile
results.eps_perp_self : numpy.ndarray
Reduced self contribution of the inverse perpendicular dielectric
profile :math:`(\\varepsilon^{-1}_{\\perp,\\mathrm{self}}(z) - 1)`
results.eps_perp_coll : numpy.ndarray
Reduced collective contribution of the inverse perpendicular dielectric profile
:math:`(\\varepsilon^{-1}_{\\perp,\\mathrm{coll}}(z) - 1)`
"""
def __init__(self, atomgroup: mda.AtomGroup, temperature: float=300, vcutwidth: float=0.1, is_3d: bool=False, dim: int=2, zmin: float | None=None, zmax: float | None=None, bin_width: float=0.5, sym: bool=False, refgroup: mda.AtomGroup | None=None, unwrap: bool=True, pack: bool=True, jitter: float=0.0, concfreq: int=0, output_prefix: str='eps') -> None:
self._locals = locals()
wrap_compound = get_compound(atomgroup)
if zmin is not None or zmax is not None:
logging.warning('Setting `zmin` and `zmax` might cut off molecules. This will lead to severe artifacts in the dielectric profiles.')
super().__init__(atomgroup=atomgroup, unwrap=unwrap, pack=pack, refgroup=refgroup, jitter=jitter, dim=dim, zmin=zmin, zmax=zmax, bin_width=bin_width, wrap_compound=wrap_compound, concfreq=concfreq)
self.is_3d = is_3d
self.sym = sym
self.temperature = temperature
self.output_prefix = output_prefix
self.concfreq = concfreq
self.vcutwidth = vcutwidth
def _prepare(self) -> None:
logging.info('Analysis of the parallel and inverse perpendicular components of the planar dielectric tensor.')
logging.info(citation_reminder('10.1103/PhysRevLett.117.048001'))
super()._prepare()
self.comp = get_compound(self.atomgroup)
ix = self.atomgroup._get_compound_indices(self.comp)
_, inverse_ix = np.unique(ix, return_inverse=True)
self.inverse_ix = inverse_ix
def _single_frame(self) -> float:
super()._single_frame()
self._obs.M = np.dot(self._universe.atoms.charges, self._universe.atoms.positions)
self._obs.M_perp = self._obs.M[self.dim]
self._obs.M_perp_2 = self._obs.M[self.dim] ** 2
self._obs.M_par = self._obs.M[self.odims]
self._obs.m_par = np.zeros((self.n_bins, 2))
self._obs.mM_par = np.zeros(self.n_bins)
self._obs.mm_par = np.zeros(self.n_bins)
self._obs.cmM_par = np.zeros(self.n_bins)
self._obs.cM_par = np.zeros((self.n_bins, 2))
self._obs.m_perp = np.zeros(self.n_bins)
self._obs.mM_perp = np.zeros(self.n_bins)
self._obs.mm_perp = np.zeros(self.n_bins)
self._obs.cmM_perp = np.zeros(self.n_bins)
self._obs.cM_perp = np.zeros(self.n_bins)
zbins = np.digitize(self.atomgroup.atoms.positions[:, self.dim], self._obs.bin_edges[1:-1])
curQ = np.bincount(zbins, weights=self.atomgroup.atoms.charges, minlength=self.n_bins)
self._obs.m_perp = -np.cumsum(curQ / self._obs.bin_area)
self._obs.mM_perp = self._obs.m_perp * self._obs.M_perp
self._obs.mm_perp = self._obs.m_perp ** 2 * self._obs.bin_volume
self._obs.cmM_perp = self._obs.m_perp * (self._obs.M_perp - self._obs.m_perp * self._obs.bin_volume)
self._obs.cM_perp = self._obs.M_perp - self._obs.m_perp * self._obs.bin_volume
testpos = self.atomgroup.center(weights=np.abs(self.atomgroup.charges), compound=self.comp)[self.inverse_ix, self.dim]
for j, direction in enumerate(self.odims):
Lx = self._ts.dimensions[direction]
Ax = self._ts.dimensions[self.odims[1 - j]] * self._obs.bin_width
vbinsx = np.ceil(Lx / self.vcutwidth).astype(int)
x_bin_edges = np.arange(vbinsx) * (Lx / vbinsx)
zpos = np.digitize(testpos, self._obs.bin_edges[1:-1])
xbins = np.digitize(self.atomgroup.atoms.positions[:, direction], x_bin_edges[1:])
curQx = np.bincount(zpos + self.n_bins * xbins, weights=self.atomgroup.charges, minlength=vbinsx * self.n_bins).reshape(vbinsx, self.n_bins)
self._obs.m_par[:, j] = -np.cumsum(curQx / Ax, axis=0).mean(axis=0)
bin_volume = self._obs.bin_volume[0]
self._obs.mM_par = np.dot(self._obs.m_par, self._obs.M_par)
self._obs.mm_par = (self._obs.m_par * self._obs.m_par).sum(axis=1) * bin_volume
self._obs.cmM_par = (self._obs.m_par * (self._obs.M_par - self._obs.m_par * bin_volume)).sum(axis=1)
self._obs.cM_par = self._obs.M_par - self._obs.m_par * bin_volume
return np.linalg.norm(self._obs.M_par)
def _conclude(self) -> None:
super()._conclude()
self._pref = 1 / scipy.constants.epsilon_0
self._pref /= scipy.constants.Boltzmann * self.temperature
self._pref /= scipy.constants.angstrom / scipy.constants.elementary_charge ** 2
self.results.V = self.means.bin_volume.sum()
cov_perp = self.means.mM_perp - self.means.m_perp * self.means.M_perp
dcov_perp = np.sqrt(self.sems.mM_perp ** 2 + (self.means.M_perp * self.sems.m_perp) ** 2 + (self.means.m_perp * self.sems.M_perp) ** 2)
var_perp = self.means.M_perp_2 - self.means.M_perp ** 2
cov_perp_self = self.means.mm_perp - self.means.m_perp ** 2 * self.means.bin_volume[0]
cov_perp_coll = self.means.cmM_perp - self.means.m_perp * self.means.cM_perp
if not self.is_3d:
self.results.eps_perp = -self._pref * cov_perp
self.results.eps_perp_self = -self._pref * cov_perp_self
self.results.eps_perp_coll = -self._pref * cov_perp_coll
self.results.deps_perp = self._pref * dcov_perp
else:
self.results.eps_perp = -cov_perp / (self._pref ** (-1) + var_perp / self.results.V)
self.results.deps_perp = self._pref * dcov_perp
self.results.eps_perp_self = -self._pref * cov_perp_self / (1 + self._pref / self.results.V * var_perp)
self.results.eps_perp_coll = -self._pref * cov_perp_coll / (1 + self._pref / self.results.V * var_perp)
cov_par = np.zeros(self.n_bins)
dcov_par = np.zeros(self.n_bins)
cov_par_self = np.zeros(self.n_bins)
cov_par_coll = np.zeros(self.n_bins)
cov_par = 0.5 * (self.means.mM_par - np.dot(self.means.m_par, self.means.M_par.T))
dcov_par = 0.5 * np.sqrt(self.sems.mM_par ** 2 + np.dot(self.sems.m_par ** 2, (self.means.M_par ** 2).T) + np.dot(self.means.m_par ** 2, (self.sems.M_par ** 2).T))
cov_par_self = 0.5 * (self.means.mm_par - np.dot(self.means.m_par, self.means.m_par.sum(axis=0)))
cov_par_coll = 0.5 * (self.means.cmM_par - (self.means.m_par * self.means.cM_par).sum(axis=1))
self.results.eps_par = self._pref * cov_par
self.results.deps_par = self._pref * dcov_par
self.results.eps_par_self = self._pref * cov_par_self
self.results.eps_par_coll = self._pref * cov_par_coll
if self.sym:
symmetrize(self.results.eps_perp, axis=0, inplace=True)
symmetrize(self.results.deps_perp, axis=0, inplace=True)
symmetrize(self.results.eps_perp_self, axis=0, inplace=True)
symmetrize(self.results.eps_perp_coll, axis=0, inplace=True)
symmetrize(self.results.eps_par, axis=0, inplace=True)
symmetrize(self.results.deps_par, axis=0, inplace=True)
symmetrize(self.results.eps_par_self, axis=0, inplace=True)
symmetrize(self.results.eps_par_coll, axis=0, inplace=True)
@render_docs
def save(self) -> None:
"""${SAVE_METHOD_PREFIX_DESCRIPTION}"""
columns = ['position [Å]']
columns.append('ε^-1_⟂ - 1')
columns.append('Δε^-1_⟂')
columns.append('self ε^-1_⟂ - 1')
columns.append('coll. ε^-1_⟂ - 1')
outdata_perp = np.vstack([self.results.bin_pos, self.results.eps_perp, self.results.deps_perp, self.results.eps_perp_self, self.results.eps_perp_coll]).T
self.savetxt('{}{}'.format(self.output_prefix, '_perp'), outdata_perp, columns=columns)
columns = ['position [Å]']
columns.append('ε_∥ - 1')
columns.append('Δε_∥')
columns.append('self ε_∥ - 1')
columns.append('coll ε_∥ - 1')
outdata_par = np.vstack([self.results.bin_pos, self.results.eps_par, self.results.deps_par, self.results.eps_par_self, self.results.eps_par_coll]).T
self.savetxt('{}{}'.format(self.output_prefix, '_par'), outdata_par, columns=columns)
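# Minimal usage sketch: inputs and the selection are placeholders; run() is
# assumed from the MDAnalysis AnalysisBase API. Requires a charge-neutral
# system; is_3d adds the dipole correction for 3d-periodic boundaries.
if __name__ == "__main__":
    import MDAnalysis as mda

    u = mda.Universe("topol.tpr", "traj.trr")
    water = u.select_atoms("resname SOL")  # hypothetical selection
    eps = DielectricPlanar(water, temperature=300, bin_width=0.5, is_3d=False)
    eps.run()
    print(eps.results.eps_perp[:5])  # reduced inverse perpendicular profile
    eps.save()  # writes the perpendicular and parallel profiles (prefix "eps")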
|
@render_docs
@charge_neutral(filter='error')
class DielectricPlanar(PlanarBase):
'''Planar dielectric profiles.
Computes the parallel :math:`\varepsilon_\parallel(z)` and inverse perpendicular
(:math:`\varepsilon_\perp^{-1}(r)`) components of the planar dielectric tensor
:math:`\varepsilon`. The components are binned along the cartesian :math:`z`
direction yielding the component normal to the surface and defined by the ``dim``
parameter.
For usage please refer to :ref:`How-to: Dielectric constant<howto-dielectric>` and
for details on the theory see :ref:`dielectric-explanations`.
For correlation analysis, the norm of the parallel total dipole moment is used.
${CORRELATION_INFO}
Also, please read and cite
:footcite:t:`schlaichWaterDielectricEffects2016` and Refs.
:footcite:p:`locheUniversalNonuniversalAspects2020`,
:footcite:p:`bonthuisProfileStaticPermittivity2012`.
Parameters
----------
${ATOMGROUP_PARAMETER}
${TEMPERATURE_PARAMETER}
vcutwidth : float
Spacing of virtual cuts (bins) along the parallel directions.
is_3d : bool
Use 3d-periodic boundary conditions, i.e., include the dipole correction for
the interaction between periodic images
:footcite:p:`sternCalculationDielectricPermittivity2003`.
${PLANAR_CLASS_PARAMETERS}
${SYM_PARAMETER}
${BASE_CLASS_PARAMETERS}
${OUTPUT_PREFIX_PARAMETER}
Attributes
----------
${PLANAR_CLASS_ATTRIBUTES}
results.eps_par : numpy.ndarray
Reduced parallel dielectric profile
:math:`(\varepsilon_\parallel(z) - 1)` of the selected AtomGroup
results.deps_par : numpy.ndarray
Uncertainty of parallel dielectric profile
results.eps_par_self : numpy.ndarray
Reduced self contribution of parallel dielectric profile
:math:`(\varepsilon_{\parallel,\mathrm{self}}(z) - 1)`
results.eps_par_coll : numpy.ndarray
Reduced collective contribution of parallel dielectric profile
:math:`(\varepsilon_{\parallel,\mathrm{coll}}(z) - 1)`
results.eps_perp : numpy.ndarray
Reduced inverse perpendicular dielectric profile
:math:`(\varepsilon^{-1}_\perp(z) - 1)`
results.deps_perp : numpy.ndarray
Uncertainty of inverse perpendicular dielectric profile
results.eps_perp_self : numpy.ndarray
Reduced self contribution of the inverse perpendicular dielectric
profile :math:`(\varepsilon^{-1}_{\perp,\mathrm{self}}(z) - 1)`
results.eps_perp_coll : numpy.ndarray
Reduced collective contribution of the inverse perpendicular dielectric profile
:math:`(\varepsilon^{-1}_{\perp,\mathrm{coll}}(z) - 1)`
'''
def __init__(self, atomgroup: mda.AtomGroup, temperature: float=300, vcutwidth: float=0.1, is_3d: bool=False, dim: int=2, zmin: float | None=None, zmax: float | None=None, bin_width: float=0.5, sym: bool=False, refgroup: mda.AtomGroup | None=None, unwrap: bool=True, pack: bool=True, jitter: float=0.0, concfreq: int=0, output_prefix: str='eps') -> None:
pass
def _prepare(self) -> None:
pass
def _single_frame(self) -> float:
pass
def _conclude(self) -> None:
pass
@render_docs
def save(self) -> None:
'''${SAVE_METHOD_PREFIX_DESCRIPTION}'''
pass
| 9 | 2 | 56 | 9 | 43 | 5 | 2 | 0.37 | 1 | 6 | 0 | 0 | 5 | 10 | 5 | 23 | 350 | 56 | 215 | 61 | 191 | 79 | 116 | 43 | 110 | 3 | 3 | 1 | 9
|
328316
|
maicos-devel/maicos
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/maicos-devel_maicos/src/maicos/modules/dielectricspectrum.py
|
maicos.modules.dielectricspectrum.DielectricSpectrum
|
import numpy as np
from ..lib.util import bin, charge_neutral, citation_reminder, get_compound, render_docs
from ..core import AnalysisBase
from pathlib import Path
import MDAnalysis as mda
import scipy.constants
from ..lib.math import FT, iFT
import logging
@render_docs
@charge_neutral(filter='error')
class DielectricSpectrum(AnalysisBase):
"""Linear dielectric spectrum.
Given a molecular dynamics trajectory, this module produces a `.txt` file
containing the complex dielectric function as a function of the linear frequency
(:math:`\\nu` or :math:`f`, rather than the angular frequency :math:`\\omega`),
along with the associated standard deviations. The algorithm is based on the Fluctuation
Dissipation Relation: :math:`\\chi(f) = -1/(3 V k_B T \\varepsilon_0)
\\mathcal{L}[\\theta(t) \\langle P(0) dP(t)/dt\\rangle]`, where :math:`\\mathcal{L}` is
the Laplace transformation.
.. note::
The polarization time series and the average system volume are also saved.
Please read and cite :footcite:p:`carlsonExploringAbsorptionSpectrum2020`.
Parameters
----------
${ATOMGROUP_PARAMETER}
${TEMPERATURE_PARAMETER}
segs : int
Sets the number of segments the trajectory is broken into.
df : float
The desired frequency spacing in THz. This sets the minimum frequency for which
there is data. Overrides the `segs` option.
bins : int
Determines the number of bins used for data averaging; (this parameter sets the
upper limit). The data are by default binned logarithmically. This helps to
reduce noise, particularly in the high-frequency domain, and also prevents plot
files from being too large.
binafter : int
The number of low-frequency data points that are left unbinned.
nobin : bool
Prevents the data from being binned altogether. This can result in very large
plot files and errors.
${BASE_CLASS_PARAMETERS}
${OUTPUT_PREFIX_PARAMETER}
Attributes
----------
results
"""
def __init__(self, atomgroup: mda.AtomGroup, temperature: float=300, segs: int=20, df: float | None=None, bins: int=200, binafter: float=20, nobin: bool=False, refgroup: mda.AtomGroup | None=None, unwrap: bool=True, pack: bool=True, jitter: float=0.0, concfreq: int=0, output_prefix: str='') -> None:
self._locals = locals()
wrap_compound = get_compound(atomgroup)
super().__init__(atomgroup, unwrap=unwrap, pack=pack, refgroup=refgroup, concfreq=concfreq, wrap_compound=wrap_compound, jitter=jitter)
self.temperature = temperature
self.output_prefix = output_prefix
self.segs = segs
self.df = df
self.bins = bins
self.binafter = binafter
self.nobin = nobin
def _prepare(self) -> None:
logging.info('Analysis of the linear dielectric spectrum.')
logging.info(citation_reminder('10.1021/acs.jpca.0c04063'))
if len(self.output_prefix) > 0:
self.output_prefix += '_'
self.dt = self._trajectory.dt * self.step
self.V = 0
self.P = np.zeros((self.n_frames, 3))
def _single_frame(self) -> None:
self.V += self._ts.volume
self.P[self._frame_index, :] = np.dot(self.atomgroup.charges, self.atomgroup.positions)
def _conclude(self) -> None:
self.results.t = self._trajectory.dt * self.frames
self.results.V = self.V / self._index
self.results.P = self.P
if self.df is not None:
self.segs = np.max([int(self.n_frames * self.dt * self.df), 2])
self.seglen = int(self.n_frames / self.segs)
pref = scipy.constants.e ** 2 * scipy.constants.angstrom ** 2
pref /= 3 * self.results.V * scipy.constants.angstrom ** 3
pref /= scipy.constants.k * self.temperature
pref /= scipy.constants.epsilon_0
logging.info('Calculating susceptibility and errors...')
if len(self.results.t) < 2 * self.seglen:
self.results.t = np.append(self.results.t, self.results.t + self.results.t[-1] + self.dt)
self.results.t = self.results.t[:2 * self.seglen]
self.results.nu = FT(self.results.t, np.append(self.results.P[:self.seglen, 0], np.zeros(self.seglen)))[0]
self.results.susc = np.zeros(self.seglen, dtype=complex)
self.results.dsusc = np.zeros(self.seglen, dtype=complex)
ss = np.zeros(2 * self.seglen, dtype=complex)
for s in range(0, self.segs):
logging.info(f'Segment {s + 1} of {self.segs}')
ss = 0 + 0j
for self._i in range(3):
FP: np.ndarray = FT(self.results.t, np.append(self.results.P[s * self.seglen:(s + 1) * self.seglen, self._i], np.zeros(self.seglen)), False)
ss += FP.real * FP.real + FP.imag * FP.imag
ss *= self.results.nu * 1j
ift: np.ndarray = iFT(self.results.t, 1j * np.sign(self.results.nu) * FT(self.results.nu, ss, False), False)
ss.real = ift.imag
if s == 0:
self.results.susc += ss[self.seglen:]
else:
ds = ss[self.seglen:] - self.results.susc / s
self.results.susc += ss[self.seglen:]
dif = ss[self.seglen:] - self.results.susc / (s + 1)
ds.real *= dif.real
ds.imag *= dif.imag
self.results.dsusc += ds
self.results.dsusc.real = np.sqrt(self.results.dsusc.real)
self.results.dsusc.imag = np.sqrt(self.results.dsusc.imag)
self.results.susc *= pref / (2 * self.seglen * self.segs * self.dt)
self.results.dsusc *= pref / (2 * self.seglen * self.segs * self.dt)
self.results.nu = self.results.nu[self.seglen:] / (2 * np.pi)
logging.info(f'Length of segments: {self.seglen} frames, {self.seglen * self.dt:.0f} ps')
logging.info(f'Frequency spacing: ~ {self.segs / (self.n_frames * self.dt):.5f} THz')
if not (self.nobin or self.seglen <= self.bins):
bins = np.logspace(np.log(self.binafter) / np.log(10), np.log(len(self.results.susc)) / np.log(10), self.bins - self.binafter + 1).astype(int)
bins = np.unique(np.append(np.arange(self.binafter), bins))[:-1]
self.results.nu_binned = bin(self.results.nu, bins)
self.results.susc_binned = bin(self.results.susc, bins)
self.results.dsusc_binned = bin(self.results.dsusc, bins)
logging.info(f'Binning data above datapoint {self.binafter} in log-spaced bins')
logging.info(f'Binned data consists of {len(self.results.susc_binned)} datapoints')
logging.info(f'Not binning data: there are {len(self.results.susc)} datapoints')
@render_docs
def save(self) -> None:
"""${SAVE_METHOD_PREFIX_DESCRIPTION}"""
np.save(self.output_prefix + 'tseries.npy', self.results.t)
with Path(self.output_prefix + 'V.txt').open(mode='w') as Vfile:
Vfile.write(str(self.results.V))
np.save(self.output_prefix + 'P_tseries.npy', self.results.P)
suscfilename = '{}{}'.format(self.output_prefix, 'susc.dat')
self.savetxt(suscfilename, np.transpose([self.results.nu, self.results.susc.real, self.results.dsusc.real, self.results.susc.imag, self.results.dsusc.imag]), columns=['ν [THz]', 'real(χ)', ' Δ real(χ)', 'imag(χ)', 'Δ imag(χ)'])
logging.info(f'Susceptibility data saved as {suscfilename}')
if not (self.nobin or self.seglen <= self.bins):
suscfilename = '{}{}'.format(self.output_prefix, 'susc_binned.dat')
self.savetxt(suscfilename, np.transpose([self.results.nu_binned, self.results.susc_binned.real, self.results.dsusc_binned.real, self.results.susc_binned.imag, self.results.dsusc_binned.imag]), columns=['ν [THz]', 'real(χ)', ' Δ real(χ)', 'imag(χ)', 'Δ imag(χ)'])
logging.info(f'Binned susceptibility data saved as {suscfilename}')
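# Minimal usage sketch: inputs are placeholders; run() is assumed from the
# MDAnalysis AnalysisBase API. segs trades frequency resolution against noise;
# df, if set, overrides it.
if __name__ == "__main__":
    import MDAnalysis as mda

    u = mda.Universe("topol.tpr", "traj.trr")
    spec = DielectricSpectrum(u.atoms, temperature=300, segs=10)
    spec.run()
    nu, chi = spec.results.nu, spec.results.susc  # frequency [THz], complex χ(ν)
    spec.save()  # writes "susc.dat" plus the polarization time series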
|
@render_docs
@charge_neutral(filter='error')
class DielectricSpectrum(AnalysisBase):
'''Linear dielectric spectrum.
Given a molecular dynamics trajectory, this module produces a `.txt` file
containing the complex dielectric function as a function of the linear frequency
(:math:`\nu` or :math:`f`, rather than the angular frequency :math:`\omega`),
along with the associated standard deviations. The algorithm is based on the Fluctuation
Dissipation Relation: :math:`\chi(f) = -1/(3 V k_B T \varepsilon_0)
\mathcal{L}[\theta(t) \langle P(0) dP(t)/dt\rangle]`, where :math:`\mathcal{L}` is
the Laplace transformation.
.. note::
The polarization time series and the average system volume are also saved.
Please read and cite :footcite:p:`carlsonExploringAbsorptionSpectrum2020`.
Parameters
----------
${ATOMGROUP_PARAMETER}
${TEMPERATURE_PARAMETER}
segs : int
Sets the number of segments the trajectory is broken into.
df : float
The desired frequency spacing in THz. This sets the minimum frequency for which
there is data. Overrides the `segs` option.
bins : int
Determines the number of bins used for data averaging; (this parameter sets the
upper limit). The data are by default binned logarithmically. This helps to
reduce noise, particularly in the high-frequency domain, and also prevents plot
files from being too large.
binafter : int
The number of low-frequency data points that are left unbinned.
nobin : bool
Prevents the data from being binned altogether. This can result in very large
plot files and errors.
${BASE_CLASS_PARAMETERS}
${OUTPUT_PREFIX_PARAMETER}
Attributes
----------
results
'''
def __init__(self, atomgroup: mda.AtomGroup, temperature: float=300, segs: int=20, df: float | None=None, bins: int=200, binafter: float=20, nobin: bool=False, refgroup: mda.AtomGroup | None=None, unwrap: bool=True, pack: bool=True, jitter: float=0.0, concfreq: int=0, output_prefix: str='') -> None:
pass
def _prepare(self) -> None:
pass
def _single_frame(self) -> None:
pass
def _conclude(self) -> None:
pass
@render_docs
def save(self) -> None:
'''${SAVE_METHOD_PREFIX_DESCRIPTION}'''
pass
| 9 | 2 | 43 | 6 | 33 | 4 | 3 | 0.35 | 1 | 8 | 0 | 0 | 5 | 14 | 5 | 17 | 266 | 40 | 167 | 47 | 145 | 59 | 87 | 29 | 81 | 7 | 2 | 2 | 13
|
328317
|
maicos-devel/maicos
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/maicos-devel_maicos/src/maicos/modules/dielectricsphere.py
|
maicos.modules.dielectricsphere.DielectricSphere
|
import scipy.constants
from ..lib.util import charge_neutral, citation_reminder, get_compound, render_docs
import numpy as np
from ..core import SphereBase
import MDAnalysis as mda
import logging
@render_docs
@charge_neutral(filter='error')
class DielectricSphere(SphereBase):
"""Spherical dielectric profiles.
Computes the inverse radial :math:`\\varepsilon_r^{-1}(r)` component of the
spherical dielectric tensor :math:`\\varepsilon`. The center of the sphere is either
located at the center of the simulation box (default) or at the center of mass of
the ``refgroup``, if provided.
For usage, please refer to :ref:`How-to: Dielectric
constant<howto-dielectric>` and for details on the theory see
:ref:`dielectric-explanations`.
For correlation analysis, the radial (:math:`r`) component is used.
${CORRELATION_INFO}
Also, please read and cite :footcite:p:`schaafDielectricResponseWater2015`.
Parameters
----------
${ATOMGROUP_PARAMETER}
${TEMPERATURE_PARAMETER}
${SPHERE_CLASS_PARAMETERS}
${BASE_CLASS_PARAMETERS}
${OUTPUT_PREFIX_PARAMETER}
Attributes
----------
${RADIAL_CLASS_ATTRIBUTES}
results.eps_rad : numpy.ndarray
Reduced inverse radial dielectric profile (:math:`\\varepsilon^{-1}_r(r) - 1)`
results.deps_rad : numpy.ndarray
Uncertainty of inverse radial dielectric profile
"""
def __init__(self, atomgroup: mda.AtomGroup, temperature: float=300, rmin: float=0, rmax: float | None=None, bin_width: float=0.1, refgroup: mda.AtomGroup | None=None, unwrap: bool=True, pack: bool=True, jitter: float=0.0, concfreq: int=0, output_prefix: str='eps_sph') -> None:
self._locals = locals()
self.comp = get_compound(atomgroup)
ix = atomgroup._get_compound_indices(self.comp)
_, self.inverse_ix = np.unique(ix, return_inverse=True)
if rmin != 0 or rmax is not None:
logging.warning('Setting `rmin` and `rmax` might cut off molecules. This will lead to severe artifacts in the dielectric profiles.')
super().__init__(atomgroup, concfreq=concfreq, jitter=jitter, refgroup=refgroup, rmin=rmin, rmax=rmax, bin_width=bin_width, unwrap=unwrap, pack=pack, wrap_compound=self.comp)
self.output_prefix = output_prefix
self.bin_width = bin_width
self.temperature = temperature
def _prepare(self) -> None:
logging.info('Analysis of the inverse radial component of the spherical dielectric tensor.')
logging.info(citation_reminder('10.1103/PhysRevE.92.032718'))
super()._prepare()
def _single_frame(self) -> float:
super()._single_frame()
rbins = np.digitize(self.pos_sph[:, 0], self._obs.bin_edges[1:-1])
curQ_rad = np.bincount(rbins[self.atomgroup.ix], weights=self.atomgroup.charges, minlength=self.n_bins)
self._obs.m_r = -np.cumsum(curQ_rad) / 4 / np.pi / self._obs.bin_pos ** 2
curQ_rad_tot = np.bincount(rbins, weights=self._universe.atoms.charges, minlength=self.n_bins)
self._obs.m_r_tot = -np.cumsum(curQ_rad_tot) / 4 / np.pi / self._obs.bin_pos ** 2
self._obs.M_r = np.sum(self._obs.m_r_tot * self._obs.bin_width)
self._obs.mM_r = self._obs.m_r * self._obs.M_r
return self._obs.M_r
def _conclude(self) -> None:
super()._conclude()
self._pref = 1 / scipy.constants.epsilon_0
self._pref /= scipy.constants.Boltzmann * self.temperature
self._pref /= scipy.constants.angstrom / scipy.constants.elementary_charge ** 2
cov_rad = self.means.mM_r - self.means.m_r * self.means.M_r
dcov_rad = np.sqrt(self.sems.mM_r ** 2 + self.sems.m_r ** 2 * self.means.M_r ** 2 + self.means.m_r ** 2 * self.sems.M_r ** 2)
self.results.eps_rad = 1 - 4 * np.pi * self.results.bin_pos ** 2 * self._pref * cov_rad
self.results.deps_rad = 4 * np.pi * self.results.bin_pos ** 2 * self._pref * dcov_rad
@render_docs
def save(self) -> None:
"""${SAVE_METHOD_PREFIX_DESCRIPTION}"""
outdata_rad = np.array([self.results.bin_pos, self.results.eps_rad, self.results.deps_rad]).T
columns = ['positions [Å]', 'eps_rad - 1', 'eps_rad error']
self.savetxt('{}{}'.format(self.output_prefix, '_rad.dat'), outdata_rad, columns=columns)
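# Hedged usage sketch appended for illustration; the input files and the
# selection string are assumptions, not data shipped with the module.
if __name__ == "__main__":
    u = mda.Universe("topol.tpr", "traj.trr")  # hypothetical inputs
    water = u.select_atoms("resname SOL")      # requires charges in the topology
    eps = DielectricSphere(water, temperature=300, bin_width=0.1)
    eps.run()
    eps.save()  # writes <output_prefix>_rad.dat: bin position, eps_rad - 1, error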
|
@render_docs
@charge_neutral(filter='error')
class DielectricSphere(SphereBase):
'''Spherical dielectric profiles.
Computes the inverse radial :math:`\varepsilon_r^{-1}(r)` component of the
spherical dielectric tensor :math:`\varepsilon`. The center of the sphere is either
located at the center of the simulation box (default) or at the center of mass of
the ``refgroup``, if provided.
For usage, please refer to :ref:`How-to: Dielectric
constant<howto-dielectric>` and for details on the theory see
:ref:`dielectric-explanations`.
For correlation analysis, the radial (:math:`r`) component is used.
${CORRELATION_INFO}
Also, please read and cite :footcite:p:`schaafDielectricResponseWater2015`.
Parameters
----------
${ATOMGROUP_PARAMETER}
${TEMPERATURE_PARAMETER}
${SPHERE_CLASS_PARAMETERS}
${BASE_CLASS_PARAMETERS}
${OUTPUT_PREFIX_PARAMETER}
Attributes
----------
${RADIAL_CLASS_ATTRIBUTES}
results.eps_rad : numpy.ndarray
Reduced inverse radial dielectric profile (:math:`\varepsilon^{-1}_r(r) - 1`)
results.deps_rad : numpy.ndarray
Uncertainty of inverse radial dielectric profile
'''
def __init__(self, atomgroup: mda.AtomGroup, temperature: float=300, rmin: float=0, rmax: float | None=None, bin_width: float=0.1, refgroup: mda.AtomGroup | None=None, unwrap: bool=True, pack: bool=True, jitter: float=0.0, concfreq: int=0, output_prefix: str='eps_sph') -> None:
pass
def _prepare(self) -> None:
pass
def _single_frame(self) -> float:
pass
def _conclude(self) -> None:
pass
@render_docs
def save(self) -> None:
'''${SAVE_METHOD_PREFIX_DESCRIPTION}'''
pass
| 9
| 2
| 23
| 3
| 18
| 3
| 1
| 0.43
| 1
| 5
| 0
| 0
| 5
| 7
| 5
| 22
| 157
| 28
| 90
| 35
| 70
| 39
| 39
| 21
| 33
| 2
| 3
| 1
| 6
|
328,318
|
maicos-devel/maicos
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/maicos-devel_maicos/src/maicos/modules/dipoleangle.py
|
maicos.modules.dipoleangle.DipoleAngle
|
from ..lib.weights import diporder_weights
from ..lib.util import get_compound, render_docs, unit_vectors_planar
import MDAnalysis as mda
from ..core import AnalysisBase
import numpy as np
import logging
@render_docs
class DipoleAngle(AnalysisBase):
"""Angle timeseries of dipole moments with respect to an axis.
The analysis can be applied to study the orientational dynamics of water molecules
during an excitation pulse. For more details read
:footcite:t:`elgabartyEnergyTransferHydrogen2020`.
Parameters
----------
${ATOMGROUP_PARAMETER}
${PDIM_PLANAR_PARAMETER}
${GROUPING_PARAMETER}
${BASE_CLASS_PARAMETERS}
${OUTPUT_PARAMETER}
Attributes
----------
results.t : numpy.ndarray
time (ps).
results.cos_theta_i : numpy.ndarray
Average :math:`\\cos\\theta` between the dipoles and the axis.
results.cos_theta_ii : numpy.ndarray
Average :math:`\\cos^2\\theta` between the dipoles and the axis.
results.cos_theta_ij : numpy.ndarray
Average product :math:`\\cos\\theta_i \\cos\\theta_j` over distinct dipole pairs (``i != j``).
"""
def __init__(self, atomgroup: mda.AtomGroup, pdim: int=2, grouping: str='residues', refgroup: mda.AtomGroup | None=None, unwrap: bool=False, pack: bool=True, jitter: float=0.0, concfreq: int=0, output: str='dipangle.dat') -> None:
self._locals = locals()
self.wrap_compound = get_compound(atomgroup)
super().__init__(atomgroup, refgroup=refgroup, unwrap=unwrap, pack=pack, concfreq=concfreq, wrap_compound=self.wrap_compound, jitter=jitter)
self.grouping = grouping
self.pdim = pdim
self.output = output
def _prepare(self) -> None:
logging.info('Analysis of the dipole moment angles (timeseries).')
self.n_residues = self.atomgroup.residues.n_residues
def get_unit_vectors(atomgroup: mda.AtomGroup, grouping: str):
return unit_vectors_planar(atomgroup=atomgroup, grouping=grouping, pdim=self.pdim)
self.get_unit_vectors = get_unit_vectors
self.cos_theta_i = np.empty(self.n_frames)
self.cos_theta_ii = np.empty(self.n_frames)
self.cos_theta_ij = np.empty(self.n_frames)
def _single_frame(self) -> None:
cos_theta = diporder_weights(self.atomgroup, grouping=self.grouping, order_parameter='cos_theta', get_unit_vectors=self.get_unit_vectors)
matrix = np.outer(cos_theta, cos_theta)
trace = matrix.trace()
self.cos_theta_i[self._frame_index] = cos_theta.mean()
self.cos_theta_ii[self._frame_index] = trace / self.n_residues
self.cos_theta_ij[self._frame_index] = matrix.sum() - trace
self.cos_theta_ij[self._frame_index] /= self.n_residues ** 2 - self.n_residues
def _conclude(self) -> None:
self.results.t = self.times
self.results.cos_theta_i = self.cos_theta_i[:self._index]
self.results.cos_theta_ii = self.cos_theta_ii[:self._index]
self.results.cos_theta_ij = self.cos_theta_ij[:self._index]
@render_docs
def save(self) -> None:
"""${SAVE_METHOD_DESCRIPTION}"""
self.savetxt(self.output, np.vstack([self.results.t, self.results.cos_theta_i, self.results.cos_theta_ii, self.results.cos_theta_ij]).T, columns=['t', '<cos(θ_i)>', '<cos(θ_i)cos(θ_i)>', '<cos(θ_i)cos(θ_j)>'])
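# Hedged usage sketch; file names and the selection are illustrative only.
if __name__ == "__main__":
    u = mda.Universe("topol.tpr", "traj.trr")
    ana = DipoleAngle(u.select_atoms("resname SOL"), pdim=2)
    ana.run()
    # cos_theta_i holds the frame-wise mean cos(theta) against the z axis
    print(ana.results.t[:5], ana.results.cos_theta_i[:5])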
|
@render_docs
class DipoleAngle(AnalysisBase):
'''Angle timeseries of dipole moments with respect to an axis.
The analysis can be applied to study the orientational dynamics of water molecules
during an excitation pulse. For more details read
:footcite:t:`elgabartyEnergyTransferHydrogen2020`.
Parameters
----------
${ATOMGROUP_PARAMETER}
${PDIM_PLANAR_PARAMETER}
${GROUPING_PARAMETER}
${BASE_CLASS_PARAMETERS}
${OUTPUT_PARAMETER}
Attributes
----------
results.t : numpy.ndarray
time (ps).
results.cos_theta_i : numpy.ndarray
Average :math:`\cos\theta` between the dipoles and the axis.
results.cos_theta_ii : numpy.ndarray
Average :math:`\cos^2\theta` between the dipoles and the axis.
results.cos_theta_ij : numpy.ndarray
Average product :math:`\cos\theta_i \cos\theta_j` over distinct dipole pairs (``i != j``).
'''
def __init__(self, atomgroup: mda.AtomGroup, pdim: int=2, grouping: str='residues', refgroup: mda.AtomGroup | None=None, unwrap: bool=False, pack: bool=True, jitter: float=0.0, concfreq: int=0, output: str='dipangle.dat') -> None:
pass
def _prepare(self) -> None:
pass
def get_unit_vectors(atomgroup: mda.AtomGroup, grouping: str):
pass
def _single_frame(self) -> None:
pass
def _conclude(self) -> None:
pass
@render_docs
def save(self) -> None:
'''${SAVE_METHOD_DESCRIPTION}'''
pass
| 9
| 2
| 13
| 1
| 12
| 0
| 1
| 0.33
| 1
| 5
| 0
| 0
| 5
| 11
| 5
| 17
| 106
| 13
| 70
| 33
| 51
| 23
| 32
| 20
| 25
| 1
| 2
| 0
| 6
|
328,319
|
maicos-devel/maicos
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/maicos-devel_maicos/src/maicos/modules/dipordercylinder.py
|
maicos.modules.dipordercylinder.DiporderCylinder
|
from ..lib.weights import diporder_weights
import logging
from ..core import ProfileCylinderBase
import MDAnalysis as mda
from ..lib.util import render_docs, unit_vectors_cylinder
@render_docs
class DiporderCylinder(ProfileCylinderBase):
"""Cylindrical dipolar order parameters.
${DIPORDER_DESCRIPTION}
${CORRELATION_INFO_RADIAL}
Parameters
----------
${ATOMGROUP_PARAMETER}
${ORDER_PARAMETER_PARAMETER}
${PDIM_RADIAL_PARAMETER}
${PROFILE_CYLINDER_CLASS_PARAMETERS}
${OUTPUT_PARAMETER}
Attributes
----------
${PROFILE_CYLINDER_CLASS_ATTRIBUTES}
"""
def __init__(self, atomgroup: mda.AtomGroup, order_parameter: str='P0', pdim: str='r', dim: int=2, zmin: float | None=None, zmax: float | None=None, rmin: float=0, rmax: float | None=None, bin_width: float=1, bin_method: str='com', grouping: str='residues', refgroup: mda.AtomGroup | None=None, unwrap: bool=True, pack: bool=True, jitter: float=0.0, concfreq: int=0, output: str='diporder_cylinder.dat') -> None:
normalization = 'volume' if order_parameter == 'P0' else 'number'
def get_unit_vectors(atomgroup: mda.AtomGroup, grouping: str):
return unit_vectors_cylinder(atomgroup=atomgroup, grouping=grouping, bin_method=bin_method, dim=dim, pdim=pdim)
super().__init__(atomgroup=atomgroup, unwrap=unwrap, pack=pack, refgroup=refgroup, jitter=jitter, concfreq=concfreq, dim=dim, zmin=zmin, zmax=zmax, bin_width=bin_width, rmin=rmin, rmax=rmax, grouping=grouping, bin_method=bin_method, output=output, weighting_function=diporder_weights, weighting_function_kwargs={'order_parameter': order_parameter, 'get_unit_vectors': get_unit_vectors}, normalization=normalization)
def _prepare(self):
logging.info('Analysis of the cylindrical dipolar order parameters.')
super()._prepare()
|
@render_docs
class DiporderCylinder(ProfileCylinderBase):
'''Cylindrical dipolar order parameters.
${DIPORDER_DESCRIPTION}
${CORRELATION_INFO_RADIAL}
Parameters
----------
${ATOMGROUP_PARAMETER}
${ORDER_PARAMETER_PARAMETER}
${PDIM_RADIAL_PARAMETER}
${PROFILE_CYLINDER_CLASS_PARAMETERS}
${OUTPUT_PARAMETER}
Attributes
----------
${PROFILE_CYLINDER_CLASS_ATTRIBUTES}
'''
def __init__(self, atomgroup: mda.AtomGroup, order_parameter: str='P0', pdim: str='r', dim: int=2, zmin: float | None=None, zmax: float | None=None, rmin: float=0, rmax: float | None=None, bin_width: float=1, bin_method: str='com', grouping: str='residues', refgroup: mda.AtomGroup | None=None, unwrap: bool=True, pack: bool=True, jitter: float=0.0, concfreq: int=0, output: str='diporder_cylinder.dat') -> None:
pass
def get_unit_vectors(atomgroup: mda.AtomGroup, grouping: str):
pass
def _prepare(self):
pass
| 5
| 1
| 22
| 1
| 21
| 0
| 1
| 0.25
| 1
| 5
| 0
| 0
| 2
| 0
| 2
| 36
| 79
| 9
| 56
| 24
| 33
| 14
| 9
| 5
| 5
| 2
| 5
| 0
| 4
|
328,320
|
maicos-devel/maicos
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/maicos-devel_maicos/src/maicos/modules/diporderplanar.py
|
maicos.modules.diporderplanar.DiporderPlanar
|
import logging
from ..lib.util import render_docs, unit_vectors_planar
import MDAnalysis as mda
from ..lib.weights import diporder_weights
from ..core import ProfilePlanarBase
@render_docs
class DiporderPlanar(ProfilePlanarBase):
"""Cartesian dipolar order parameters.
${DIPORDER_DESCRIPTION}
${CORRELATION_INFO_PLANAR}
Parameters
----------
${ATOMGROUP_PARAMETER}
${ORDER_PARAMETER_PARAMETER}
${PDIM_PLANAR_PARAMETER}
${PROFILE_PLANAR_CLASS_PARAMETERS}
${OUTPUT_PARAMETER}
Attributes
----------
${PROFILE_PLANAR_CLASS_ATTRIBUTES}
"""
def __init__(self, atomgroup: mda.AtomGroup, order_parameter: str='P0', pdim: int=2, dim: int=2, zmin: float | None=None, zmax: float | None=None, bin_width: float=1, bin_method: str='com', grouping: str='residues', sym: bool=False, refgroup: mda.AtomGroup | None=None, unwrap: bool=True, pack: bool=True, jitter: float=0.0, concfreq: int=0, output: str='diporder_planar.dat') -> None:
self._locals = locals()
normalization = 'volume' if order_parameter == 'P0' else 'number'
def get_unit_vectors(atomgroup: mda.AtomGroup, grouping: str):
return unit_vectors_planar(atomgroup=atomgroup, grouping=grouping, pdim=pdim)
super().__init__(atomgroup=atomgroup, unwrap=unwrap, pack=pack, refgroup=refgroup, jitter=jitter, concfreq=concfreq, dim=dim, zmin=zmin, zmax=zmax, bin_width=bin_width, sym=sym, sym_odd=True, grouping=grouping, bin_method=bin_method, output=output, weighting_function=diporder_weights, weighting_function_kwargs={'order_parameter': order_parameter, 'get_unit_vectors': get_unit_vectors}, normalization=normalization)
def _prepare(self):
logging.info('Analysis of the cartesian dipolar order parameters.')
super()._prepare()
|
@render_docs
class DiporderPlanar(ProfilePlanarBase):
'''Cartesian dipolar order parameters.
${DIPORDER_DESCRIPTION}
${CORRELATION_INFO_PLANAR}
Parameters
----------
${ATOMGROUP_PARAMETER}
${ORDER_PARAMETER_PARAMETER}
${PDIM_PLANAR_PARAMETER}
${PROFILE_PLANAR_CLASS_PARAMETERS}
${OUTPUT_PARAMETER}
Attributes
----------
${PROFILE_PLANAR_CLASS_ATTRIBUTES}
'''
def __init__(self, atomgroup: mda.AtomGroup, order_parameter: str='P0', pdim: int=2, dim: int=2, zmin: float | None=None, zmax: float | None=None, bin_width: float=1, bin_method: str='com', grouping: str='residues', sym: bool=False, refgroup: mda.AtomGroup | None=None, unwrap: bool=True, pack: bool=True, jitter: float=0.0, concfreq: int=0, output: str='diporder_planar.dat') -> None:
pass
def get_unit_vectors(atomgroup: mda.AtomGroup, grouping: str):
pass
def _prepare(self):
pass
| 5
| 1
| 19
| 1
| 18
| 0
| 1
| 0.27
| 1
| 5
| 0
| 0
| 2
| 1
| 2
| 31
| 75
| 9
| 52
| 24
| 30
| 14
| 10
| 6
| 6
| 2
| 4
| 0
| 4
|
328,321
|
maicos-devel/maicos
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/maicos-devel_maicos/src/maicos/modules/dipordersphere.py
|
maicos.modules.dipordersphere.DiporderSphere
|
from ..lib.util import render_docs, unit_vectors_sphere
from ..lib.weights import diporder_weights
import logging
import MDAnalysis as mda
from ..core import ProfileSphereBase
@render_docs
class DiporderSphere(ProfileSphereBase):
"""Spherical dipolar order parameters.
${DIPORDER_DESCRIPTION}
${CORRELATION_INFO_RADIAL}
Parameters
----------
${ATOMGROUP_PARAMETER}
${ORDER_PARAMETER_PARAMETER}
${PROFILE_SPHERE_CLASS_PARAMETERS}
${OUTPUT_PARAMETER}
Attributes
----------
${PROFILE_SPHERE_CLASS_ATTRIBUTES}
"""
def __init__(self, atomgroup: mda.AtomGroup, order_parameter: str='P0', rmin: float=0, rmax: float | None=None, bin_width: float=1, bin_method: str='com', grouping: str='residues', refgroup: mda.AtomGroup | None=None, unwrap: bool=True, pack: bool=True, jitter: float=0.0, concfreq: int=0, output: str='diporder_sphere.dat') -> None:
normalization = 'volume' if order_parameter == 'P0' else 'number'
def get_unit_vectors(atomgroup: mda.AtomGroup, grouping: str):
return unit_vectors_sphere(atomgroup=atomgroup, grouping=grouping, bin_method=bin_method)
super().__init__(atomgroup=atomgroup, unwrap=unwrap, pack=pack, jitter=jitter, refgroup=refgroup, concfreq=concfreq, rmin=rmin, rmax=rmax, bin_width=bin_width, grouping=grouping, bin_method=bin_method, output=output, weighting_function=diporder_weights, weighting_function_kwargs={'order_parameter': order_parameter, 'get_unit_vectors': get_unit_vectors}, normalization=normalization)
def _prepare(self):
logging.info('Analysis of the spherical dipolar order parameters.')
super()._prepare()
|
@render_docs
class DiporderSphere(ProfileSphereBase):
'''Spherical dipolar order parameters.
${DIPORDER_DESCRIPTION}
${CORRELATION_INFO_RADIAL}
Parameters
----------
${ATOMGROUP_PARAMETER}
${ORDER_PARAMETER_PARAMETER}
${PROFILE_SPHERE_CLASS_PARAMETERS}
${OUTPUT_PARAMETER}
Attributes
----------
${PROFILE_SPHERE_CLASS_ATTRIBUTES}
'''
def __init__(self, atomgroup: mda.AtomGroup, order_parameter: str='P0', rmin: float=0, rmax: float | None=None, bin_width: float=1, bin_method: str='com', grouping: str='residues', refgroup: mda.AtomGroup | None=None, unwrap: bool=True, pack: bool=True, jitter: float=0.0, concfreq: int=0, output: str='diporder_sphere.dat') -> None:
pass
def get_unit_vectors(atomgroup: mda.AtomGroup, grouping: str):
pass
def _prepare(self):
pass
| 5
| 1
| 17
| 1
| 16
| 0
| 1
| 0.29
| 1
| 5
| 0
| 0
| 2
| 0
| 2
| 30
| 67
| 9
| 45
| 20
| 26
| 13
| 9
| 5
| 5
| 2
| 4
| 0
| 4
|
328,322
|
maicos-devel/maicos
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/maicos-devel_maicos/src/maicos/modules/diporderstructurefactor.py
|
maicos.modules.diporderstructurefactor.DiporderStructureFactor
|
import MDAnalysis as mda
from ..lib.weights import diporder_weights
import numpy as np
from ..lib.util import get_center, render_docs, unit_vectors_planar
from ..core import AnalysisBase
from ..lib.math import structure_factor
import logging
@render_docs
class DiporderStructureFactor(AnalysisBase):
"""Structure factor for dipoles.
Extension of the standard structure factor :math:`S(q)`, weighting it with the
normalized dipole moment :math:`\\hat{\\boldsymbol{\\mu}}` of a ``group``
according to
.. math::
S(q)_{\\hat{\\boldsymbol{\\mu}} \\hat{\\boldsymbol{\\mu}}} = \\left \\langle
\\frac{1}{N} \\sum_{i,j=1}^N \\hat \\mu_i \\hat \\mu_j \\, \\exp(-i\\boldsymbol q\\cdot
[\\boldsymbol r_i - \\boldsymbol r_j]) \\right \\rangle
For the correlation time estimation the module will use the value of the structure
factor with the smallest possible :math:`q` value.
For a detailed example of the usage refer to the :ref:`how-to on dipolar
correlation functions <howto-spatial-dipole-dipole-correlations>`. For general
details on the theory behind the structure factor refer to :ref:`saxs-explanations`.
Parameters
----------
${ATOMGROUP_PARAMETER}
${Q_SPACE_PARAMETERS}
${BIN_METHOD_PARAMETER}
${GROUPING_PARAMETER}
${BASE_CLASS_PARAMETERS}
${OUTPUT_PARAMETER}
Attributes
----------
results.scattering_vectors : numpy.ndarray
Lengths of the binned scattering vectors :math:`q` (1/Å)
results.structure_factors : numpy.ndarray
Structure factor
"""
def __init__(self, atomgroup: mda.AtomGroup, qmin: float=0, qmax: float=6, dq: float=0.01, bin_method: str='com', grouping: str='molecules', refgroup: mda.AtomGroup | None=None, unwrap: bool=True, pack: bool=True, jitter: float=0.0, concfreq: int=0, output: str='sq.dat') -> None:
self._locals = locals()
super().__init__(atomgroup, unwrap=unwrap, pack=pack, refgroup=refgroup, jitter=jitter, wrap_compound=grouping, concfreq=concfreq)
self.bin_method = str(bin_method).lower()
self.qmin = qmin
self.qmax = qmax
self.dq = dq
self.output = output
def _prepare(self) -> None:
logging.info('Analysis of the structure factor of dipoles.')
self.n_bins = int(np.ceil((self.qmax - self.qmin) / self.dq))
def _single_frame(self) -> float:
box = np.diag(mda.lib.mdamath.triclinic_vectors(self._ts.dimensions))
positions = get_center(atomgroup=self.atomgroup, bin_method=self.bin_method, compound=self.wrap_compound)
self._obs.structure_factors = np.zeros(self.n_bins)
for pdim in range(3):
def get_unit_vectors(atomgroup: mda.AtomGroup, grouping: str, pdim: int=pdim):
return unit_vectors_planar(atomgroup=atomgroup, grouping=grouping, pdim=pdim)
weights = diporder_weights(atomgroup=self.atomgroup, grouping=self.wrap_compound, order_parameter='cos_theta', get_unit_vectors=get_unit_vectors)
scattering_vectors, structure_factors = structure_factor(np.double(positions), np.double(box), self.qmin, self.qmax, 0, np.pi, weights)
scattering_vectors = scattering_vectors.flatten()
structure_factors = structure_factors.flatten()
nonzeros = np.where(structure_factors != 0)[0]
scattering_vectors = scattering_vectors[nonzeros]
structure_factors = structure_factors[nonzeros]
histogram_kwargs = dict(a=scattering_vectors, bins=self.n_bins, range=(self.qmin, self.qmax))
structure_factors_binned, _ = np.histogram(weights=structure_factors, **histogram_kwargs)
bincount, _ = np.histogram(weights=None, **histogram_kwargs)
with np.errstate(invalid='ignore'):
structure_factors_binned /= bincount
self._obs.structure_factors += np.nan_to_num(structure_factors_binned)
self._obs.structure_factors /= len(positions)
return self._obs.structure_factors[-1]
def _conclude(self) -> None:
scattering_vectors = np.arange(self.qmin, self.qmax, self.dq) + 0.5 * self.dq
nonzeros = np.where(self.means.structure_factors != 0)[0]
structure_factors = self.means.structure_factors[nonzeros]
self.results.scattering_vectors = scattering_vectors[nonzeros]
self.results.structure_factors = structure_factors
@render_docs
def save(self) -> None:
"""${SAVE_METHOD_DESCRIPTION}"""
self.savetxt(self.output, np.vstack([self.results.scattering_vectors, self.results.structure_factors]).T, columns=['q (1/Å)', 'S(q) (arb. units)'])
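# Hedged sketch of the quoted definition, evaluated brute force for a toy set
# of unit dipoles (no q binning, single q vector); independent of the class
# above and meant only to illustrate the double sum.
def _sq_dipole_brute_force(q_vec, positions, mu_hat):
    # exp(-i q . r_i) for every particle
    phases = np.exp(-1j * positions @ q_vec)
    # sum_i mu_hat_i exp(-i q . r_i), kept per Cartesian component
    amp = (mu_hat.T * phases).sum(axis=1)
    # |amp|^2 summed over components equals the i,j double sum in the docstring
    return float((amp * amp.conj()).real.sum()) / len(positions)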
|
@render_docs
class DiporderStructureFactor(AnalysisBase):
'''Structure factor for dipoles.
Extension of the standard structure factor :math:`S(q)`, weighting it with the
normalized dipole moment :math:`\hat{\boldsymbol{\mu}}` of a ``group``
according to
.. math::
S(q)_{\hat{\boldsymbol{\mu}} \hat{\boldsymbol{\mu}}} = \left \langle
\frac{1}{N} \sum_{i,j=1}^N \hat \mu_i \hat \mu_j \, \exp(-i\boldsymbol q\cdot
[\boldsymbol r_i - \boldsymbol r_j]) \right \rangle
For the correlation time estimation the module will use the value of the structure
factor with the smallest possible :math:`q` value.
For a detailed example of the usage refer to the :ref:`how-to on dipolar
correlation functions <howto-spatial-dipole-dipole-correlations>`. For general
details on the theory behind the structure factor refer to :ref:`saxs-explanations`.
Parameters
----------
${ATOMGROUP_PARAMETER}
${Q_SPACE_PARAMETERS}
${BIN_METHOD_PARAMETER}
${GROUPING_PARAMETER}
${BASE_CLASS_PARAMETERS}
${OUTPUT_PARAMETER}
Attributes
----------
results.scattering_vectors : numpy.ndarray
Lengths of the binned scattering vectors :math:`q` (1/Å)
results.structure_factors : numpy.ndarray
Structure factor
'''
def __init__(self, atomgroup: mda.AtomGroup, qmin: float=0, qmax: float=6, dq: float=0.01, bin_method: str='com', grouping: str='molecules', refgroup: mda.AtomGroup | None=None, unwrap: bool=True, pack: bool=True, jitter: float=0.0, concfreq: int=0, output: str='sq.dat') -> None:
pass
def _prepare(self) -> None:
pass
def _single_frame(self) -> float:
pass
def get_unit_vectors(atomgroup: mda.AtomGroup, grouping: str, pdim: int=pdim):
pass
def _conclude(self) -> None:
pass
@render_docs
def save(self) -> None:
'''${SAVE_METHOD_DESCRIPTION}'''
pass
| 9
| 2
| 20
| 2
| 17
| 1
| 1
| 0.32
| 1
| 8
| 0
| 0
| 5
| 9
| 5
| 17
| 155
| 26
| 98
| 45
| 74
| 31
| 42
| 26
| 35
| 2
| 2
| 2
| 7
|
328,323
|
maicos-devel/maicos
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/maicos-devel_maicos/src/maicos/modules/kineticenergy.py
|
maicos.modules.kineticenergy.KineticEnergy
|
from ..core import AnalysisBase
import logging
from ..lib.util import get_compound, render_docs
import MDAnalysis as mda
import numpy as np
@render_docs
class KineticEnergy(AnalysisBase):
"""Kinetic energy timeseries.
The kinetic energy function computes the translational and rotational kinetic
energy with respect to the molecular center (center of mass or center of
charge) over a molecular dynamics trajectory.
The analysis can be applied to study the dynamics of water molecules during an
excitation pulse. For more details read
:footcite:t:`elgabartyEnergyTransferHydrogen2020`.
Parameters
----------
${ATOMGROUP_PARAMETER}
refpoint : str
reference point for molecular center: center of mass (``"com"``) or center of
charge (``"coc"``).
${BASE_CLASS_PARAMETERS}
${OUTPUT_PARAMETER}
Attributes
----------
results.t : numpy.ndarray
time (ps).
results.trans : numpy.ndarray
translational kinetic energy (kJ/mol).
results.rot : numpy.ndarray
rotational kinetic energy (kJ/mol).
"""
def __init__(self, atomgroup: mda.AtomGroup, refpoint: str='com', refgroup: mda.AtomGroup | None=None, unwrap: bool=False, pack: bool=True, jitter: float=0.0, concfreq: int=0, output: str='ke.dat') -> None:
self._locals = locals()
self.comp = get_compound(atomgroup)
super().__init__(atomgroup, unwrap=unwrap, pack=pack, refgroup=refgroup, jitter=jitter, concfreq=concfreq, wrap_compound=self.comp)
self.output = output
self.refpoint = refpoint.lower()
def _prepare(self) -> None:
"""Set things up before the analysis loop begins."""
logging.info('Analysis of the kinetic energy timeseries.')
if self.refpoint not in ['com', 'coc']:
raise ValueError(f"Invalid choice for dens: {self.refpoint} (choose from 'com' or 'coc')")
self.masses = self.atomgroup.accumulate(self.atomgroup.masses, compound=self.comp)
self.abscharges = self.atomgroup.accumulate(np.abs(self.atomgroup.charges), compound=self.comp)
self.E_kin = np.zeros(self.n_frames)
self.E_center = np.zeros(self.n_frames)
def _single_frame(self) -> None:
self.E_kin[self._frame_index] = np.dot(self.atomgroup.masses, np.linalg.norm(self.atomgroup.velocities, axis=1) ** 2)
if self.refpoint == 'com':
massvel = self.atomgroup.velocities * self.atomgroup.masses[:, np.newaxis]
v = self.atomgroup.accumulate(massvel, compound=get_compound(self.atomgroup))
v /= self.masses[:, np.newaxis]
elif self.refpoint == 'coc':
abschargevel = self.atomgroup.velocities * np.abs(self.atomgroup.charges)[:, np.newaxis]
v = self.atomgroup.accumulate(abschargevel, compound=get_compound(self.atomgroup))
v /= self.abscharges[:, np.newaxis]
self.E_center[self._frame_index] = np.dot(self.masses, np.linalg.norm(v, axis=1) ** 2)
def _conclude(self) -> None:
self.results.t = self.times
self.results.trans = self.E_center / 2 / 100
self.results.rot = (self.E_kin - self.E_center) / 2 / 100
@render_docs
def save(self) -> None:
"""${SAVE_METHOD_DESCRIPTION}"""
self.savetxt(self.output, np.vstack([self.results.t, self.results.trans, self.results.rot]).T, columns=['t', 'E_kin^trans [kJ/mol]', 'E_kin^rot [kJ/mol]'])
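# Note on the 1/2/100 factors in _conclude above: MDAnalysis stores masses in
# u and velocities in Å/ps, so the accumulated m v^2 is in u Å^2 ps^-2;
# 1 u Å^2 ps^-2 = 0.01 kJ/mol, hence dividing by 2 (the kinetic-energy 1/2)
# and by 100 converts to kJ/mol.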
|
@render_docs
class KineticEnergy(AnalysisBase):
'''Kinetic energy timeseries.
The kinetic energy function computes the translational and rotational kinetic
energy with respect to the molecular center (center of mass or center of
charge) over a molecular dynamics trajectory.
The analysis can be applied to study the dynamics of water molecules during an
excitation pulse. For more details read
:footcite:t:`elgabartyEnergyTransferHydrogen2020`.
Parameters
----------
${ATOMGROUP_PARAMETER}
refpoint : str
reference point for molecular center: center of mass (``"com"``) or center of
charge (``"coc"``).
${BASE_CLASS_PARAMETERS}
${OUTPUT_PARAMETER}
Attributes
----------
results.t : numpy.ndarray
time (ps).
results.trans : numpy.ndarray
translational kinetic energy (kJ/mol).
results.rot : numpy.ndarray
rotational kinetic energy (kJ/mol).
'''
def __init__(self, atomgroup: mda.AtomGroup, refpoint: str='com', refgroup: mda.AtomGroup | None=None, unwrap: bool=False, pack: bool=True, jitter: float=0.0, concfreq: int=0, output: str='ke.dat') -> None:
pass
def _prepare(self) -> None:
'''Set things up before the analysis loop begins.'''
pass
def _single_frame(self) -> None:
pass
def _conclude(self) -> None:
pass
@render_docs
def save(self) -> None:
'''${SAVE_METHOD_DESCRIPTION}'''
pass
| 8
| 3
| 16
| 1
| 14
| 1
| 2
| 0.38
| 1
| 6
| 0
| 0
| 5
| 8
| 5
| 17
| 118
| 17
| 73
| 28
| 56
| 28
| 31
| 17
| 25
| 3
| 2
| 1
| 8
|
328,324
|
maicos-devel/maicos
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/maicos-devel_maicos/src/maicos/modules/pdfcylinder.py
|
maicos.modules.pdfcylinder.PDFCylinder
|
from MDAnalysis.lib.distances import capped_distance
import numpy as np
import MDAnalysis as mda
import logging
from ..lib.math import transform_cylinder
from ..lib.util import get_center, get_compound, render_docs
from ..core import CylinderBase
@render_docs
class PDFCylinder(CylinderBase):
"""Shell-wise one-dimensional (cylindrical) pair distribution functions.
The one-dimensional pair distribution functions :math:`g_{\\text{1d}}(\\phi)`
and :math:`g_{\\text{1d}}(z)` describe the distribution of pairs of particles
which lie on the same cylinder, along the angular and axial directions
respectively. These functions can be used in cylindrical systems that are
inhomogeneous along the radial coordinate and homogeneous in the angular and
axial directions. They give the average number density of :math:`g2` as a
function of the angular and axial distance, respectively, from a :math:`g1` atom.
Then the angular pair distribution function is
.. math::
g_{\\text{1d}}(\\phi) = \\left \\langle \\sum_{i}^{N_{g_1}}
\\sum_{j}^{N_{g2}} \\delta(\\phi - \\phi_{ij}) \\delta(R_{ij}) \\delta(z_{ij})
\\right \\rangle
And the axial pair distribution function is
.. math::
g_{\\text{1d}}(z) = \\left \\langle \\sum_{i}^{N_{g_1}}
\\sum_{j}^{N_{g2}} \\delta(z - z_{ij}) \\delta(R_{ij}) \\delta(\\phi_{ij})
\\right \\rangle
Even though, for consistency reasons, the results are called pair distribution
functions, the output is not unitless. The default output has the dimension of
number/volume in :math:`Å^{-3}`. If ``density`` is set to :py:obj:`True`, the
output is normalised by the density of :math:`g2`.
Parameters
----------
${ATOMGROUP_PARAMETER}
${PDF_PARAMETERS}
bin_width_pdf_z : float
Binwidth of bins in the histogram of the axial PDF (Å).
bin_width_pdf_phi : float
Binwidth of bins in the histogram of the angular PDF (Å).
drwidth : float
Radial width of a PDF cylindrical shell (Å); also used for the axial and
angular (arc) slice widths.
dmin: float
the minimum pairwise distance between 'g1' and 'g2' (Å).
dmax : float
the maximum pairwise distance between 'g1' and 'g2' (Å).
density : bool
normalise the PDF by the density of 'g2' (:math:`Å^{-3}`).
origin : numpy.ndarray
Set origin of the cylindrical coordinate system (x,y,z). If :obj:`None` the
origin will be set according to the ``refgroup`` parameter.
${CYLINDER_CLASS_PARAMETERS}
${BIN_METHOD_PARAMETER}
${BASE_CLASS_PARAMETERS}
${OUTPUT_PARAMETER}
Attributes
----------
${CYLINDER_CLASS_ATTRIBUTES}
results.bins_phi : numpy.ndarray
Angular distances to which the PDF is calculated with shape (`nbins_pdf_phi`) (rad)
results.bins_z : numpy.ndarray
Axial distances to which the PDF is calculated with shape (`nbins_pdf_z`) (Å)
results.pdf_phi : numpy.ndarray
Angular PDF with shape (`n_bins`, `nbins_pdf_phi`) (:math:`\\text{Å}^{-3}`)
results.pdf_z : numpy.ndarray
Axial PDF with shape (`n_bins`, `nbins_pdf_z`) (:math:`\\text{Å}^{-3}`)
"""
def __init__(self, g1: mda.AtomGroup, g2: mda.AtomGroup | None=None, bin_width_pdf_z: float=0.3, bin_width_pdf_phi: float=0.1, drwidth: float=0.1, dmin: float | None=None, dmax: float | None=None, density: bool=False, origin: np.ndarray | None=None, dim: int=2, zmin: float | None=None, zmax: float | None=None, rmin: float=0, rmax: float | None=None, bin_width: float=1, bin_method: str='com', refgroup: mda.AtomGroup | None=None, unwrap: bool=False, pack: bool=True, jitter: float=0.0, concfreq: int=0, output: str='pdf.dat') -> None:
self.comp_1 = get_compound(g1)
super().__init__(atomgroup=g1, refgroup=refgroup, unwrap=unwrap, pack=pack, concfreq=concfreq, jitter=jitter, dim=dim, rmin=rmin, rmax=rmax, zmin=zmin, zmax=zmax, bin_width=bin_width, wrap_compound=self.comp_1)
self.g1 = g1
if g2 is None:
self.g2 = g1
else:
self.g2 = g2
self.bin_width_pdf_phi = bin_width_pdf_phi
self.bin_width_pdf_z = bin_width_pdf_z
self.drwidth = drwidth
self.bin_width = bin_width
self.output = output
self.bin_method = bin_method.lower()
if origin is not None and origin.shape != (3,):
raise ValueError(f'Origin has length {origin.shape} but only (3,) is allowed.')
self.origin = origin
self.comp_2 = get_compound(self.g2)
self.nbins_pdf_phi = 100
self.nbins_pdf_z = 100
self.dmin = dmin
self.dmax = dmax
self.density = density
def _prepare(self) -> None:
super()._prepare()
logging.info('Analysis of the cylindrical pair distribution function.')
if self.origin is None:
self.origin = self.box_center
if self.dmin is None:
self.dmin = 0
if self.dmax is None:
self.dmax = self.box_center[self.dim]
elif self.dmax > self.box_center[self.dim]:
raise ValueError('Axial range of PDF exceeds half of the box size. This will lead to unexpected results.')
if self.bin_width_pdf_z > 0:
self.nbins_pdf_z = int(np.ceil((self.dmax - self.dmin) / self.bin_width_pdf_z))
self.bin_width_pdf_z = (self.dmax - self.dmin) / self.nbins_pdf_z
else:
raise ValueError('PDF bin_width must be a positive number.')
if self.bin_width_pdf_phi > 0:
self.nbins_pdf_phi = int(np.ceil(np.pi / self.bin_width_pdf_phi))
self.bin_width_pdf_phi = np.pi / self.nbins_pdf_phi
else:
raise ValueError('PDF bin_width must be a positive number.')
if self.bin_method not in ['cog', 'com', 'coc']:
raise ValueError(f'{self.bin_method} is an unknown binning method. Use `cog`, `com` or `coc`.')
logging.info(f'Using {self.nbins_pdf_phi} pdf bins in phi direction and {self.nbins_pdf_z} in z direction.')
def _single_frame(self) -> None:
super()._single_frame()
self._obs.n_g1 = np.zeros((self.n_bins, 1))
self._obs.n_g2 = np.zeros((self.n_bins, 1))
self._obs.count_phi = np.zeros((self.n_bins, self.nbins_pdf_phi))
self._obs.count_z = np.zeros((self.n_bins, self.nbins_pdf_z))
g1_bin_positions = get_center(atomgroup=self.g1, bin_method=self.bin_method, compound=self.comp_1)
g2_bin_positions = get_center(atomgroup=self.g2, bin_method=self.bin_method, compound=self.comp_2)
g1_bin_positions_cyl = transform_cylinder(g1_bin_positions, origin=self.origin, dim=self.dim)
g2_bin_positions_cyl = transform_cylinder(g2_bin_positions, origin=self.origin, dim=self.dim)
for r_bin in range(0, self.n_bins):
g1_in_rbin_positions = g1_bin_positions_cyl[np.logical_and(g1_bin_positions_cyl[:, 0] >= self._obs.bin_edges[r_bin], g1_bin_positions_cyl[:, 0] < self._obs.bin_edges[r_bin + 1])]
g2_in_rbin_positions = g2_bin_positions_cyl[np.logical_and(g2_bin_positions_cyl[:, 0] >= self._obs.bin_edges[r_bin] - self.drwidth, g2_bin_positions_cyl[:, 0] < self._obs.bin_edges[r_bin + 1] + self.drwidth)]
self._obs.n_g1[r_bin] = len(g1_in_rbin_positions)
self._obs.n_g2[r_bin] = len(g2_in_rbin_positions)
r_pairs = capped_distance(g1_in_rbin_positions * [1, 0, 0], g2_in_rbin_positions * [1, 0, 0], self.drwidth, box=None, return_distances=False)
phi_pairs = capped_distance(g1_in_rbin_positions * [0, 1, 0], g2_in_rbin_positions * [0, 1, 0], self.drwidth / self._obs.bin_pos[r_bin], box=[0, 2 * np.pi, 0, 90, 90, 90], return_distances=False)
z_pairs = capped_distance(g1_in_rbin_positions * [0, 0, 1], g2_in_rbin_positions * [0, 0, 1], self.drwidth, box=[0, 0, self._universe.dimensions[self.dim], 90, 90, 90], return_distances=False)
phi_dist_pairs, phi_distances = capped_distance(g1_in_rbin_positions * [0, 1, 0], g2_in_rbin_positions * [0, 1, 0], np.pi, box=[0, 2 * np.pi, 0, 90, 90, 90])
z_dist_pairs, z_distances = capped_distance(g1_in_rbin_positions * [0, 0, 1], g2_in_rbin_positions * [0, 0, 1], self.dmax, box=[0, 0, self._universe.dimensions[self.dim], 90, 90, 90])
r_pairs_encode = r_pairs[:, 0] + self._obs.n_g2[r_bin] * r_pairs[:, 1]
phi_pairs_encode = phi_pairs[:, 0] + self._obs.n_g2[r_bin] * phi_pairs[:, 1]
z_pairs_encode = z_pairs[:, 0] + self._obs.n_g2[r_bin] * z_pairs[:, 1]
phi_dist_pairs_encode = phi_dist_pairs[:, 0] + self._obs.n_g2[r_bin] * phi_dist_pairs[:, 1]
z_dist_pairs_encode = z_dist_pairs[:, 0] + self._obs.n_g2[r_bin] * z_dist_pairs[:, 1]
mask_in_dr_and_dz = np.isin(phi_dist_pairs_encode, r_pairs_encode) * np.isin(phi_dist_pairs_encode, z_pairs_encode)
mask_in_dr_and_dphi = np.isin(z_dist_pairs_encode, r_pairs_encode) * np.isin(z_dist_pairs_encode, phi_pairs_encode)
mask_same_atom = phi_distances > 0
relevant_phi_distances = phi_distances[mask_in_dr_and_dz * mask_same_atom]
mask_same_atom = z_distances > 0
relevant_z_distances = z_distances[mask_in_dr_and_dphi * mask_same_atom]
self._obs.count_phi[r_bin] = np.histogram(relevant_phi_distances, bins=self.nbins_pdf_phi, range=(0, np.pi))[0]
self._obs.count_z[r_bin] = np.histogram(relevant_z_distances, bins=self.nbins_pdf_z, range=(self.dmin, self.dmax))[0]
def _conclude(self) -> None:
super()._conclude()
g2_density = self.means.n_g2 / self.means.bin_volume if self.density else 1
phi_norm = np.array([2 * (self.means.bin_edges[1:] + self.means.bin_edges[:-1]) / 2 * self.bin_width_pdf_phi * 2 * self.drwidth * 2 * self.drwidth]).T * g2_density
z_norm = 2 * self.bin_width_pdf_z * 2 * self.drwidth * 2 * self.drwidth * g2_density
with np.errstate(invalid='ignore', divide='ignore'):
pdf_phi = self.means.count_phi / self.means.n_g1 / phi_norm
self.results.pdf_phi = np.nan_to_num(pdf_phi, nan=0, posinf=0, neginf=0)
with np.errstate(invalid='ignore', divide='ignore'):
pdf_z = self.means.count_z / self.means.n_g1 / z_norm
self.results.pdf_z = np.nan_to_num(pdf_z, nan=0, posinf=0, neginf=0)
edges_phi = np.histogram([-1], bins=self.nbins_pdf_phi, range=(0, np.pi))[1]
edges_z = np.histogram([-1], bins=self.nbins_pdf_z, range=(self.dmin, self.dmax))[1]
self.results.bins_phi = 0.5 * (edges_phi[1:] + edges_phi[:-1])
self.results.bins_z = 0.5 * (edges_z[1:] + edges_z[:-1])
@render_docs
def save(self) -> None:
"""${SAVE_METHOD_DESCRIPTION}"""
columns = ['r [Å]']
for r in self.results.bin_pos:
columns.append(f'pdf at {r:.2f} Å [Å^-3]')
self.savetxt('phi_' + self.output, np.hstack([self.results.bins_phi[:, np.newaxis], self.results.pdf_phi.T]), columns=columns)
self.savetxt('z_' + self.output, np.hstack([self.results.bins_z[:, np.newaxis], self.results.pdf_z.T]), columns=columns)
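# Hedged usage sketch; topology/trajectory names and the selection are
# illustrative assumptions.
if __name__ == "__main__":
    u = mda.Universe("topol.tpr", "traj.trr")
    oxygens = u.select_atoms("name OW")
    pdf = PDFCylinder(oxygens, bin_width=2, drwidth=0.1)
    pdf.run()
    pdf.save()  # writes phi_<output> and z_<output>, one column per radial shell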
|
@render_docs
class PDFCylinder(CylinderBase):
'''Shell-wise one-dimensional (cylindrical) pair distribution functions.
The one-dimensional pair distribution functions :math:`g_{\text{1d}}(\phi)`
and :math:`g_{\text{1d}}(z)` describe the distribution of pairs of particles
which lie on the same cylinder, along the angular and axial directions
respectively. These functions can be used in cylindrical systems that are
inhomogeneous along the radial coordinate and homogeneous in the angular and
axial directions. They give the average number density of :math:`g2` as a
function of the angular and axial distance, respectively, from a :math:`g1` atom.
Then the angular pair distribution function is
.. math::
g_{\text{1d}}(\phi) = \left \langle \sum_{i}^{N_{g_1}}
\sum_{j}^{N_{g2}} \delta(\phi - \phi_{ij}) \delta(R_{ij}) \delta(z_{ij})
\right \rangle
And the axial pair distribution function is
.. math::
g_{\text{1d}}(z) = \left \langle \sum_{i}^{N_{g_1}}
\sum_{j}^{N_{g2}} \delta(z - z_{ij}) \delta(R_{ij}) \delta(\phi_{ij})
\right \rangle
Even though, for consistency reasons, the results are called pair distribution
functions, the output is not unitless. The default output has the dimension of
number/volume in :math:`Å^{-3}`. If ``density`` is set to :py:obj:`True`, the
output is normalised by the density of :math:`g2`.
Parameters
----------
${ATOMGROUP_PARAMETER}
${PDF_PARAMETERS}
bin_width_pdf_z : float
Binwidth of bins in the histogram of the axial PDF (Å).
bin_width_pdf_phi : float
Binwidth of bins in the histogram of the angular PDF (Å).
drwidth : float
Radial width of a PDF cylindrical shell (Å); also used for the axial and
angular (arc) slice widths.
dmin: float
the minimum pairwise distance between 'g1' and 'g2' (Å).
dmax : float
the maximum pairwise distance between 'g1' and 'g2' (Å).
density : bool
normalise the PDF by the density of 'g2' (:math:`Å^{-3}`).
origin : numpy.ndarray
Set origin of the cylindrical coordinate system (x,y,z). If :obj:`None` the
origin will be set according to the ``refgroup`` parameter.
${CYLINDER_CLASS_PARAMETERS}
${BIN_METHOD_PARAMETER}
${BASE_CLASS_PARAMETERS}
${OUTPUT_PARAMETER}
Attributes
----------
${CYLINDER_CLASS_ATTRIBUTES}
results.bins_phi : numpy.ndarray
Angular distances to which the PDF is calculated with shape (`nbins_pdf_phi`) (rad)
results.bins_z : numpy.ndarray
Axial distances to which the PDF is calculated with shape (`nbins_pdf_z`) (Å)
results.pdf_phi : numpy.ndarray
Angular PDF with shape (`n_bins`, `nbins_pdf_phi`) (:math:`\text{Å}^{-3}`)
results.pdf_z : numpy.ndarray
Axial PDF with shape (`n_bins`, `nbins_pdf_z`) (:math:`\text{Å}^{-3}`)
'''
def __init__(self, g1: mda.AtomGroup, g2: mda.AtomGroup | None=None, bin_width_pdf_z: float=0.3, bin_width_pdf_phi: float=0.1, drwidth: float=0.1, dmin: float | None=None, dmax: float | None=None, density: bool=False, origin: np.ndarray | None=None, dim: int=2, zmin: float | None=None, zmax: float | None=None, rmin: float=0, rmax: float | None=None, bin_width: float=1, bin_method: str='com', refgroup: mda.AtomGroup | None=None, unwrap: bool=False, pack: bool=True, jitter: float=0.0, concfreq: int=0, output: str='pdf.dat') -> None:
pass
def _prepare(self) -> None:
pass
def _single_frame(self) -> None:
pass
def _conclude(self) -> None:
pass
@render_docs
def save(self) -> None:
'''${SAVE_METHOD_DESCRIPTION}'''
pass
| 8
| 2
| 63
| 7
| 51
| 6
| 3
| 0.34
| 1
| 8
| 0
| 0
| 5
| 16
| 5
| 28
| 392
| 52
| 257
| 78
| 226
| 87
| 99
| 53
| 93
| 8
| 4
| 2
| 17
|
328,325
|
maicos-devel/maicos
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/maicos-devel_maicos/src/maicos/modules/pdfplanar.py
|
maicos.modules.pdfplanar.PDFPlanar
|
from MDAnalysis.lib.distances import capped_distance
from ..core import PlanarBase
import numpy as np
import logging
import MDAnalysis as mda
from ..lib.util import get_center, get_compound, render_docs
@render_docs
class PDFPlanar(PlanarBase):
"""Slab-wise planar 2D pair distribution functions.
The pair distribution function :math:`g_\\mathrm{2D}(r)` describes the
spatial correlation between atoms in :math:`g_1` and atoms in
:math:`g_2`, which lie in the same plane.
It gives the average number density of :math:`g_2` atoms as a function of lateral
distance :math:`r` from a centered :math:`g_1` atom.
PDFPlanar can be used in systems that are inhomogeneous along one axis,
and homogeneous in a plane.
In fully homogeneous systems and in the limit of small 'dzheight'
:math:`\\Delta z`, it is the same as the well-known three-dimensional PDF.
The planar PDF is defined by
.. math::
g_\\mathrm{2D}(r) = \\left \\langle
\\frac{1}{N_{g1}} \\cdot \\sum_{i}^{N_{g1}} \\sum_{j}^{N_{g2}}
\\frac{1}{2 \\pi r} \\delta(r - r_{ij}) \\delta(z_{ij})
\\right \\rangle .
where the brackets :math:`\\langle \\cdot \\rangle` denote the ensemble
average. :math:`\\delta(r- r_{ij})` counts the :math:`g_2` atoms at distance
:math:`r` from atom :math:`i`.
:math:`\\delta(z_{ij})` ensures that only atoms, which lie
in the same plane :math:`z_i = z_j`, are considered for the PDF.
Discretized for computational purposes the equation reads as
.. math::
g_\\mathrm{2D}(r) =
\\frac{1}{N_{g1}} \\cdot \\sum_{i}^{N_{g1}} \\frac{\\mathrm{count}\\; g_2 \\;
\\mathrm{in}\\; \\Delta V_i(r) }{\\Delta V_i(r)} .
where :math:`\\Delta V_i(r)` is a ring around atom i, with inner
radius :math:`r - \\frac{\\Delta r}{2}`, outer radius
:math:`r + \\frac{\\Delta r}{2}` and height :math:`2 \\Delta z`.
Because the density to normalise the PDF with is unknown, the output has the
dimension of number/volume in 1/Å^3.
Functionally, PDFPlanar collects in a histogram all pairwise
:math:`g_1`-:math:`g_2` distances whose :math:`z` distance is smaller than
'dzheight'.
For a more detailed explanation refer to
:ref:`Explanation: PDF<pdfs-explanation>` and
:ref:`PDFPlanar Derivation<pdfplanar-derivation>`
Parameters
----------
${PDF_PARAMETERS}
pdf_bin_width : float
Binwidth of bins in the histogram of the PDF (Å).
dzheight : float
dz height of a PDF slab :math:`\\Delta z` (Å). :math:`\\Delta z` is
introduced to discretize the delta function :math:`\\delta(z_{ij})`.
It is the maximum :math:`z` distance between atoms which are
considered to lie in the same plane.
In the limit of :math:`\\Delta z \\to 0`, PDFPlanar reaches the
continuous limit. However, if :math:`\\Delta z` is too small, there
are no atoms in ``g2`` to sample.
We recommend a choice of :math:`\\Delta z` that is 1/10th of
a bond length.
dmin : float
Minimum pairwise distance between ``g1`` and ``g2`` (Å).
dmax : float
Maximum pairwise distance between ``g1`` and ``g2`` (Å).
${PLANAR_CLASS_PARAMETERS}
${BIN_METHOD_PARAMETER}
${BASE_CLASS_PARAMETERS}
${OUTPUT_PARAMETER}
Attributes
----------
${PLANAR_CLASS_ATTRIBUTES}
results.bins : numpy.ndarray
Distances to which the PDF is calculated with shape (pdf_nbins) (Å)
results.pdf : numpy.ndarray
PDF with shape (pdf_nbins, n_bins) (1/Å^3)
"""
def __init__(self, g1: mda.AtomGroup, g2: mda.AtomGroup | None=None, pdf_bin_width: float=0.3, dzheight: float=0.1, dmin: float=0.0, dmax: float | None=None, dim: int=2, zmin: float | None=None, zmax: float | None=None, bin_width: float=1, bin_method: str='com', refgroup: mda.AtomGroup | None=None, unwrap: bool=False, pack: bool=True, jitter: float=0.0, concfreq: int=0, output: str='pdf.dat') -> None:
self._locals = locals()
self.comp_1 = get_compound(g1)
super().__init__(atomgroup=g1, refgroup=refgroup, unwrap=unwrap, pack=pack, concfreq=concfreq, jitter=jitter, dim=dim, zmin=zmin, zmax=zmax, bin_width=bin_width, wrap_compound=self.comp_1)
self.g1 = g1
if g2 is None:
self.g2 = g1
else:
self.g2 = g2
self.dmin = dmin
self.dmax = dmax
self.pdf_bin_width = pdf_bin_width
self.dzheight = dzheight
self.output = output
self.bin_method = bin_method.lower()
self.comp_2 = get_compound(self.g2)
def _prepare(self) -> None:
super()._prepare()
logging.info('Analysis of the planar pair distribution function.')
half_of_box_size = min(self.box_center)
if self.dmax is None:
self.dmax = min(self.box_center)
logging.info(f'Setting maximum range of PDF to half the box size ({self.dmax} Å).')
elif self.dmax > min(self.box_center):
raise ValueError(f'Range of PDF exceeds half of the box size. Set to smaller than {half_of_box_size} Å.')
try:
if self.pdf_bin_width > 0:
self.pdf_nbins = int(np.ceil((self.dmax - self.dmin) / self.pdf_bin_width))
else:
raise ValueError('PDF bin_width must be a positive number.')
except TypeError as err:
raise ValueError('PDF bin_width must be a number.') from err
if self.bin_method not in ['cog', 'com', 'coc']:
raise ValueError(f'{self.bin_method} is an unknown binning method. Use `cog`, `com` or `coc`.')
logging.info(f'Using {self.pdf_nbins} pdf bins.')
self.edges = np.histogram([-1], bins=self.pdf_nbins, range=(self.dmin, self.dmax))[1]
self.results.bins = 0.5 * (self.edges[:-1] + self.edges[1:])
self._maxrange = self.dmax
def _single_frame(self) -> None:
super()._single_frame()
self._obs.n_g1 = np.zeros((self.n_bins, 1))
self._obs.count = np.zeros((self.n_bins, self.pdf_nbins))
bin_width = (self.zmax - self.zmin) / self.n_bins
g1_bin_positions = get_center(atomgroup=self.g1, bin_method=self.bin_method, compound=self.comp_1)
g2_bin_positions = get_center(atomgroup=self.g2, bin_method=self.bin_method, compound=self.comp_2)
for z_bin in range(0, self.n_bins):
z_min = self.zmin + bin_width * z_bin
z_max = self.zmin + bin_width * (z_bin + 1)
g1_in_zbin_positions = g1_bin_positions[np.logical_and(g1_bin_positions[:, self.dim] >= z_min, g1_bin_positions[:, self.dim] < z_max)]
g2_in_zbin_positions = g2_bin_positions[np.logical_and(g2_bin_positions[:, self.dim] >= z_min - self.dzheight, g2_bin_positions[:, self.dim] < z_max + self.dzheight)]
n_g1 = len(g1_in_zbin_positions)
n_g2 = len(g2_in_zbin_positions)
self._obs.n_g1[z_bin] = n_g1
z_g1 = np.copy(g1_in_zbin_positions)
z_g2 = np.copy(g2_in_zbin_positions)
z_g1[:, self.odims] = 0
z_g2[:, self.odims] = 0
z_pairs, _ = capped_distance(z_g1, z_g2, self.dzheight, box=self._universe.dimensions)
pairs, xy_distances = capped_distance(g1_in_zbin_positions, g2_in_zbin_positions, self._maxrange, box=self._universe.dimensions)
z_pairs_encode = z_pairs[:, 0] + n_g2 * z_pairs[:, 1]
pairs_encode = pairs[:, 0] + n_g2 * pairs[:, 1]
mask_in_dz = np.isin(pairs_encode, z_pairs_encode)
mask_different_atoms = xy_distances > 0
relevant_xy_distances = xy_distances[mask_in_dz * mask_different_atoms]
self._obs.count[z_bin] = np.histogram(relevant_xy_distances, bins=self.pdf_nbins, range=(self.dmin, self.dmax))[0]
def _conclude(self) -> None:
super()._conclude()
ring_volumes = np.pi * (self.edges[1:] ** 2 - self.edges[:-1] ** 2) * 2 * self.dzheight
ring_volumes = np.expand_dims(ring_volumes, axis=0)
self.results.pdf = self.means.count / self.means.n_g1 / ring_volumes
self.results.pdf = np.nan_to_num(self.results.pdf.T, nan=0)
@render_docs
def save(self) -> None:
"""${SAVE_METHOD_DESCRIPTION}"""
columns = ['r [Å]']
for z in self.results.bin_pos:
columns.append(f'pdf at {z:.2f} Å [Å^-3]')
self.savetxt(self.output, np.hstack([self.results.bins[:, np.newaxis], self.results.pdf]), columns=columns)
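# Worked check of the ring normalization in _conclude above: a pair at lateral
# distance r falls into a ring of volume pi * (r_out^2 - r_in^2) * 2 * dz, so
# for an ideal gas, counts / (n_g1 * ring_volume) recovers the number density
# in Å^-3. The toy numbers below are illustrative assumptions only.
if __name__ == "__main__":
    edges = np.array([0.0, 0.3, 0.6])  # two bins with pdf_bin_width = 0.3 Å
    dz = 0.1                           # dzheight
    ring_volumes = np.pi * (edges[1:] ** 2 - edges[:-1] ** 2) * 2 * dz
    print(ring_volumes)  # [0.0565..., 0.1696...] Å^3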
|
@render_docs
class PDFPlanar(PlanarBase):
'''Slab-wise planar 2D pair distribution functions.
The pair distribution function :math:`g_\mathrm{2D}(r)` describes the
spatial correlation between atoms in :math:`g_1` and atoms in
:math:`g_2`, which lie in the same plane.
It gives the average number density of :math:`g_2` atoms as a function of lateral
distance :math:`r` from a centered :math:`g_1` atom.
PDFPlanar can be used in systems that are inhomogeneous along one axis,
and homogeneous in a plane.
In fully homogeneous systems and in the limit of small 'dzheight'
:math:`\Delta z`, it is the same as the well-known three-dimensional PDF.
The planar PDF is defined by
.. math::
g_\mathrm{2D}(r) = \left \langle
\frac{1}{N_{g1}} \cdot \sum_{i}^{N_{g1}} \sum_{j}^{N_{g2}}
\frac{1}{2 \pi r} \delta(r - r_{ij}) \delta(z_{ij})
\right \rangle .
where the brackets :math:`\langle \cdot \rangle` denote the ensemble
average. :math:`\delta(r- r_{ij})` counts the :math:`g_2` atoms at distance
:math:`r` from atom :math:`i`.
:math:`\delta(z_{ij})` ensures that only atoms, which lie
in the same plane :math:`z_i = z_j`, are considered for the PDF.
Discretized for computational purposes the equation reads as
.. math::
g_\mathrm{2D}(r) =
\frac{1}{N_{g1}} \cdot \sum_{i}^{N_{g1}} \frac{\mathrm{count}\; g_2 \;
\mathrm{in}\; \Delta V_i(r) }{\Delta V_i(r)} .
where :math:`\Delta V_i(r)` is a ring around atom i, with inner
radius :math:`r - \frac{\Delta r}{2}`, outer radius
:math:`r + \frac{\Delta r}{2}` and height :math:`2 \Delta z`.
Because the density to normalise the PDF with is unknown, the output has the
dimension of number/volume in 1/Å^3.
Functionally, PDFPlanar collects in a histogram all pairwise
:math:`g_1`-:math:`g_2` distances whose :math:`z` distance is smaller than
'dzheight'.
For a more detailed explanation refer to
:ref:`Explanation: PDF<pdfs-explanation>` and
:ref:`PDFPlanar Derivation<pdfplanar-derivation>`
Parameters
----------
${PDF_PARAMETERS}
pdf_bin_width : float
Binwidth of bins in the histogram of the PDF (Å).
dzheight : float
dz height of a PDF slab :math:`\Delta z` (Å). :math:`\Delta z` is
introduced to discretize the delta function :math:`\delta(z_{ij})`.
It is the maximum :math:`z` distance between atoms which are
considered to lie in the same plane.
In the limit of :math:`\Delta z \to 0`, PDFPlanar reaches the
continuous limit. However, if :math:`\Delta z` is too small, there
are no atoms in ``g2`` to sample.
We recommend a choice of :math:`\Delta z` that is 1/10th of
a bond length.
dmin : float
Minimum pairwise distance between ``g1`` and ``g2`` (Å).
dmax : float
Maximum pairwise distance between ``g1`` and ``g2`` (Å).
${PLANAR_CLASS_PARAMETERS}
${BIN_METHOD_PARAMETER}
${BASE_CLASS_PARAMETERS}
${OUTPUT_PARAMETER}
Attributes
----------
${PLANAR_CLASS_ATTRIBUTES}
results.bins : numpy.ndarray
Distances to which the PDF is calculated with shape (pdf_nbins) (Å)
results.pdf : numpy.ndarray
PDF with shape (pdf_nbins, n_bins) (1/Å^3)
'''
def __init__(self, g1: mda.AtomGroup, g2: mda.AtomGroup | None=None, pdf_bin_width: float=0.3, dzheight: float=0.1, dmin: float=0.0, dmax: float | None=None, dim: int=2, zmin: float | None=None, zmax: float | None=None, bin_width: float=1, bin_method: str='com', refgroup: mda.AtomGroup | None=None, unwrap: bool=False, pack: bool=True, jitter: float=0.0, concfreq: int=0, output: str='pdf.dat') -> None:
pass
def _prepare(self) -> None:
pass
def _single_frame(self) -> None:
pass
def _conclude(self) -> None:
pass
@render_docs
def save(self) -> None:
'''${SAVE_METHOD_DESCRIPTION}'''
pass
| 8
| 2
| 37
| 4
| 30
| 3
| 3
| 0.53
| 1
| 8
| 0
| 0
| 5
| 14
| 5
| 23
| 273
| 42
| 151
| 64
| 125
| 80
| 75
| 43
| 69
| 6
| 3
| 2
| 13
|
328,326
|
maicos-devel/maicos
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/maicos-devel_maicos/src/maicos/modules/rdfdiporder.py
|
maicos.modules.rdfdiporder.RDFDiporder
|
from ..lib.util import get_center, render_docs
from ..lib.weights import diporder_pair_weights
import MDAnalysis as mda
from MDAnalysis.lib import distances
import logging
from ..core import AnalysisBase
import numpy as np
@render_docs
class RDFDiporder(AnalysisBase):
"""Spherical Radial Distribution function between dipoles.
The implementation is heavily inspired by :class:`MDAnalysis.analysis.rdf.InterRDF`
and, following :footcite:t:`zhang_dipolar_2014`, is given by
.. math::
g_\\mathrm{\\hat{\\boldsymbol{\\mu}}, \\hat{\\boldsymbol{\\mu}}}(r) = \\frac{1}{N}
\\left\\langle \\sum_i \\frac{1}{n_i(r)} \\sum_{j=1}^{n_i(r)}
(\\hat{\\boldsymbol{\\mu}}_i \\cdot \\hat{\\boldsymbol{\\mu}}_j) \\right \\rangle
where :math:`\\hat{\\boldsymbol{\\mu}}` is the normalized dipole moment of a
``grouping`` and :math:`n_i(r)` is the number of dipoles within a spherical shell
between :math:`r` and :math:`r + \\delta r` from dipole :math:`i`.
For the correlation time estimation the module will use the value of the RDF with
the largest possible :math:`r` value.
For a detailed example of the usage refer to the :ref:`how-to on dipolar
correlation functions <howto-spatial-dipole-dipole-correlations>`.
Parameters
----------
${PDF_PARAMETERS}
norm : str, {'rdf', 'density', 'none'}
For 'rdf' calculate :math:`g_{ab}(r)`. For 'density' the single group
density :math:`n_{ab}(r)` is computed. 'none' computes the number of
particle occurrences in each spherical shell.
${RADIAL_CLASS_PARAMETERS}
${BIN_WIDTH_PARAMETER}
${BIN_METHOD_PARAMETER}
${GROUPING_PARAMETER}
${BASE_CLASS_PARAMETERS}
${OUTPUT_PARAMETER}
Attributes
----------
results.bins: numpy.ndarray
radial distances to which the RDF is calculated with shape (``rdf_nbins``) (Å)
results.rdf: numpy.ndarray
RDF either in :math:`\\text{eÅ}^{-2}` if norm is ``"rdf"`` or ``"density"`` or
:math:`\\text{eÅ}` if norm is ``"none"``.
"""
def __init__(self, g1: mda.AtomGroup, g2: mda.AtomGroup | None=None, norm: str='rdf', rmin: float=0.0, rmax: float=15.0, bin_width: float=0.1, bin_method: str='com', grouping: str='residues', refgroup: mda.AtomGroup | None=None, unwrap: bool=True, pack: bool=True, jitter: float=0.0, concfreq: int=0, output: str='diporderrdf.dat') -> None:
self._locals = locals()
super().__init__(g1, unwrap=unwrap, pack=pack, refgroup=refgroup, jitter=jitter, wrap_compound=grouping, concfreq=concfreq)
self.g1 = g1
if g2 is None:
self.g2 = g1
else:
self.g2 = g2
self.bin_width = bin_width
self.rmin = rmin
self.rmax = rmax
self.bin_method = str(bin_method).lower()
self.norm = norm
self.output = output
def _prepare(self):
logging.info('Analysis of the spherical radial distribution function for dipoles.')
self.n_bins = int(np.ceil((self.rmax - self.rmin) / self.bin_width))
supported_norms = ['rdf', 'density', 'none']
if self.norm not in supported_norms:
raise ValueError(f"'{self.norm}' is an invalid `norm`. Choose from: {', '.join(supported_norms)}")
def _single_frame(self):
if self.unwrap:
self.g1.unwrap(compound=self.wrap_compound)
self.g2.unwrap(compound=self.wrap_compound)
pos_1 = get_center(self.g1, bin_method=self.bin_method, compound=self.wrap_compound)
pos_2 = get_center(self.g2, bin_method=self.bin_method, compound=self.wrap_compound)
pairs, dist = distances.capped_distance(pos_1, pos_2, min_cutoff=self.rmin, max_cutoff=self.rmax, box=self._ts.dimensions)
weights = diporder_pair_weights(self.g1, self.g2, compound=self.wrap_compound)
weights_sel = np.array([weights[ix[0], ix[1]] for ix in pairs])
self._obs.profile, _ = np.histogram(a=dist, bins=self.n_bins, range=(self.rmin, self.rmax), weights=weights_sel)
if self.norm == 'rdf':
self._obs.volume = self._ts.volume
return self._obs.profile[-1]
def _conclude(self):
_, edges = np.histogram(a=[-1], bins=self.n_bins, range=(self.rmin, self.rmax))
self.results.bins = 0.5 * (edges[:-1] + edges[1:])
norm = 1
if self.norm in ['rdf', 'density']:
vols = np.power(edges, 3)
norm *= 4 / 3 * np.pi * np.diff(vols)
if self.norm == 'rdf':
if self.wrap_compound != 'molecules':
nA = getattr(self.g1, f'n_{self.wrap_compound}')
nB = getattr(self.g2, f'n_{self.wrap_compound}')
else:
nA = len(np.unique(self.g1.molnums))
nB = len(np.unique(self.g2.molnums))
N = nA * nB
norm *= N / self.means.volume
self.results.rdf = self.means.profile / norm
@render_docs
def save(self) -> None:
"""${SAVE_METHOD_DESCRIPTION}"""
columns = ['r (Å)', 'rdf']
if self.norm in ['rdf', 'density']:
columns[1] += ' (Å^3)'
self.savetxt(self.output, np.vstack([self.results.bins, self.results.rdf]).T, columns=columns)
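A minimal usage sketch of the class above (not part of the original record; the file names and the ``resname SOL`` selection are illustrative assumptions, and the class is assumed to be exposed at the maicos package level like the other modules):
import MDAnalysis as mda
from maicos import RDFDiporder  # assumed top-level export

u = mda.Universe("topol.tpr", "traj.trr")  # hypothetical input files
water = u.select_atoms("resname SOL")      # hypothetical water selection
rdf = RDFDiporder(water, rmax=10.0, bin_width=0.1).run()
print(rdf.results.bins, rdf.results.rdf)   # binned r values and the dipolar g(r)
rdf.save()                                 # writes 'diporderrdf.dat'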
|
@render_docs
class RDFDiporder(AnalysisBase):
'''Spherical Radial Distribution function between dipoles.
The implementation is heavily inspired by :class:`MDAnalysis.analysis.rdf.InterRDF`
and is according to :footcite:t:`zhang_dipolar_2014` given by
.. math::
g_\mathrm{\hat{\boldsymbol{\mu}}, \hat{\boldsymbol{\mu}}}(r) = \frac{1}{N}
\left\langle \sum_i \frac{1}{n_i(r)} \sum_{j=1}^{n_i(r)}
(\hat{\boldsymbol{\mu}}_i \cdot \hat{\boldsymbol{\mu}}_j) \right \rangle
where :math:`\hat{\boldsymbol{\mu}}` is the normalized dipole moment of a
``grouping`` and :math:`n_i(r)` is the number of dipoles within a spherical shell of
distance :math:`r` and :math:`r + \delta r` from dipole :math:`i`.
For the correlation time estimation the module will use the value of the RDF with
the largest possible :math:`r` value.
For a detailed example of the usage, refer to the :ref:`how-to on dipolar
correlation functions <howto-spatial-dipole-dipole-correlations>`.
Parameters
----------
${PDF_PARAMETERS}
norm : str, {'rdf', 'density', 'none'}
For 'rdf' calculate :math:`g_{ab}(r)`. For 'density' the single group
density :math:`n_{ab}(r)` is computed. 'none' computes the number of
particle occurrences in each spherical shell.
${RADIAL_CLASS_PARAMETERS}
${BIN_WIDTH_PARAMETER}
${BIN_METHOD_PARAMETER}
${GROUPING_PARAMETER}
${BASE_CLASS_PARAMETERS}
${OUTPUT_PARAMETER}
Attributes
----------
results.bins: numpy.ndarray
radial distances to which the RDF is calculated with shape (``rdf_nbins``) (Å)
results.rdf: numpy.ndarray
RDF either in :math:`\text{eÅ}^{-2}` if norm is ``"rdf"`` or ``"density"`` or
:math:`\text{eÅ}` if norm is ``"none"``.
'''
def __init__(self, g1: mda.AtomGroup, g2: mda.AtomGroup | None=None, norm: str='rdf', rmin: float=0.0, rmax: float=15.0, bin_width: float=0.1, bin_method: str='com', grouping: str='residues', refgroup: mda.AtomGroup | None=None, unwrap: bool=True, pack: bool=True, jitter: float=0.0, concfreq: int=0, output: str='diporderrdf.dat') -> None:
pass
def _prepare(self):
pass
def _single_frame(self):
pass
def _conclude(self):
pass
@render_docs
def save(self) -> None:
'''${SAVE_METHOD_DESCRIPTION}'''
pass
| 8
| 2
| 25
| 3
| 21
| 1
| 3
| 0.37
| 1
| 6
| 0
| 0
| 5
| 10
| 5
| 17
| 173
| 29
| 105
| 47
| 82
| 39
| 54
| 30
| 48
| 4
| 2
| 2
| 13
|
328,327
|
maicos-devel/maicos
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/maicos-devel_maicos/src/maicos/modules/saxs.py
|
maicos.modules.saxs.Saxs
|
from ..lib.util import render_docs
import logging
import MDAnalysis as mda
from ..core import AnalysisBase
import numpy as np
from ..lib.math import atomic_form_factor, structure_factor
@render_docs
class Saxs(AnalysisBase):
"""Small angle X-Ray scattering intensities (SAXS).
This module computes the structure factor :math:`S(q)`, the scattering intensity
(sometimes also called scattering factor) :math:`I(q)` and their corresponding
scattering vectors :math:`q`. For a system containing only one element the structure
factor and the scattering intensity are connected via the atomic form factor
:math:`f(q)`
.. math::
I(q) = [f(q)]^2 S(q)
For more details on the theory behind this module see :ref:`saxs-explanations`.
By default the scattering vectors :math:`\\boldsymbol{q}` are binned according to
their length :math:`q` using a bin width given by ``dq``. With the option
``bin_spectrum=False``, the raw scattering vectors and their corresponding
Miller indices can also be saved. Saving the scattering vectors and Miller indices is
only possible when the box vectors are constant in the whole trajectory (NVT) since
for changing cells the same Miller indices correspond to different scattering
vectors.
.. warning::
Please be aware that in simulations where the box vectors change, the q-vectors
will differ between frames. Artifacts can arise when the data contains poorly
sampled q-vectors.
Analyzed scattering vectors :math:`q` can be restricted by a minimal and maximal
angle with the z-axis. For ``0`` and ``180``, all possible vectors are taken into
account. To obtain the scattering intensities, the structure factor is normalized by
an element-specific atomic form factor based on Cromer-Mann parameters
:footcite:t:`princeInternationalTablesCrystallography2004`.
For the correlation time estimation the module will use the value of the scattering
intensity with the largest possible :math:`q` value.
For an example on the usage refer to :ref:`How-to: SAXS<howto-saxs>`.
Parameters
----------
${ATOMGROUP_PARAMETER}
bin_spectrum : bool
Bin the spectrum. If :py:obj:`False` Miller indices of q-vector are returned.
Only works for NVT simulations.
${Q_SPACE_PARAMETERS}
thetamin : float
Minimal angle (°) between the q vectors and the z-axis.
thetamax : float
Maximal angle (°) between the q vectors and the z-axis.
${BASE_CLASS_PARAMETERS}
${OUTPUT_PARAMETER}
Attributes
----------
results.scattering_vectors : numpy.ndarray
Length of the binned scattering vectors.
results.miller_indices : numpy.ndarray
Miller indices of q-vector (only available if ``bin_spectrum==False``).
results.structure_factors : numpy.ndarray
structure factors :math:`S(q)`
results.scattering_intensities : numpy.ndarray
scattering intensities :math:`I(q)`
results.dstructure_factors : numpy.ndarray
standard error of the structure factors :math:`S(q)`
(only available if ``bin_spectrum==True``).
results.dscattering_intensities : numpy.ndarray
standard error of the scattering intensities :math:`I(q)`
(only available if ``bin_spectrum==True``).
"""
def __init__(self, atomgroup: mda.AtomGroup, bin_spectrum: bool=True, qmin: float=0, qmax: float=6, dq: float=0.1, thetamin: float=0, thetamax: float=180, refgroup: mda.AtomGroup | None=None, unwrap: bool=False, pack: bool=True, jitter: float=0.0, concfreq: int=0, output: str='sq.dat') -> None:
self._locals = locals()
super().__init__(atomgroup, unwrap=unwrap, pack=pack, refgroup=refgroup, jitter=jitter, concfreq=concfreq, wrap_compound='atoms')
self.bin_spectrum = bin_spectrum
self.qmin = qmin
self.qmax = qmax
self.dq = dq
self.thetamin = thetamin
self.thetamax = thetamax
self.output = output
def _prepare(self) -> None:
logging.info('Analysis of small angle X-ray scattering intensities (SAXS).')
self.thetamin, self.thetamax = (min(self.thetamin, self.thetamax), max(self.thetamin, self.thetamax))
if self.thetamin < 0 or self.thetamin > 180:
raise ValueError(f'thetamin ({self.thetamin}°) has to be between 0 and 180°.')
if self.thetamax < 0 or self.thetamax > 180:
raise ValueError(f'thetamax ({self.thetamax}°) has to be between 0 and 180°.')
self.thetamin *= np.pi / 180
self.thetamax *= np.pi / 180
self.groups = []
self.weights = []
self.elements = []
for element in np.unique(self.atomgroup.elements):
group = self.atomgroup.select_atoms(f'element {element}')
self.groups.append(group)
self.weights.append(np.ones(group.n_atoms))
self.elements.append(element)
if self.bin_spectrum:
self.n_bins = int(np.ceil((self.qmax - self.qmin) / self.dq))
else:
self.box = np.diag(mda.lib.mdamath.triclinic_vectors(self._universe.dimensions))
self.scattering_vector_factors = 2 * np.pi / self.box
self.max_n = np.ceil(self.qmax / self.scattering_vector_factors).astype(int)
def _single_frame(self) -> float:
box = np.diag(mda.lib.mdamath.triclinic_vectors(self._ts.dimensions))
if self.bin_spectrum:
self._obs.structure_factors = np.zeros(self.n_bins)
self._obs.scattering_intensities = np.zeros(self.n_bins)
else:
if not np.all(box == self.box):
raise ValueError(f'Dimensions in frame {self.frame_index} are different from the initial dimensions. Cannot use `bin_spectrum=False`.')
self._obs.structure_factors = np.zeros(self.max_n)
self._obs.scattering_intensities = np.zeros(self.max_n)
for i_group, group in enumerate(self.groups):
positions = group.atoms.positions - box * np.round(group.atoms.positions / box)
scattering_vectors, structure_factors = structure_factor(np.double(positions), np.double(box), self.qmin, self.qmax, self.thetamin, self.thetamax, self.weights[i_group])
scattering_intensities = atomic_form_factor(scattering_vectors, self.elements[i_group]) ** 2 * structure_factors
if self.bin_spectrum:
scattering_vectors = scattering_vectors.flatten()
structure_factors = structure_factors.flatten()
scattering_intensities = scattering_intensities.flatten()
nonzeros = np.where(structure_factors != 0)[0]
scattering_vectors = scattering_vectors[nonzeros]
structure_factors = structure_factors[nonzeros]
scattering_intensities = scattering_intensities[nonzeros]
histogram_kwargs = dict(a=scattering_vectors, bins=self.n_bins, range=(self.qmin, self.qmax))
structure_factors, _ = np.histogram(weights=structure_factors, **histogram_kwargs)
scattering_intensities, _ = np.histogram(weights=scattering_intensities, **histogram_kwargs)
self._obs.bincount, _ = np.histogram(weights=None, **histogram_kwargs)
self._obs.structure_factors += structure_factors
self._obs.scattering_intensities += scattering_intensities
else:
self._obs.structure_factors += structure_factors
self._obs.scattering_intensities += scattering_intensities
return structure_factors.flatten()[-1]
def _conclude(self) -> None:
if self.bin_spectrum:
scattering_vectors = np.arange(self.qmin, self.qmax, self.dq) + 0.5 * self.dq
structure_factors = self.sums.structure_factors / self.sums.bincount
scattering_intensities = self.sums.scattering_intensities / self.sums.bincount
dstructure_factors = self.sems.structure_factors
dscattering_intensities = self.sems.scattering_intensities
else:
miller_indices = np.array(list(np.ndindex(tuple(self.max_n))))
scattering_vectors = np.linalg.norm(miller_indices * self.scattering_vector_factors[np.newaxis, :], axis=1)
structure_factors = self.means.structure_factors
scattering_intensities = self.means.scattering_intensities
structure_factors = structure_factors.flatten()
scattering_intensities = scattering_intensities.flatten()
argsort = np.argsort(scattering_vectors)
scattering_vectors = scattering_vectors[argsort]
miller_indices = miller_indices[argsort]
structure_factors = structure_factors[argsort]
scattering_intensities = scattering_intensities[argsort]
nonzeros = np.invert(np.isnan(structure_factors))
scattering_vectors = scattering_vectors[nonzeros]
structure_factors = structure_factors[nonzeros]
scattering_intensities = scattering_intensities[nonzeros]
if self.bin_spectrum:
dstructure_factors = dstructure_factors[nonzeros]
dscattering_intensities = dscattering_intensities[nonzeros]
structure_factors /= self.atomgroup.n_atoms
scattering_intensities /= self.atomgroup.n_atoms
if self.bin_spectrum:
dstructure_factors /= self.atomgroup.n_atoms
dscattering_intensities /= self.atomgroup.n_atoms
self.results.scattering_vectors = scattering_vectors
self.results.structure_factors = structure_factors
self.results.scattering_intensities = scattering_intensities
if self.bin_spectrum:
self.results.dstructure_factors = dstructure_factors
self.results.dscattering_intensities = dscattering_intensities
if not self.bin_spectrum:
self.results.miller_indices = miller_indices[nonzeros]
@render_docs
def save(self) -> None:
"""${SAVE_METHOD_DESCRIPTION}"""
if self.bin_spectrum:
self.savetxt(self.output, np.vstack([self.results.scattering_vectors, self.results.structure_factors, self.results.scattering_intensities, self.results.dstructure_factors, self.results.dscattering_intensities]).T, columns=['q (1/Å)', 'S(q) (arb. units)', 'I(q) (arb. units)', 'ΔS(q)', 'ΔI(q)'])
else:
out = np.hstack([self.results.scattering_vectors[:, np.newaxis], self.results.miller_indices, self.results.structure_factors[:, np.newaxis], self.results.scattering_intensities[:, np.newaxis]])
boxinfo = 'box_x = {:.3f} Å, box_y = {:.3f} Å, box_z = {:.3f} Å\n'.format(*self.box)
self.savetxt(self.output, out, columns=[boxinfo, 'q (1/Å)', 'q_i', 'q_j', 'q_k', 'S(q) (arb. units)', 'I(q) (arb. units)'])
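A hedged usage sketch for the module above (file names are illustrative assumptions; the topology must provide element information, since the module groups atoms by ``atomgroup.elements``):
import MDAnalysis as mda
from maicos import Saxs  # assumed top-level export

u = mda.Universe("topol.tpr", "traj.trr")      # hypothetical inputs with element data
saxs = Saxs(u.atoms, qmin=0.5, qmax=6.0, dq=0.05).run()
# I(q) = [f(q)]^2 S(q); both arrays live on the same binned q grid
print(saxs.results.scattering_vectors)
print(saxs.results.scattering_intensities)
saxs.save()                                    # writes 'sq.dat'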
|
@render_docs
class Saxs(AnalysisBase):
'''Small angle X-Ray scattering intensities (SAXS).
This module computes the structure factor :math:`S(q)`, the scattering intensity
(sometimes also called scattering factor) :math:`I(q)` and their corresponding
scattering vectors :math:`q`. For a system containing only one element the structure
factor and the scattering intensity are connected via the atomic form factor
:math:`f(q)`
.. math::
I(q) = [f(q)]^2 S(q)
For more details on the theory behind this module see :ref:`saxs-explanations`.
By default the scattering vectors :math:`\boldsymbol{q}` are binned according to
their length :math:`q` using a bin width given by ``dq``. With the option
``bin_spectrum=False``, the raw scattering vectors and their corresponding
Miller indices can also be saved. Saving the scattering vectors and Miller indices is
only possible when the box vectors are constant in the whole trajectory (NVT) since
for changing cells the same Miller indices correspond to different scattering
vectors.
.. warning::
Please be aware that in simulations where the box vectors change, the q-vectors
will differ between frames. Artifacts can arise when the data contains poorly
sampled q-vectors.
Analyzed scattering vectors :math:`q` can be restricted by a minimal and maximal
angle with the z-axis. For ``0`` and ``180``, all possible vectors are taken into
account. To obtain the scattering intensities, the structure factor is normalized by
an element-specific atomic form factor based on Cromer-Mann parameters
:footcite:t:`princeInternationalTablesCrystallography2004`.
For the correlation time estimation the module will use the value of the scattering
intensity with the largest possible :math:`q` value.
For an example on the usage refer to :ref:`How-to: SAXS<howto-saxs>`.
Parameters
----------
${ATOMGROUP_PARAMETER}
bin_spectrum : bool
Bin the spectrum. If :py:obj:`False` Miller indices of q-vector are returned.
Only works for NVT simulations.
${Q_SPACE_PARAMETERS}
thetamin : float
Minimal angle (°) between the q vectors and the z-axis.
thetamax : float
Maximal angle (°) between the q vectors and the z-axis.
${BASE_CLASS_PARAMETERS}
${OUTPUT_PARAMETER}
Attributes
----------
results.scattering_vectors : numpy.ndarray
Length of the binned scattering vectors.
results.miller_indices : numpy.ndarray
Miller indices of q-vector (only available if ``bin_spectrum==False``).
results.structure_factors : numpy.ndarray
structure factors :math:`S(q)`
results.scattering_intensities : numpy.ndarray
scattering intensities :math:`I(q)`
results.dstructure_factors : numpy.ndarray
standard error of the structure factors :math:`S(q)`
(only available if ``bin_spectrum==True``).
results.dscattering_intensities : numpy.ndarray
standard error of the scattering intensities :math:`I(q)`
(only available if ``bin_spectrum==True``).
'''
def __init__(self, atomgroup: mda.AtomGroup, bin_spectrum: bool=True, qmin: float=0, qmax: float=6, dq: float=0.1, thetamin: float=0, thetamax: float=180, refgroup: mda.AtomGroup | None=None, unwrap: bool=False, pack: bool=True, jitter: float=0.0, concfreq: int=0, output: str='sq.dat') -> None:
pass
def _prepare(self) -> None:
pass
def _single_frame(self) -> float:
pass
def _conclude(self) -> None:
pass
@render_docs
def save(self) -> None:
'''${SAVE_METHOD_DESCRIPTION}'''
pass
| 8
| 2
| 49
| 6
| 42
| 2
| 4
| 0.33
| 1
| 11
| 0
| 0
| 5
| 15
| 5
| 17
| 326
| 46
| 212
| 57
| 190
| 71
| 110
| 41
| 104
| 6
| 2
| 2
| 20
|
328,328
|
maicos-devel/maicos
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/maicos-devel_maicos/src/maicos/modules/temperatureplanar.py
|
maicos.modules.temperatureplanar.TemperaturePlanar
|
import MDAnalysis as mda
from ..core import ProfilePlanarBase
from ..lib.weights import temperature_weights
from ..lib.util import render_docs
import logging
@render_docs
class TemperaturePlanar(ProfilePlanarBase):
"""Temperature profiles in a cartesian geometry.
Currently only atomistic temperature profiles are supported. Therefore grouping per
molecule, segment, residue, or fragment is not possible.
${CORRELATION_INFO_PLANAR}
Parameters
----------
${ATOMGROUP_PARAMETER}
${PROFILE_PLANAR_CLASS_PARAMETERS}
${OUTPUT_PARAMETER}
Attributes
----------
${PROFILE_PLANAR_CLASS_ATTRIBUTES}
"""
def __init__(self, atomgroup: mda.AtomGroup, dim: int=2, zmin: float | None=None, zmax: float | None=None, bin_width: float=1, bin_method: str='com', grouping: str='atoms', sym: bool=False, refgroup: mda.AtomGroup | None=None, unwrap: bool=True, pack: bool=True, jitter: float=0.0, concfreq: int=0, output: str='temperature.dat') -> None:
self._locals = locals()
if grouping != 'atoms':
raise ValueError('Invalid choice of grouping, must use atoms')
super().__init__(atomgroup=atomgroup, unwrap=unwrap, pack=pack, jitter=jitter, concfreq=concfreq, dim=dim, zmin=zmin, zmax=zmax, bin_width=bin_width, refgroup=refgroup, sym=sym, sym_odd=False, grouping=grouping, bin_method=bin_method, output=output, weighting_function=temperature_weights, weighting_function_kwargs=None, normalization='number')
def _prepare(self):
logging.info('Analysis of temperature profiles.')
super()._prepare()
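For illustration only (not from the source), a sketch of how the class above is typically driven; it assumes a trajectory that stores velocities, which the kinetic-energy-based temperature weights require:
import MDAnalysis as mda
from maicos import TemperaturePlanar  # assumed top-level export

u = mda.Universe("topol.tpr", "traj.trr")  # trajectory must contain velocities
temp = TemperaturePlanar(u.atoms, dim=2, bin_width=2.0).run()
temp.save()                                # z-profile written to 'temperature.dat'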
|
@render_docs
class TemperaturePlanar(ProfilePlanarBase):
'''Temperature profiles in a cartesian geometry.
Currently only atomistic temperature profiles are supported. Therefore grouping per
molecule, segment, residue, or fragment is not possible.
${CORRELATION_INFO_PLANAR}
Parameters
----------
${ATOMGROUP_PARAMETER}
${PROFILE_PLANAR_CLASS_PARAMETERS}
${OUTPUT_PARAMETER}
Attributes
----------
${PROFILE_PLANAR_CLASS_ATTRIBUTES}
'''
def __init__(self, atomgroup: mda.AtomGroup, dim: int=2, zmin: float | None=None, zmax: float | None=None, bin_width: float=1, bin_method: str='com', grouping: str='atoms', sym: bool=False, refgroup: mda.AtomGroup | None=None, unwrap: bool=True, pack: bool=True, jitter: float=0.0, concfreq: int=0, output: str='temperature.dat') -> None:
pass
def _prepare(self):
pass
| 4
| 1
| 22
| 1
| 22
| 0
| 2
| 0.3
| 1
| 6
| 0
| 0
| 2
| 1
| 2
| 31
| 65
| 8
| 44
| 20
| 25
| 13
| 9
| 4
| 6
| 2
| 4
| 1
| 3
|
328,329
|
maicos-devel/maicos
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/maicos-devel_maicos/src/maicos/modules/velocitycylinder.py
|
maicos.modules.velocitycylinder.VelocityCylinder
|
from ..lib.util import render_docs
from ..lib.weights import velocity_weights
import logging
import MDAnalysis as mda
from ..core import ProfileCylinderBase
@render_docs
class VelocityCylinder(ProfileCylinderBase):
"""Cartesian velocity profile across a cylinder.
Reads in coordinates and velocities from a trajectory and calculates a velocity
:math:`[\\mathrm{Å/ps}]` or a flux per unit area :math:`[\\mathrm{Å^{-2}\\,ps^{-1}}]`
profile along a given axis.
The ``grouping`` keyword gives you fine control over the velocity profile, e.g. you
can choose atomic or molecular velocities. Note that if atomic velocities are
employed for complex compounds, a contribution corresponding to the vorticity
usually appears in the profile.
${CORRELATION_INFO_RADIAL}
Parameters
----------
${ATOMGROUP_PARAMETER}
${VDIM_PARAMETER}
${FLUX_PARAMETER}
${PROFILE_CYLINDER_CLASS_PARAMETERS}
${OUTPUT_PARAMETER}
Attributes
----------
${PROFILE_CYLINDER_CLASS_ATTRIBUTES}
"""
def __init__(self, atomgroup: mda.AtomGroup, vdim: int=0, flux: bool=False, dim: int=2, zmin: float | None=None, zmax: float | None=None, rmin: float=0, rmax: float | None=None, bin_width: float=1.0, bin_method: str='com', grouping: str='atoms', refgroup: mda.AtomGroup | None=None, unwrap: bool=True, pack: bool=True, jitter: float=0.0, concfreq: int=0, output: str='velocity.dat') -> None:
self._locals = locals()
if vdim not in [0, 1, 2]:
raise ValueError('Velocity dimension can only be x=0, y=1 or z=2.')
normalization = 'volume' if flux else 'number'
super().__init__(atomgroup=atomgroup, unwrap=unwrap, pack=pack, jitter=jitter, concfreq=concfreq, dim=dim, zmin=zmin, zmax=zmax, bin_width=bin_width, rmin=rmin, rmax=rmax, refgroup=refgroup, grouping=grouping, bin_method=bin_method, output=output, weighting_function=velocity_weights, weighting_function_kwargs={'vdim': vdim}, normalization=normalization)
def _prepare(self):
logging.info('Analysis of the velocity profile.')
super()._prepare()
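A minimal sketch (assumed file names, assumed top-level export) of a radial profile of the axial velocity, e.g. for a Poiseuille-like flow in a pore:
import MDAnalysis as mda
from maicos import VelocityCylinder  # assumed top-level export

u = mda.Universe("topol.tpr", "traj.trr")   # hypothetical inputs with velocities
pore = u.select_atoms("resname SOL")        # hypothetical selection
# z-velocity (vdim=2) binned over the radial distance from the z-axis (dim=2)
flow = VelocityCylinder(pore, vdim=2, dim=2, bin_width=0.5).run()
flow.save()                                 # writes 'velocity.dat'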
|
@render_docs
class VelocityCylinder(ProfileCylinderBase):
'''Cartesian velocity profile across a cylinder.
Reads in coordinates and velocities from a trajectory and calculates a velocity
:math:`[\mathrm{Å/ps}]` or a flux per unit area :math:`[\mathrm{Å^{-2}\,ps^{-1}}]`
profile along a given axis.
The ``grouping`` keyword gives you fine control over the velocity profile, e.g. you
can choose atomic or molecular velocities. Note that if atomic velocities are
employed for complex compounds, a contribution corresponding to the vorticity
usually appears in the profile.
${CORRELATION_INFO_RADIAL}
Parameters
----------
${ATOMGROUP_PARAMETER}
${VDIM_PARAMETER}
${FLUX_PARAMETER}
${PROFILE_CYLINDER_CLASS_PARAMETERS}
${OUTPUT_PARAMETER}
Attributes
----------
${PROFILE_CYLINDER_CLASS_ATTRIBUTES}
'''
def __init__(self, atomgroup: mda.AtomGroup, vdim: int=0, flux: bool=False, dim: int=2, zmin: float | None=None, zmax: float | None=None, rmin: float=0, rmax: float | None=None, bin_width: float=1.0, bin_method: str='com', grouping: str='atoms', refgroup: mda.AtomGroup | None=None, unwrap: bool=True, pack: bool=True, jitter: float=0.0, concfreq: int=0, output: str='velocity.dat') -> None:
pass
def _prepare(self):
pass
| 4
| 1
| 24
| 1
| 24
| 0
| 2
| 0.42
| 1
| 6
| 0
| 0
| 2
| 1
| 2
| 36
| 77
| 9
| 48
| 24
| 26
| 20
| 10
| 5
| 7
| 3
| 5
| 1
| 4
|
328,330
|
maicos-devel/maicos
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/maicos-devel_maicos/src/maicos/modules/velocityplanar.py
|
maicos.modules.velocityplanar.VelocityPlanar
|
from ..core import ProfilePlanarBase
import MDAnalysis as mda
import logging
from ..lib.util import render_docs
from ..lib.weights import velocity_weights
@render_docs
class VelocityPlanar(ProfilePlanarBase):
"""Velocity profiles in a cartesian geometry.
Reads in coordinates and velocities from a trajectory and calculates a velocity
:math:`[\\mathrm{Å/ps}]` or a flux per unit area :math:`[\\mathrm{Å^{-2}\\,ps^{-1}}]`
profile along a given axis.
The ``grouping`` keyword gives you fine control over the velocity profile, e.g. you
can choose atomic or molecular velocities. Note that if atomic velocities are
employed for complex compounds, a contribution corresponding to the vorticity
usually appears in the profile.
${CORRELATION_INFO_PLANAR}
Parameters
----------
${ATOMGROUP_PARAMETER}
sym_odd : bool
Parity of the profile. If :obj:`False`, the profile will be symmetrized. If
:obj:`True`, the profile is antisymmetrized. Only relevant in combination with
``sym``.
${VDIM_PARAMETER}
${FLUX_PARAMETER}
${PROFILE_PLANAR_CLASS_PARAMETERS}
${OUTPUT_PARAMETER}
Attributes
----------
${PROFILE_PLANAR_CLASS_ATTRIBUTES}
"""
def __init__(self, atomgroup: mda.AtomGroup, sym_odd: bool=False, vdim: int=0, flux: bool=False, dim: int=2, zmin: float | None=None, zmax: float | None=None, bin_width: float=1.0, bin_method: str='com', grouping: str='atoms', sym: bool=False, refgroup: mda.AtomGroup | None=None, unwrap: bool=True, pack: bool=True, jitter: float=0.0, concfreq: int=0, output: str='velocity.dat') -> None:
self._locals = locals()
if vdim not in [0, 1, 2]:
raise ValueError('Velocity dimension can only be x=0, y=1 or z=2.')
normalization = 'volume' if flux else 'number'
super().__init__(atomgroup=atomgroup, unwrap=unwrap, pack=pack, jitter=jitter, concfreq=concfreq, dim=dim, zmin=zmin, zmax=zmax, bin_width=bin_width, refgroup=refgroup, sym=sym, sym_odd=sym_odd, grouping=grouping, bin_method=bin_method, output=output, weighting_function=velocity_weights, weighting_function_kwargs={'vdim': vdim}, normalization=normalization)
def _prepare(self):
logging.info('Analysis of the velocity profile.')
super()._prepare()
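A sketch under the same assumptions as the previous examples, antisymmetrizing an x-velocity profile along z as one would for a Couette-like shear flow (symmetrization needs a ``refgroup`` to center the profile):
import MDAnalysis as mda
from maicos import VelocityPlanar  # assumed top-level export

u = mda.Universe("topol.tpr", "traj.trr")  # hypothetical inputs with velocities
vel = VelocityPlanar(u.atoms, vdim=0, dim=2, sym=True, sym_odd=True, refgroup=u.atoms).run()
vel.save()                                 # writes 'velocity.dat'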
|
@render_docs
class VelocityPlanar(ProfilePlanarBase):
'''Velocity profiles in a cartesian geometry.
Reads in coordinates and velocities from a trajectory and calculates a velocity
:math:`[\mathrm{Å/ps}]` or a flux per unit area :math:`[\mathrm{Å^{-2}\,ps^{-1}}]`
profile along a given axis.
The ``grouping`` keyword gives you fine control over the velocity profile, e.g. you
can choose atomic or molecular velocities. Note that if atomic velocities are
employed for complex compounds, a contribution corresponding to the vorticity
usually appears in the profile.
${CORRELATION_INFO_PLANAR}
Parameters
----------
${ATOMGROUP_PARAMETER}
sym_odd : bool
Parity of the profile. If :obj:`False`, the profile will be symmetrized. If
:obj:`True`, the profile is antisymmetrized. Only relevant in combination with
``sym``.
${VDIM_PARAMETER}
${FLUX_PARAMETER}
${PROFILE_PLANAR_CLASS_PARAMETERS}
${OUTPUT_PARAMETER}
Attributes
----------
${PROFILE_PLANAR_CLASS_ATTRIBUTES}
'''
def __init__(self, atomgroup: mda.AtomGroup, sym_odd: bool=False, vdim: int=0, flux: bool=False, dim: int=2, zmin: float | None=None, zmax: float | None=None, bin_width: float=1.0, bin_method: str='com', grouping: str='atoms', sym: bool=False, refgroup: mda.AtomGroup | None=None, unwrap: bool=True, pack: bool=True, jitter: float=0.0, concfreq: int=0, output: str='velocity.dat') -> None:
pass
def _prepare(self):
pass
| 4
| 1
| 24
| 1
| 24
| 0
| 2
| 0.5
| 1
| 6
| 0
| 0
| 2
| 1
| 2
| 31
| 81
| 9
| 48
| 24
| 26
| 24
| 10
| 5
| 7
| 3
| 4
| 1
| 4
|
328,331
|
maicos-devel/maicos
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/maicos-devel_maicos/versioneer.py
|
versioneer.NotThisMethod
|
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
|
class NotThisMethod(Exception):
'''Exception raised if a method is not valid for the current scenario.'''
pass
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 10
| 2
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
| 0
| 3
| 0
| 0
|
328,332
|
maicos-devel/maicos
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/maicos-devel_maicos/versioneer.py
|
versioneer.VersioneerBadRootError
|
class VersioneerBadRootError(Exception):
"""The project root directory is unknown or missing key files."""
|
class VersioneerBadRootError(Exception):
'''The project root directory is unknown or missing key files.'''
pass
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 10
| 2
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
| 0
| 3
| 0
| 0
|
328,333
|
maicos-devel/maicos
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/maicos-devel_maicos/versioneer.py
|
versioneer.VersioneerConfig
|
from typing import Any, Callable, cast, Dict, List, Optional, Tuple, Union
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
VCS: str
style: str
tag_prefix: str
versionfile_source: str
versionfile_build: Optional[str]
parentdir_prefix: Optional[str]
verbose: Optional[bool]
|
class VersioneerConfig:
'''Container for Versioneer configuration parameters.'''
pass
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0.13
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 10
| 1
| 8
| 1
| 7
| 1
| 8
| 1
| 7
| 0
| 0
| 0
| 0
|
328,334
|
NBBotz/Auto-Filter-Bot
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/NBBotz_Auto-Filter-Bot/Lucia/Bot/__init__.py
|
Bot.SilentXBot
|
from typing import Union, Optional, AsyncGenerator
from pyrogram import Client
class SilentXBot(Client):
def __init__(self):
super().__init__(name=SESSION, api_id=API_ID, api_hash=API_HASH, bot_token=BOT_TOKEN, workers=50, plugins={'root': 'plugins'}, sleep_threshold=5)
async def iter_messages(self, chat_id: Union[int, str], limit: int, offset: int=0) -> Optional[AsyncGenerator['types.Message', None]]:
"""Iterate through a chat sequentially.
This convenience method does the same as repeatedly calling :meth:`~pyrogram.Client.get_messages` in a loop, thus saving
you from the hassle of setting up boilerplate code. It is useful for getting the whole chat messages with a
single call.
Parameters:
chat_id (``int`` | ``str``):
Unique identifier (int) or username (str) of the target chat.
For your personal cloud (Saved Messages) you can simply use "me" or "self".
For a contact that exists in your Telegram address book you can use his phone number (str).
limit (``int``):
Identifier of the last message to be returned.
offset (``int``, *optional*):
Identifier of the first message to be returned.
Defaults to 0.
Returns:
``Generator``: A generator yielding :obj:`~pyrogram.types.Message` objects.
Example:
.. code-block:: python
async for message in app.iter_messages("pyrogram", 1, 15000):
print(message.text)
"""
current = offset
while True:
new_diff = min(200, limit - current)
if new_diff <= 0:
return
messages = await self.get_messages(chat_id, list(range(current, current + new_diff + 1)))
for message in messages:
yield message
current += 1
|
class SilentXBot(Client):
def __init__(self):
pass
async def iter_messages(self, chat_id: Union[int, str], limit: int, offset: int=0) -> Optional[AsyncGenerator['types.Message', None]]:
'''Iterate through a chat sequentially.
This convenience method does the same as repeatedly calling :meth:`~pyrogram.Client.get_messages` in a loop, thus saving
you from the hassle of setting up boilerplate code. It is useful for getting the whole chat messages with a
single call.
Parameters:
chat_id (``int`` | ``str``):
Unique identifier (int) or username (str) of the target chat.
For your personal cloud (Saved Messages) you can simply use "me" or "self".
For a contact that exists in your Telegram address book you can use his phone number (str).
limit (``int``):
Identifier of the last message to be returned.
offset (``int``, *optional*):
Identifier of the first message to be returned.
Defaults to 0.
Returns:
``Generator``: A generator yielding :obj:`~pyrogram.types.Message` objects.
Example:
.. code-block:: python
async for message in app.iter_messages("pyrogram", 1, 15000):
print(message.text)
'''
pass
| 3
| 1
| 24
| 1
| 13
| 11
| 3
| 0.81
| 1
| 5
| 0
| 0
| 2
| 0
| 2
| 2
| 49
| 2
| 26
| 12
| 18
| 21
| 13
| 7
| 10
| 4
| 1
| 2
| 5
|
328,335
|
NBBotz/Auto-Filter-Bot
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/NBBotz_Auto-Filter-Bot/Script.py
|
Script.script
|
class script(object):
START_TXT = '<b>\u200b\u200b\u200bʜɪɪ {} 👋, \nɪ ᴀᴍ ᴛʜᴇ ᴍᴏꜱᴛ ᴘᴏᴡᴇʀꜰᴜʟʟ ᴍᴇᴅɪᴀ ᴘʀᴏᴠɪᴅᴇʀ ʙᴏᴛ. ɪ ᴄᴀɴ ᴘʀᴏᴠɪᴅᴇᴅ ᴀʟʟ ʟᴀᴛᴇꜱᴛ ᴍᴏᴠɪᴇꜱ ᴏʀ ꜱᴇʀɪᴇꜱ. ᴊᴜꜱᴛ ꜱᴇɴᴅ ᴍᴇ ᴛʜᴇ ᴄᴏʀʀᴇᴄᴛ ɴᴀᴍᴇ .</b>'
FEATURES_TXT = '<b>ʜᴇʀᴇ ɪꜱ ᴀʟʟ ᴍʏ ꜰᴜɴᴛɪᴏɴꜱ.</b>'
ABOUT_TXT = "<b>╭────[ ᴍʏ ᴅᴇᴛᴀɪʟs ]────⍟\n├⍟ ᴍʏ ɴᴀᴍᴇ : <a href=https://t.me/{}>{}</a>\n├⍟ ᴏᴡɴᴇʀ : <a href={}>ᴏᴡɴᴇʀ</a> \n├⍟ ʟɪʙʀᴀʀʏ : <a href='https://docs.pyrogram.org/'>ᴘʏʀᴏɢʀᴀᴍ</a>\n├⍟ ʟᴀɴɢᴜᴀɢᴇ : <a href='https://www.python.org/download/releases/3.0/'>ᴘʏᴛʜᴏɴ 𝟹</a> \n├⍟ ᴅᴀᴛᴀʙᴀꜱᴇ : <a href='https://www.mongodb.com/'>ᴍᴏɴɢᴏ ᴅʙ</a> \n├⍟ ꜱᴇʀᴠᴇʀ : <a href='https://heroku.com/'>ʜᴇʀᴏᴋᴜ</a> \n├⍟ ꜱᴛᴀᴛᴜꜱ : ᴠ4.8 [ ꜱᴛᴀʙʟᴇ ]\n╰───────────────⍟</b>"
FORCESUB_TEXT = '<b>\nɪɴ ᴏʀᴅᴇʀ ᴛᴏ ɢᴇᴛ ᴛʜᴇ ᴍᴏᴠɪᴇ ʀᴇᴏ̨ᴜᴇsᴛᴇᴅ ʙʏ ʏᴏᴜ.\n\nʏᴏᴜ ᴡɪʟʟ ʜᴀᴠᴇ ᴛᴏ ᴊᴏɪɴ ᴏᴜʀ ᴏғғɪᴄɪᴀʟ ᴄʜᴀɴɴᴇʟ.\n\nғɪʀsᴛ, ᴄʟɪᴄᴋ ᴏɴ ᴛʜᴇ "Jᴏɪɴ ᴜᴘᴅᴀᴛᴇ Cʜᴀɴɴᴇʟ" ʙᴜᴛᴛᴏɴ, ᴛʜᴇɴ, ᴄʟɪᴄᴋ ᴏɴ ᴛʜᴇ "ʀᴇᴏ̨ᴜᴇsᴛ ᴛᴏ Jᴏɪɴ" ʙᴜᴛᴛᴏɴ.\n\nᴀғᴛᴇʀ ᴛʜᴀᴛ, ᴛʀʏ ᴀᴄᴄᴇssɪɴɢ ᴛʜᴀᴛ ᴍᴏᴠɪᴇ ᴛʜᴇɴ, ᴄʟɪᴄᴋ ᴏɴ ᴛʜᴇ "ᴛʀʏ ᴀɢᴀɪɴ" ʙᴜᴛᴛᴏɴ.\n </b>'
MULTI_STATUS_TXT = "<b>╭────[ ᴅᴀᴛᴀʙᴀsᴇ 1 ]────⍟</b>\n│\n├⋟ ᴀʟʟ ᴜsᴇʀs ⋟ <code>{}</code>\n├⋟ ᴀʟʟ ɢʀᴏᴜᴘs ⋟ <code>{}</code>\n├⋟ ᴘʀᴇᴍɪᴜᴍ ᴜꜱᴇʀꜱ ⋟ <code>{}</code>\n├⋟ ᴀʟʟ ꜰɪʟᴇs ⋟ <code>{}</code>\n├⋟ ᴜsᴇᴅ sᴛᴏʀᴀɢᴇ ⋟ <code>{}</code>\n├⋟ ꜰʀᴇᴇ sᴛᴏʀᴀɢᴇ ⋟ <code>{}</code>\n│\n<b>├────[ ᴅᴀᴛᴀʙᴀsᴇ 2 ]────⍟</b> \n│\n├⋟ ᴀʟʟ ꜰɪʟᴇs ⋟ <code>{}</code>\n├⋟ ꜱɪᴢᴇ ⋟ <code>{}</code>\n├⋟ ꜰʀᴇᴇ ⋟ <code>{}</code>\n│\n<b>├────[ 🤖 ʙᴏᴛ ᴅᴇᴛᴀɪʟs 🤖 ]────⍟</b> \n│\n├⋟ ᴜᴘᴛɪᴍᴇ ⋟ {}\n├⋟ ʀᴀᴍ ⋟ <code>{}%</code>\n├⋟ ᴄᴘᴜ ⋟ <code>{}%</code> \n│\n├⋟ ʙᴏᴛʜ ᴅʙ ꜰɪʟᴇ'ꜱ: <code>{}</code>\n│\n<b>╰─────────────────────⍟</b>"
STATUS_TXT = '<b>╭────[ ᴅᴀᴛᴀʙᴀsᴇ 1 ]────⍟</b>\n│\n├⋟ ᴀʟʟ ᴜsᴇʀs ⋟ <code>{}</code>\n├⋟ ᴀʟʟ ɢʀᴏᴜᴘs ⋟ <code>{}</code>\n├⋟ ᴘʀᴇᴍɪᴜᴍ ᴜꜱᴇʀꜱ ⋟ <code>{}</code>\n├⋟ ᴀʟʟ ꜰɪʟᴇs ⋟ <code>{}</code>\n├⋟ ᴜsᴇᴅ sᴛᴏʀᴀɢᴇ ⋟ <code>{}</code>\n├⋟ ꜰʀᴇᴇ sᴛᴏʀᴀɢᴇ ⋟ <code>{}</code>\n│\n<b>├────[ 🤖 ʙᴏᴛ ᴅᴇᴛᴀɪʟs 🤖 ]────⍟</b> \n│\n├⋟ ᴜᴘᴛɪᴍᴇ ⋟ {}\n├⋟ ʀᴀᴍ ⋟ <code>{}%</code>\n├⋟ ᴄᴘᴜ ⋟ <code>{}%</code> \n│\n<b>╰─────────────────────⍟</b>'
EARN_INFO = '<b><i><blockquote>💸 ʜᴏᴡ ᴛᴏ ᴇᴀʀɴ ᴍᴏɴᴇʏ ʙʏ ᴛʜɪs ʙᴏᴛ - </blockquote>\n\n1:- ʏᴏᴜ ᴍᴜꜱᴛ ʜᴀᴠᴇ ᴀᴛʟᴇᴀꜱᴛ ᴏɴᴇ ɢʀᴏᴜᴘ ᴡɪᴛʜ ᴍɪɴɪᴍᴜᴍ 100 ᴍᴇᴍʙᴇʀꜱ.\n\n2:- ᴍᴀᴋᴇ <a href=https://t.me/{}</a> ᴀᴅᴍɪɴ ɪɴ ʏᴏᴜʀ ɢʀᴏᴜᴘ.\n\n3:- ᴄʀᴇᴀᴛᴇ ᴀᴄᴄᴏᴜɴᴛ ᴏɴ ᴀɴʏ sʜᴏʀᴛɴᴇʀ ʟɪᴋᴇ ʏᴏᴜ ᴄᴀɴ ᴀʟsᴏ ᴜsᴇ ᴛʜɪs ʙᴇsᴛ sʜᴏʀᴛɴᴇʀ <a href=https://zipshort.net/ref/noyanbanerjee>ᴢɪᴘꜱʜᴏʀᴛ</a>.\n\n4:- ᴛʜᴇɴ ꜱᴇɴᴅ /settings ᴄᴏᴍᴍᴀᴅ ɪɴ ʏᴏᴜʀ ɢʀᴏᴜᴘ ᴀɴᴅ ᴇᴅɪᴛ ꜱᴇᴛᴛɪɴɢꜱ ᴀꜱ ʏᴏᴜʀ ᴡɪꜱᴇ\n\nʏᴏᴜ ᴄᴀɴ ᴄʜᴇᴄᴋ ʏᴏᴜʀ ᴀʟʟ ᴅᴇᴛᴀɪʟs ʙʏ /details ᴄᴏᴍᴍᴀɴᴅ\n\n💯 ɴᴏᴛᴇ - ᴛʜɪs ʙᴏᴛ ɪs ꜰʀᴇᴇ ᴛᴏ ᴀʟʟ, ʏᴏᴜ ᴄᴀɴ ᴜsᴇ ᴛʜɪs ʙᴏᴛ ɪɴ ʏᴏᴜʀ ɢʀᴏᴜᴘs ᴀɴᴅ ᴇᴀʀɴ ᴜɴʟɪᴍɪᴛᴇᴅ ᴍᴏɴᴇʏ.</i></b>'
VERIFICATION_TEXT = '<b><i>👋 ʜᴇʏ {},\n\n📌 ʏᴏᴜ ᴀʀᴇ ɴᴏᴛ ᴠᴇʀɪꜰɪᴇᴅ ᴛᴏᴅᴀʏ, ᴘʟᴇᴀꜱᴇ ᴄʟɪᴄᴋ ᴏɴ ᴠᴇʀɪꜰʏ & ɢᴇᴛ ᴜɴʟɪᴍɪᴛᴇᴅ ᴀᴄᴄᴇꜱꜱ ꜰᴏʀ ᴛɪʟʟ ɴᴇxᴛ ᴠᴇʀɪꜰɪᴄᴀᴛɪᴏɴ.\n\n#ᴠᴇʀɪꜰɪᴄᴀᴛɪᴏɴ:- 1/3 ✓\n\nɪꜰ ʏᴏᴜ ᴡᴀɴᴛ ᴅɪʀᴇᴄᴛ ꜰɪʟᴇs ᴛʜᴇɴ ʏᴏᴜ ᴄᴀɴ ᴛᴀᴋᴇ ᴘʀᴇᴍɪᴜᴍ sᴇʀᴠɪᴄᴇ (ɴᴏ ɴᴇᴇᴅ ᴛᴏ ᴠᴇʀɪꜰʏ).</i></b>'
VERIFY_COMPLETE_TEXT = '<b><i>👋 ʜᴇʏ {},\n\nʏᴏᴜ ʜᴀᴠᴇ ᴄᴏᴍᴘʟᴇᴛᴇᴅ ᴛʜᴇ 1ꜱᴛ ᴠᴇʀɪꜰɪᴄᴀᴛɪᴏɴ ✓\n\nɴᴏᴡ ʏᴏᴜ ʜᴀᴠᴇ ᴜɴʟɪᴍɪᴛᴇᴅ ᴀᴄᴄᴇss ꜰᴏʀ ɴᴇxᴛ ᴠᴇʀɪꜰɪᴄᴀᴛɪᴏɴ.</i></b>'
SECOND_VERIFICATION_TEXT = '<b><i>👋 ʜᴇʏ {},\n\n📌 ʏᴏᴜ ᴀʀᴇ ɴᴏᴛ ᴠᴇʀɪꜰɪᴇᴅ, ᴛᴀᴘ ᴏɴ ᴛʜᴇ ᴠᴇʀɪꜰʏ ʟɪɴᴋ ᴀɴᴅ ɢᴇᴛ ᴜɴʟɪᴍɪᴛᴇᴅ ᴀᴄᴄᴇss ꜰᴏʀ ᴛɪʟʟ ɴᴇxᴛ ᴠᴇʀɪғɪᴄᴀᴛɪᴏɴ.\n\n#ᴠᴇʀɪꜰɪᴄᴀᴛɪᴏɴ:- 2/3 ✓\n\nɪꜰ ʏᴏᴜ ᴡᴀɴᴛ ᴅɪʀᴇᴄᴛ ꜰɪʟᴇs ᴛʜᴇɴ ʏᴏᴜ ᴄᴀɴ ᴛᴀᴋᴇ ᴘʀᴇᴍɪᴜᴍ sᴇʀᴠɪᴄᴇ (ɴᴏ ɴᴇᴇᴅ ᴛᴏ ᴠᴇʀɪꜰʏ).</i></b>'
SECOND_VERIFY_COMPLETE_TEXT = '<b><i>👋 ʜᴇʏ {},\n \nʏᴏᴜ ʜᴀᴠᴇ ᴄᴏᴍᴘʟᴇᴛᴇᴅ ᴛʜᴇ 2ɴᴅ ᴠᴇʀɪꜰɪᴄᴀᴛɪᴏɴ ✓\n\nɴᴏᴡ ʏᴏᴜ ʜᴀᴠᴇ ᴜɴʟɪᴍɪᴛᴇᴅ ᴀᴄᴄᴇss ꜰᴏʀ ɴᴇxᴛ ᴠᴇʀɪꜰɪᴄᴀᴛɪᴏɴ.</i></b>'
THIRDT_VERIFICATION_TEXT = '<b><i>👋 ʜᴇʏ {},\n \n📌 ʏᴏᴜ ᴀʀᴇ ɴᴏᴛ ᴠᴇʀɪꜰɪᴇᴅ, ᴛᴀᴘ ᴏɴ ᴛʜᴇ ᴠᴇʀɪꜰʏ ʟɪɴᴋ & ɢᴇᴛ ᴜɴʟɪᴍɪᴛᴇᴅ ᴀᴄᴄᴇss ꜰᴏʀ ɴᴇxᴛ ꜰᴜʟʟ ᴅᴀʏ.</u>\n\n#ᴠᴇʀɪꜰɪᴄᴀᴛɪᴏɴ:- 3/3 ✓\n\nɪꜰ ʏᴏᴜ ᴡᴀɴᴛ ᴅɪʀᴇᴄᴛ ꜰɪʟᴇs ᴛʜᴇɴ ʏᴏᴜ ᴄᴀɴ ᴛᴀᴋᴇ ᴘʀᴇᴍɪᴜᴍ sᴇʀᴠɪᴄᴇ (ɴᴏ ɴᴇᴇᴅ ᴛᴏ ᴠᴇʀɪꜰʏ)</i></b>'
THIRDT_VERIFY_COMPLETE_TEXT = '<b><i>👋 ʜᴇʏ {},\n \nʏᴏᴜ ʜᴀᴠᴇ ᴄᴏᴍᴘʟᴇᴛᴇᴅ ᴛʜᴇ 3ʀᴅ ᴠᴇʀɪꜰɪᴄᴀᴛɪᴏɴ ✓\n\nɴᴏᴡ ʏᴏᴜ ʜᴀᴠᴇ ᴜɴʟɪᴍɪᴛᴇᴅ ᴀᴄᴄᴇss ꜰᴏʀ ɴᴇxᴛ ꜰᴜʟʟ ᴅᴀʏ.</i></b>'
VERIFIED_LOG_TEXT = 'ᴜꜱᴇʀ ᴠᴇʀɪꜰɪᴇᴅ ꜱᴜᴄᴄᴇꜱꜱꜰᴜʟʟʏ ✓\n\n👤 ɴᴀᴍᴇ:- {} [ <code>{}</code> ]\n\n📆 ᴅᴀᴛᴇ:- <code>{} </code>\n\n#Verificaton_{}_Completed'
LOG_TEXT_G = '#NewGroup\n \nGʀᴏᴜᴘ = {}\nIᴅ = <code>{}</code>\nTᴏᴛᴀʟ Mᴇᴍʙᴇʀs = <code>{}</code>\nAᴅᴅᴇᴅ Bʏ - {}\n'
LOG_TEXT_P = '#NewUser\n \nIᴅ - <code>{}</code>\nNᴀᴍᴇ - {}\n'
ALRT_TXT = "ʜᴇʟʟᴏ {},\nᴛʜɪꜱ ɪꜱ ɴᴏᴛ ʏᴏᴜʀ ᴍᴏᴠɪᴇ ʀᴇǫᴜᴇꜱᴛ,\nʀᴇǫᴜᴇꜱᴛ ʏᴏᴜʀ'ꜱ..."
OLD_ALRT_TXT = 'ʜᴇʏ {},\nʏᴏᴜ ᴀʀᴇ ᴜꜱɪɴɢ ᴏɴᴇ ᴏꜰ ᴍʏ ᴏʟᴅ ᴍᴇꜱꜱᴀɢᴇꜱ, \nᴘʟᴇᴀꜱᴇ ꜱᴇɴᴅ ᴛʜᴇ ʀᴇǫᴜᴇꜱᴛ ᴀɢᴀɪɴ.'
CUDNT_FND = "ɪ ᴄᴏᴜʟᴅɴ'ᴛ ꜰɪɴᴅ ᴀɴʏᴛʜɪɴɢ ʀᴇʟᴀᴛᴇᴅ ᴛᴏ {}\nᴅɪᴅ ʏᴏᴜ ᴍᴇᴀɴ ᴀɴʏ ᴏɴᴇ ᴏꜰ ᴛʜᴇꜱᴇ ?"
I_CUDNT = '<b><i>ᴛʜɪꜱ ᴍᴏᴠɪᴇ ɪꜱ ɴᴏᴛ ᴄᴜʀʀᴇɴᴛʟʏ ᴀᴠᴀɪʟᴀʙʟᴇ.\n\nɪᴛ ʜᴀꜱ ᴇɪᴛʜᴇʀ ɴᴏᴛ ʙᴇᴇɴ ʀᴇʟᴇᴀꜱᴇᴅ ᴏʀ ʜᴀꜱ ɴᴏᴛ ʏᴇᴛ ʙᴇᴇɴ ᴀᴅᴅᴇᴅ ᴛᴏ ᴛʜᴇ ᴅᴀᴛᴀʙᴀꜱᴇ.</i></b>'
I_CUD_NT = '<b><i>ᴛʜɪꜱ ᴍᴏᴠɪᴇ ɪꜱ ɴᴏᴛ ᴄᴜʀʀᴇɴᴛʟʏ ᴀᴠᴀɪʟᴀʙʟᴇ.\n\nɪᴛ ʜᴀꜱ ᴇɪᴛʜᴇʀ ɴᴏᴛ ʙᴇᴇɴ ʀᴇʟᴇᴀꜱᴇᴅ ᴏʀ ʜᴀꜱ ɴᴏᴛ ʏᴇᴛ ʙᴇᴇɴ ᴀᴅᴅᴇᴅ ᴛᴏ ᴛʜᴇ ᴅᴀᴛᴀʙᴀꜱᴇ.</i></b>'
MVE_NT_FND = '<b><i>ᴛʜɪꜱ ᴍᴏᴠɪᴇ ɪꜱ ɴᴏᴛ ᴄᴜʀʀᴇɴᴛʟʏ ᴀᴠᴀɪʟᴀʙʟᴇ.\n\nɪᴛ ʜᴀꜱ ᴇɪᴛʜᴇʀ ɴᴏᴛ ʙᴇᴇɴ ʀᴇʟᴇᴀꜱᴇᴅ ᴏʀ ʜᴀꜱ ɴᴏᴛ ʏᴇᴛ ʙᴇᴇɴ ᴀᴅᴅᴇᴅ ᴛᴏ ᴛʜᴇ ᴅᴀᴛᴀʙᴀꜱᴇ.</i></b>'
TOP_ALRT_MSG = 'ꜱᴇᴀʀᴄʜɪɴɢ ꜰᴏʀ ǫᴜᴇʀʏ ɪɴ ᴍʏ ᴅᴀᴛᴀʙᴀꜱᴇ...'
MELCOW_ENG = "<b>👋 ʜᴇʏ {},\n\n🍁 ᴡᴇʟᴄᴏᴍᴇ ᴛᴏ\n🌟 {} \n\n🔍 ʜᴇʀᴇ ʏᴏᴜ ᴄᴀɴ ꜱᴇᴀʀᴄʜ ʏᴏᴜʀ ꜰᴀᴠᴏᴜʀɪᴛᴇ ᴍᴏᴠɪᴇꜱ ᴏʀ ꜱᴇʀɪᴇꜱ ʙʏ ᴊᴜꜱᴛ ᴛʏᴘɪɴɢ ɪᴛ'ꜱ ɴᴀᴍᴇ 🔎\n\n⚠️ ɪꜰ ʏᴏᴜ'ʀᴇ ʜᴀᴠɪɴɢ ᴀɴʏ ᴘʀᴏʙʟᴇᴍ ʀᴇɢᴀʀᴅɪɴɢ ᴅᴏᴡɴʟᴏᴀᴅɪɴɢ ᴏʀ ꜱᴏᴍᴇᴛʜɪɴɢ ᴇʟꜱᴇ ᴛʜᴇɴ ᴍᴇꜱꜱᴀɢᴇ ʜᴇʀᴇ 👇</b>"
DISCLAIMER_TXT = '\n<b>ᴛʜɪꜱ ɪꜱ ᴀɴ ᴏᴘᴇɴ ꜱᴏᴜʀᴄᴇ ᴘʀᴏᴊᴇᴄᴛ.\n\nᴀʟʟ ᴛʜᴇ ꜰɪʟᴇꜱ ɪɴ ᴛʜɪꜱ ʙᴏᴛ ᴀʀᴇ ꜰʀᴇᴇʟʏ ᴀᴠᴀɪʟᴀʙʟᴇ ᴏɴ ᴛʜᴇ ɪɴᴛᴇʀɴᴇᴛ ᴏʀ ᴘᴏꜱᴛᴇᴅ ʙʏ ꜱᴏᴍᴇʙᴏᴅʏ ᴇʟꜱᴇ. ᴊᴜꜱᴛ ꜰᴏʀ ᴇᴀꜱʏ ꜱᴇᴀʀᴄʜɪɴɢ ᴛʜɪꜱ ʙᴏᴛ ɪꜱ ɪɴᴅᴇxɪɴɢ ꜰɪʟᴇꜱ ᴡʜɪᴄʜ ᴀʀᴇ ᴀʟʀᴇᴀᴅʏ ᴜᴘʟᴏᴀᴅᴇᴅ ᴏɴ ᴛᴇʟᴇɢʀᴀᴍ. ᴡᴇ ʀᴇꜱᴘᴇᴄᴛ ᴀʟʟ ᴛʜᴇ ᴄᴏᴘʏʀɪɢʜᴛ ʟᴀᴡꜱ ᴀɴᴅ ᴡᴏʀᴋꜱ ɪɴ ᴄᴏᴍᴘʟɪᴀɴᴄᴇ ᴡɪᴛʜ ᴅᴍᴄᴀ ᴀɴᴅ ᴇᴜᴄᴅ. ɪꜰ ᴀɴʏᴛʜɪɴɢ ɪꜱ ᴀɢᴀɪɴꜱᴛ ʟᴀᴡ ᴘʟᴇᴀꜱᴇ ᴄᴏɴᴛᴀᴄᴛ ᴍᴇ ꜱᴏ ᴛʜᴀᴛ ɪᴛ ᴄᴀɴ ʙᴇ ʀᴇᴍᴏᴠᴇᴅ ᴀꜱᴀᴘ. ɪᴛ ɪꜱ ꜰᴏʀʙɪʙʙᴇɴ ᴛᴏ ᴅᴏᴡɴʟᴏᴀᴅ, ꜱᴛʀᴇᴀᴍ, ʀᴇᴘʀᴏᴅᴜᴄᴇ, ꜱʜᴀʀᴇ ᴏʀ ᴄᴏɴꜱᴜᴍᴇ ᴄᴏɴᴛᴇɴᴛ ᴡɪᴛʜᴏᴜᴛ ᴇxᴘʟɪᴄɪᴛ ᴘᴇʀᴍɪꜱꜱɪᴏɴ ꜰʀᴏᴍ ᴛʜᴇ ᴄᴏɴᴛᴇɴᴛ ᴄʀᴇᴀᴛᴏʀ ᴏʀ ʟᴇɢᴀʟ ᴄᴏᴘʏʀɪɢʜᴛ ʜᴏʟᴅᴇʀ. ɪꜰ ʏᴏᴜ ʙᴇʟɪᴇᴠᴇ ᴛʜɪꜱ ʙᴏᴛ ɪꜱ ᴠɪᴏʟᴀᴛɪɴɢ ʏᴏᴜʀ ɪɴᴛᴇʟʟᴇᴄᴛᴜᴀʟ ᴘʀᴏᴘᴇʀᴛʏ, ᴄᴏɴᴛᴀᴄᴛ ᴛʜᴇ ʀᴇꜱᴘᴇᴄᴛɪᴠᴇ ᴄʜᴀɴɴᴇʟꜱ ꜰᴏʀ ʀᴇᴍᴏᴠᴀʟ. ᴛʜᴇ ʙᴏᴛ ᴅᴏᴇꜱ ɴᴏᴛ ᴏᴡɴ ᴀɴʏ ᴏꜰ ᴛʜᴇꜱᴇ ᴄᴏɴᴛᴇɴᴛꜱ, ɪᴛ ᴏɴʟʏ ɪɴᴅᴇx ᴛʜᴇ ꜰɪʟᴇꜱ ꜰʀᴏᴍ ᴛᴇʟᴇɢʀᴀᴍ. \n</b>'
PREMIUM_TEXT = "<blockquote>🎖️ <b>ᴀᴠᴀɪʟᴀʙʟᴇ ᴘʟᴀɴs</b></blockquote>\n\n◉ 07 ᴅᴀʏꜱ - 15 ₹ / 15 ꜱᴛᴀʀ \n◉ 15 ᴅᴀʏꜱ - 30 ₹ / 30 ꜱᴛᴀʀ \n◉ 01 ᴍᴏɴᴛʜꜱ - 60 ₹ / 60 ꜱᴛᴀʀ \n◉ 02 ᴍᴏɴᴛʜꜱ - 120 ₹ / 120 ꜱᴛᴀʀ \n◉ 03 ᴍᴏɴᴛʜꜱ - 220 ₹ / 220 ꜱᴛᴀʀ\n\n•─────•─────────•─────•\n🏷️ <a href='https://t.me/+blcE2jS-iGtkMjNl'>ꜱᴜʙꜱᴄʀɪᴘᴛɪᴏɴ ᴘʀᴏᴏꜰ</a>\n\n‼️ ᴍᴜꜱᴛ ꜱᴇɴᴅ ꜱᴄʀᴇᴇɴꜱʜᴏᴛ ᴀꜰᴛᴇʀ ᴘᴀʏᴍᴇɴᴛ.\n‼️ ᴀꜰᴛᴇʀ ꜱᴇɴᴅɪɴɢ ꜱᴄʀᴇᴇɴꜱʜᴏᴛ ɢɪᴠᴇ ᴜꜱ ꜱᴏᴍᴇᴛɪᴍᴇꜱ ᴛᴏ ᴀᴅᴅ ʏᴏᴜ ɪɴ ᴘʀᴇᴍɪᴜᴍ ʟɪꜱᴛ."
PREMIUM_STAR_TEXT = '<b><blockquote>ᴘᴀʏᴍᴇɴᴛ ᴍᴇᴛʜᴏᴅ: ᴛᴇʟᴇɢʀᴀᴍ ꜱᴛᴀʀꜱ ⭐</blockquote>\n\nɴᴏᴡ ʏᴏᴜ ᴄᴀɴ ʙᴜʏ ᴏᴜʀ ᴘʀᴇᴍɪᴜᴍ ꜱᴇʀᴠɪᴄᴇ ᴜꜱɪɴɢ ᴛᴇʟᴇɢʀᴀᴍ ꜱᴛᴀʀꜱ. \n\nɪꜰ ʏᴏᴜ ꜰᴀᴄᴇ ᴀɴʏ ᴘʀᴏʙʟᴇᴍ, ᴛᴀᴋᴇ ᴀ ꜱᴄʀᴇᴇɴꜱʜᴏᴛ ᴀɴᴅ ꜱᴇɴᴅ ɪᴛ ᴛᴏ - @SilentXBotz\n\nꜱᴇʟᴇᴄᴛ ʏᴏᴜʀ ᴅᴇꜱɪʀᴇᴅ ᴀᴍᴏᴜɴᴛ ᴀɴᴅ ᴘᴜʀᴄʜᴀꜱᴇ ᴀ ꜱᴜʙꜱᴄʀɪᴘᴛɪᴏɴ 👇.</b>\n'
PREMIUM_UPI_TEXT = '<b><blockquote>ᴘᴀʏᴍᴇɴᴛ ᴍᴇᴛʜᴏᴅ: ᴜᴘɪ</blockquote>\n\nʏᴏᴜ ᴄᴀɴ ᴘᴜʀᴄʜᴀꜱᴇ ᴘʀᴇᴍɪᴜᴍ ᴛʜʀᴏᴜɢʜ ᴜᴘɪ , ɴᴇᴛ ʙᴀɴᴋɪɴɢ.\n\n💳 ᴜᴘɪ ɪᴅ - <code>ɴᴏ ᴀᴠᴀɪʟᴀʙʟᴇ ʀɪɢʜᴛ ɴᴏᴡ</code>\n\n💢 ᴍᴜꜱᴛ ꜱᴇɴᴅ ꜱᴄʀᴇᴇɴꜱʜᴏᴛ ᴀꜰᴛᴇʀ ᴘᴀʏᴍᴇɴᴛ.\n\n‼️ ᴀꜰᴛᴇʀ ꜱᴇɴᴅɪɴɢ ꜱᴄʀᴇᴇɴꜱʜᴏᴛ ᴘʟᴇᴀꜱᴇ ɢɪᴠᴇ ᴜꜱ ꜱᴏᴍᴇᴛɪᴍᴇ ᴛᴏ ᴀᴅᴅ ʏᴏᴜ ɪɴ ᴘʀᴇᴍɪᴜᴍ ʟɪꜱᴛ.</b>'
BPREMIUM_TXT = '<blockquote>🎁 <b>ᴘʀᴇᴍɪᴜᴍ ꜰᴇᴀᴛᴜʀᴇꜱ</b> :</blockquote>\n\n○ ɴᴏ ɴᴇᴇᴅ ᴛᴏ ᴠᴇʀɪꜰʏ\n○ ɴᴏ ɴᴇᴇᴅ ᴛᴏ ᴏᴘᴇɴ ʟɪɴᴋꜱ\n○ ᴅɪʀᴇᴄᴛ ꜰɪʟᴇꜱ \n○ ᴀᴅ-ꜰʀᴇᴇ ᴇxᴘᴇʀɪᴇɴᴄᴇ \n○ ʜɪɢʜ-ꜱᴘᴇᴇᴅ ᴅᴏᴡɴʟᴏᴀᴅ ʟɪɴᴋ \n○ ᴍᴜʟᴛɪ-ᴘʟᴀʏᴇʀ ꜱᴛʀᴇᴀᴍɪɴɢ ʟɪɴᴋꜱ \n○ ᴜɴʟɪᴍɪᴛᴇᴅ ᴍᴏᴠɪᴇꜱ & ꜱᴇʀɪᴇꜱ \n○ ꜰᴜʟʟ ᴀᴅᴍɪɴ ꜱᴜᴘᴘᴏʀᴛ \n○ ʀᴇǫᴜᴇꜱᴛ ᴡɪʟʟ ʙᴇ ᴄᴏᴍᴘʟᴇᴛᴇᴅ ɪɴ 1ʜ [ ɪꜰ ᴀᴠᴀɪʟᴀʙʟᴇ ]\n\n• ʏᴏᴜ ᴄᴀɴ ɢᴇᴛ ᴘʀᴇᴍɪᴜᴍ ʙʏ ʀᴇꜰᴇʀɪɴɢ ʏᴏᴜʀ ꜰʀɪᴇɴᴅꜱ ᴏʀ ʏᴏᴜ ᴄᴀɴ ʙᴜʏ ᴘʀᴇᴍɪᴜᴍ ꜱᴇʀᴠɪᴄᴇ \n\n•─────•─────────•─────•\n◉ ᴄʜᴇᴄᴋ ʏᴏᴜʀ ᴀᴄᴛɪᴠᴇ ᴘʟᴀɴ : /myplan\n\n‼️ ᴀꜰᴛᴇʀ ꜱᴇɴᴅɪɴɢ ꜱᴄʀᴇᴇɴꜱʜᴏᴛ ɢɪᴠᴇ ᴜꜱ ꜱᴏᴍᴇᴛɪᴍᴇꜱ ᴛᴏ ᴀᴅᴅ ʏᴏᴜ ɪɴ ᴘʀᴇᴍɪᴜᴍ ʟɪꜱᴛ.'
NORSLTS = ' \n#NoResults\n\nIᴅ : <code>{}</code>\nNᴀᴍᴇ : {}\n\nMᴇꜱꜱᴀɢᴇ : <b>{}</b>'
CAPTION = '<b>{file_name}\nUploaded By: <a herf="https://t.me/SilentXBotz">[SilentXBotz]</a></b>'
IMDB_TEMPLATE_TXT = '\n<b>🏷 Title</b>: <a href={url}>{title}</a>\n🎭 Genres: {genres}\nYear: <a href={url}/releaseinfo>{year}</a>\n🌟 Rating: <a href={url}/ratings>{rating}</a> / 10 (based on {votes} user ratings.)\n📀 RunTime: {runtime} Minutes\n\n⏰ Result Shown in: {remaining_seconds} <i>seconds</i> 🔥\nRequested by : {message.from_user.mention}</b>'
RESTART_TXT = '\n<b>{} ʙᴏᴛ ʀᴇꜱᴛᴀʀᴛᴇᴅ ꜱᴜᴄᴄᴇꜱꜱꜰᴜʟʟʏ !\n\n📅 ᴅᴀᴛᴇ : <code>{}</code>\n⏰ ᴛɪᴍᴇ : <code>{}</code>\n🌐 ᴛɪᴍᴇᴢᴏɴᴇ : <code>ᴀꜱɪᴀ/ᴋᴏʟᴋᴀᴛᴀ</code>\n🛠️ ʙᴜɪʟᴅ ꜱᴛᴀᴛᴜꜱ : <code>V4.2 [ ꜱᴛᴀʙʟᴇ ]</code>\n</b>'
LOGO = "\n ____ _ _ _ __ ______ _ \n / ___|(_) | ___ _ __ | |_\\ \\/ / __ ) ___ | |_ ____\n \\___ \\| | |/ _ \\ '_ \\| __|\\ /| _ \\ / _ \\| __|_ /\n ___) | | | __/ | | | |_ / \\| |_) | (_) | |_ / / \n |____/|_|_|\\___|_| |_|\\__/_/\\_\\____/ \\___/ \\__/___|\n \n𝙱𝙾𝚃 𝚆𝙾𝚁𝙺𝙸𝙽𝙶 𝙿𝚁𝙾𝙿𝙴𝚁𝙻𝚈...."
ADMIN_CMD = "ʜᴇʏ 👋,\n\n📚 ʜᴇʀᴇ ᴀʀᴇ ᴍʏ ᴄᴏᴍᴍᴀɴᴅꜱ ʟɪꜱᴛ ꜰᴏʀ ᴀʟʟ ʙᴏᴛ ᴀᴅᴍɪɴꜱ ⇊\n\n• /movie_update - <code>ᴏɴ / ᴏғғ ᴀᴄᴄᴏʀᴅɪɴɢ ʏᴏᴜʀ ɴᴇᴇᴅᴇᴅ...</code> \n• /pm_search - <code>ᴘᴍ sᴇᴀʀᴄʜ ᴏɴ / ᴏғғ ᴀᴄᴄᴏʀᴅɪɴɢ ʏᴏᴜʀ ɴᴇᴇᴅᴇᴅ...</code>\n• /verifyon - <code>ᴛᴜʀɴ ᴏɴ ᴠᴇʀɪꜰɪᴄᴀᴛɪᴏɴ (ᴏɴʟʏ ᴡᴏʀᴋ ɪɴ ɢʀᴏᴜᴘ)</code>\n• /verifyoff - <code>ᴛᴜʀɴ ᴏꜰꜰ ᴠᴇʀɪꜰɪᴄᴀᴛɪᴏɴ (ᴏɴʟʏ ᴡᴏʀᴋ ɪɴ ɢʀᴏᴜᴘ)</code>\n• /logs - <code>ɢᴇᴛ ᴛʜᴇ ʀᴇᴄᴇɴᴛ ᴇʀʀᴏʀꜱ.</code>\n• /delete - <code>ᴅᴇʟᴇᴛᴇ ᴀ ꜱᴘᴇᴄɪꜰɪᴄ ꜰɪʟᴇ ꜰʀᴏᴍ ᴅʙ.</code>\n• /users - <code>ɢᴇᴛ ʟɪꜱᴛ ᴏꜰ ᴍʏ ᴜꜱᴇʀꜱ ᴀɴᴅ ɪᴅꜱ.</code>\n• /chats - <code>ɢᴇᴛ ʟɪꜱᴛ ᴏꜰ ᴍʏ ᴄʜᴀᴛꜱ ᴀɴᴅ ɪᴅꜱ.</code>\n• /leave - <code>ʟᴇᴀᴠᴇ ꜰʀᴏᴍ ᴀ ᴄʜᴀᴛ.</code>\n• /disable - <code>ᴅɪꜱᴀʙʟᴇ ᴀ ᴄʜᴀᴛ.</code>\n• /ban - <code>ʙᴀɴ ᴀ ᴜꜱᴇʀ.</code>\n• /unban - <code>ᴜɴʙᴀɴ ᴀ ᴜꜱᴇʀ.</code>\n• /channel - <code>ɢᴇᴛ ʟɪꜱᴛ ᴏꜰ ᴛᴏᴛᴀʟ ᴄᴏɴɴᴇᴄᴛᴇᴅ ɢʀᴏᴜᴘꜱ.</code>\n• /broadcast - <code>ʙʀᴏᴀᴅᴄᴀꜱᴛ ᴀ ᴍᴇꜱꜱᴀɢᴇ ᴛᴏ ᴀʟʟ ᴜꜱᴇʀꜱ.</code>\n• /grp_broadcast - <code>ʙʀᴏᴀᴅᴄᴀsᴛ ᴀ ᴍᴇssᴀɢᴇ ᴛᴏ ᴀʟʟ ᴄᴏɴɴᴇᴄᴛᴇᴅ ɢʀᴏᴜᴘs.</code>\n• /gfilter - <code>ᴀᴅᴅ ɢʟᴏʙᴀʟ ғɪʟᴛᴇʀs.</code>\n• /gfilters - <code>ᴠɪᴇᴡ ʟɪsᴛ ᴏғ ᴀʟʟ ɢʟᴏʙᴀʟ ғɪʟᴛᴇʀs.</code>\n• /delg - <code>ᴅᴇʟᴇᴛᴇ ᴀ sᴘᴇᴄɪғɪᴄ ɢʟᴏʙᴀʟ ғɪʟᴛᴇʀ.</code>\n• /delallg - <code>ᴅᴇʟᴇᴛᴇ ᴀʟʟ Gғɪʟᴛᴇʀs ғʀᴏᴍ ᴛʜᴇ ʙᴏᴛ's ᴅᴀᴛᴀʙᴀsᴇ.</code>\n• /deletefiles - <code>ᴅᴇʟᴇᴛᴇ CᴀᴍRɪᴘ ᴀɴᴅ PʀᴇDVD ғɪʟᴇs ғʀᴏᴍ ᴛʜᴇ ʙᴏᴛ's ᴅᴀᴛᴀʙᴀsᴇ.</code>\n• /send - <code>ꜱᴇɴᴅ ᴍᴇꜱꜱᴀɢᴇ ᴛᴏ ᴀ ᴘᴀʀᴛɪᴄᴜʟᴀʀ ᴜꜱᴇʀ.</code>\n• /add_premium - <code>ᴀᴅᴅ ᴀɴʏ ᴜꜱᴇʀ ᴛᴏ ᴘʀᴇᴍɪᴜᴍ.</code>\n• /remove_premium - <code>ʀᴇᴍᴏᴠᴇ ᴀɴʏ ᴜꜱᴇʀ ꜰʀᴏᴍ ᴘʀᴇᴍɪᴜᴍ.</code>\n• /premium_users - <code>ɢᴇᴛ ʟɪꜱᴛ ᴏꜰ ᴘʀᴇᴍɪᴜᴍ ᴜꜱᴇʀꜱ.</code>\n• /get_premium - <code>ɢᴇᴛ ɪɴꜰᴏ ᴏꜰ ᴀɴʏ ᴘʀᴇᴍɪᴜᴍ ᴜꜱᴇʀ.</code>\n• /restart - <code>ʀᴇꜱᴛᴀʀᴛ ᴛʜᴇ ʙᴏᴛ.</code>"
GROUP_CMD = 'ʜᴇʏ 👋,\n📚 ʜᴇʀᴇ ᴀʀᴇ ᴍʏ ᴄᴏᴍᴍᴀɴᴅꜱ ʟɪꜱᴛ ꜰᴏʀ ᴄᴜꜱᴛᴏᴍɪᴢᴇᴅ ɢʀᴏᴜᴘꜱ ⇊\n\n• /settings - ᴄʜᴀɴɢᴇ ᴛʜᴇ ɢʀᴏᴜᴘ ꜱᴇᴛᴛɪɴɢꜱ ᴀꜱ ʏᴏᴜʀ ᴡɪꜱʜ.\n• /set_shortner - ꜱᴇᴛ ʏᴏᴜʀ 1ꜱᴛ ꜱʜᴏʀᴛɴᴇʀ.\n• /set_shortner_2 - ꜱᴇᴛ ʏᴏᴜʀ 2ɴᴅ ꜱʜᴏʀᴛɴᴇʀ.\n• /set_shortner_3 - ꜱᴇᴛ ʏᴏᴜʀ 3ʀᴅ ꜱʜᴏʀᴛɴᴇʀ.\n• /set_tutorial - ꜱᴇᴛ ʏᴏᴜʀ 1ꜱᴛ ᴛᴜᴛᴏʀɪᴀʟ ᴠɪᴅᴇᴏ .\n• /set_tutorial_2 - ꜱᴇᴛ ʏᴏᴜʀ 2ɴᴅ ᴛᴜᴛᴏʀɪᴀʟ ᴠɪᴅᴇᴏ .\n• /set_tutorial_3 - ꜱᴇᴛ ʏᴏᴜʀ 3ʀᴅ ᴛᴜᴛᴏʀɪᴀʟ ᴠɪᴅᴇᴏ .\n• /set_time - ꜱᴇᴛ 1ꜱᴛ ᴠᴇʀɪꜰɪᴄᴀᴛɪᴏɴ ɢᴀᴘ.\n• /set_time_2 - ꜱᴇᴛ 2ɴᴅ ᴠᴇʀɪꜰɪᴄᴀᴛɪᴏɴ ɢᴀᴘ.\n• /set_log_channel - ꜱᴇᴛ ᴠᴇʀɪꜰɪᴄᴀᴛɪᴏɴ ʟᴏɢ ᴄʜᴀɴɴᴇʟ.\n• /set_fsub - ꜱᴇᴛ ᴄᴜꜱᴛᴏᴍ ꜰᴏʀᴄᴇ ꜱᴜʙ ᴄʜᴀɴɴᴇʟ.\n• /remove_fsub - ʀᴇᴍᴏᴠᴇ ᴄᴜꜱᴛᴏᴍ ꜰᴏʀᴄᴇ ꜱᴜʙ ᴄʜᴀɴɴᴇʟ.\n• /reset_group - ʀᴇꜱᴇᴛ ʏᴏᴜʀ ꜱᴇᴛᴛɪɴɢꜱ.\n• /details - ᴄʜᴇᴄᴋ ʏᴏᴜʀ ꜱᴇᴛᴛɪɴɢꜱ.'
PAGE_TXT = 'ᴡʜʏ ᴀʀᴇ ʏᴏᴜ ꜱᴏ ᴄᴜʀɪᴏᴜꜱ ⁉️'
SOURCE_TXT = '<b>ՏOᑌᖇᑕᗴ ᑕOᗪᗴ :</b> 👇\nThis Is An Open-Source Project. You Can Use It Freely, But Selling The Source Code Is Strictly Prohibited.'
|
class script(object):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0.03
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 324
| 95
| 224
| 39
| 223
| 7
| 39
| 39
| 38
| 0
| 1
| 0
| 0
|
328,336
|
NBBotz/Auto-Filter-Bot
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/NBBotz_Auto-Filter-Bot/Lucia/util/config_parser.py
|
config_parser.TokenParser
|
from os import environ
from typing import Dict, Optional
class TokenParser:
def __init__(self, config_file: Optional[str]=None):
self.tokens = {}
self.config_file = config_file
def parse_from_env(self) -> Dict[int, str]:
self.tokens = dict(((c + 1, t) for c, (_, t) in enumerate(filter(lambda n: n[0].startswith('MULTI_TOKEN'), sorted(environ.items())))))
return self.tokens
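The one-line comprehension in parse_from_env is dense; the following equivalent, explicitly written loop (illustrative only, not part of the source) shows what it does: collect every MULTI_TOKEN* environment variable in sorted name order and number them from 1.
from os import environ
from typing import Dict

def parse_multi_tokens() -> Dict[int, str]:
    tokens: Dict[int, str] = {}
    count = 0
    for name, value in sorted(environ.items()):
        if name.startswith('MULTI_TOKEN'):  # e.g. MULTI_TOKEN1, MULTI_TOKEN2, ...
            count += 1
            tokens[count] = value           # keys are 1-based positions, not the name suffixes
    return tokens

# MULTI_TOKEN1=aaa, MULTI_TOKEN2=bbb  ->  {1: 'aaa', 2: 'bbb'}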
|
class TokenParser:
def __init__(self, config_file: Optional[str]=None):
pass
def parse_from_env(self) -> Dict[int, str]:
pass
| 3
| 0
| 7
| 0
| 7
| 0
| 1
| 0
| 0
| 5
| 0
| 0
| 2
| 2
| 2
| 2
| 15
| 1
| 14
| 6
| 11
| 0
| 7
| 5
| 4
| 1
| 0
| 0
| 2
|
328,337
|
NBBotz/Auto-Filter-Bot
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/NBBotz_Auto-Filter-Bot/Lucia/util/custom_dl.py
|
custom_dl.ByteStreamer
|
from pyrogram import Client, utils, raw
from pyrogram.session import Session, Auth
from pyrogram.file_id import FileId, FileType, ThumbnailSource
from Lucia.Bot import work_loads
from Lucia.server.exceptions import FIleNotFound
from pyrogram.errors import AuthBytesInvalid
from typing import Dict, Union
from .file_properties import get_file_ids
import asyncio
from logging_helper import LOGGER
class ByteStreamer:
def __init__(self, client: Client):
"""A custom class that holds the cache of a specific client and class functions.
attributes:
client: the client that the cache is for.
cached_file_ids: a dict of cached file IDs.
cached_file_properties: a dict of cached file properties.
functions:
generate_file_properties: returns the properties for a media of a specific message contained in Tuple.
generate_media_session: returns the media session for the DC that contains the media file.
yield_file: yield a file from telegram servers for streaming.
This is a modified version of the <https://github.com/eyaadh/megadlbot_oss/blob/master/mega/telegram/utils/custom_download.py>
Thanks to Eyaadh <https://github.com/eyaadh>
"""
self.clean_timer = 30 * 60
self.client: Client = client
self.cached_file_ids: Dict[int, FileId] = {}
asyncio.create_task(self.clean_cache())
async def get_file_properties(self, id: int) -> FileId:
"""
Returns the properties of a media of a specific message in a FileId class.
If the properties are cached, the cached result is returned;
otherwise the properties are generated from the message ID and cached.
"""
if id not in self.cached_file_ids:
await self.generate_file_properties(id)
LOGGER.info(f'Cached file properties for message with ID {id}')
return self.cached_file_ids[id]
async def generate_file_properties(self, id: int) -> FileId:
"""
Generates the properties of a media file on a specific message.
Returns the properties in a FileId class.
"""
file_id = await get_file_ids(self.client, BIN_CHANNEL, id)
LOGGER.info(f'Generated file ID and Unique ID for message with ID {id}')
if not file_id:
LOGGER.info(f'Message with ID {id} not found')
raise FIleNotFound
self.cached_file_ids[id] = file_id
LOGGER.info(f'Cached media message with ID {id}')
return self.cached_file_ids[id]
async def generate_media_session(self, client: Client, file_id: FileId) -> Session:
"""
Generates the media session for the DC that contains the media file.
This is required for getting the bytes from Telegram servers.
"""
media_session = client.media_sessions.get(file_id.dc_id, None)
if media_session is None:
if file_id.dc_id != await client.storage.dc_id():
media_session = Session(client, file_id.dc_id, await Auth(client, file_id.dc_id, await client.storage.test_mode()).create(), await client.storage.test_mode(), is_media=True)
await media_session.start()
for _ in range(6):
exported_auth = await client.invoke(raw.functions.auth.ExportAuthorization(dc_id=file_id.dc_id))
try:
await media_session.send(raw.functions.auth.ImportAuthorization(id=exported_auth.id, bytes=exported_auth.bytes))
break
except AuthBytesInvalid:
LOGGER.error(f'Invalid authorization bytes for DC {file_id.dc_id}')
continue
else:
await media_session.stop()
raise AuthBytesInvalid
else:
media_session = Session(client, file_id.dc_id, await client.storage.auth_key(), await client.storage.test_mode(), is_media=True)
await media_session.start()
LOGGER.info(f'Created media session for DC {file_id.dc_id}')
client.media_sessions[file_id.dc_id] = media_session
else:
LOGGER.info(f'Using cached media session for DC {file_id.dc_id}')
return media_session
@staticmethod
async def get_location(file_id: FileId) -> Union[raw.types.InputPhotoFileLocation, raw.types.InputDocumentFileLocation, raw.types.InputPeerPhotoFileLocation]:
"""
Returns the file location for the media file.
"""
file_type = file_id.file_type
if file_type == FileType.CHAT_PHOTO:
if file_id.chat_id > 0:
peer = raw.types.InputPeerUser(user_id=file_id.chat_id, access_hash=file_id.chat_access_hash)
elif file_id.chat_access_hash == 0:
peer = raw.types.InputPeerChat(chat_id=-file_id.chat_id)
else:
peer = raw.types.InputPeerChannel(channel_id=utils.get_channel_id(file_id.chat_id), access_hash=file_id.chat_access_hash)
location = raw.types.InputPeerPhotoFileLocation(peer=peer, volume_id=file_id.volume_id, local_id=file_id.local_id, big=file_id.thumbnail_source == ThumbnailSource.CHAT_PHOTO_BIG)
elif file_type == FileType.PHOTO:
location = raw.types.InputPhotoFileLocation(id=file_id.media_id, access_hash=file_id.access_hash, file_reference=file_id.file_reference, thumb_size=file_id.thumbnail_size)
else:
location = raw.types.InputDocumentFileLocation(id=file_id.media_id, access_hash=file_id.access_hash, file_reference=file_id.file_reference, thumb_size=file_id.thumbnail_size)
return location
async def yield_file(self, file_id: FileId, index: int, offset: int, first_part_cut: int, last_part_cut: int, part_count: int, chunk_size: int) -> Union[str, None]:
"""
Custom generator that yields the bytes of the media file.
Modded from <https://github.com/eyaadh/megadlbot_oss/blob/master/mega/telegram/utils/custom_download.py#L20>
Thanks to Eyaadh <https://github.com/eyaadh>
"""
client = self.client
work_loads[index] += 1
LOGGER.info(f'Starting to yield file with client {index}.')
media_session = await self.generate_media_session(client, file_id)
current_part = 1
location = await self.get_location(file_id)
try:
r = await media_session.send(raw.functions.upload.GetFile(location=location, offset=offset, limit=chunk_size))
if isinstance(r, raw.types.upload.File):
while True:
chunk = r.bytes
if not chunk:
break
elif part_count == 1:
yield chunk[first_part_cut:last_part_cut]
elif current_part == 1:
yield chunk[first_part_cut:]
elif current_part == part_count:
yield chunk[:last_part_cut]
else:
yield chunk
current_part += 1
offset += chunk_size
if current_part > part_count:
break
r = await media_session.send(raw.functions.upload.GetFile(location=location, offset=offset, limit=chunk_size))
except (TimeoutError, AttributeError):
pass
finally:
LOGGER.info(f'Finished yielding file with {current_part} parts.')
work_loads[index] -= 1
async def clean_cache(self) -> None:
"""
function to clean the cache to reduce memory usage
"""
while True:
await asyncio.sleep(self.clean_timer)
self.cached_file_ids.clear()
LOGGER.info('Cleaned the cache')
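The first_part_cut/last_part_cut/part_count arguments of yield_file are usually derived from an HTTP byte range on the caller side. A sketch of that arithmetic (the helper name and the 1 MiB chunk size are assumptions, not part of this class):
def range_to_parts(from_bytes: int, until_bytes: int, chunk_size: int = 1024 * 1024):
    offset = from_bytes - (from_bytes % chunk_size)  # chunk-aligned start offset
    first_part_cut = from_bytes - offset             # bytes to skip in the first chunk
    last_part_cut = (until_bytes % chunk_size) + 1   # bytes to keep from the last chunk
    part_count = (until_bytes // chunk_size) - (offset // chunk_size) + 1
    return offset, first_part_cut, last_part_cut, part_count

# Bytes 1_500_000..2_500_000 with 1 MiB chunks -> two chunks, trimmed at both ends
print(range_to_parts(1_500_000, 2_500_000))  # (1048576, 451424, 402849, 2)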
|
class ByteStreamer:
def __init__(self, client: Client):
'''A custom class that holds the cache of a specific client and class functions.
attributes:
client: the client that the cache is for.
cached_file_ids: a dict of cached file IDs.
cached_file_properties: a dict of cached file properties.
functions:
generate_file_properties: returns the properties for a media of a specific message contained in Tuple.
generate_media_session: returns the media session for the DC that contains the media file.
yield_file: yield a file from telegram servers for streaming.
This is a modified version of the <https://github.com/eyaadh/megadlbot_oss/blob/master/mega/telegram/utils/custom_download.py>
Thanks to Eyaadh <https://github.com/eyaadh>
'''
pass
async def get_file_properties(self, id: int) -> FileId:
'''
Returns the properties of a media of a specific message in a FileId class.
If the properties are cached, the cached result is returned;
otherwise the properties are generated from the message ID and cached.
'''
pass
async def generate_file_properties(self, id: int) -> FileId:
'''
Generates the properties of a media file on a specific message.
Returns the properties in a FileId class.
'''
pass
async def generate_media_session(self, client: Client, file_id: FileId) -> Session:
'''
Generates the media session for the DC that contains the media file.
This is required for getting the bytes from Telegram servers.
'''
pass
@staticmethod
async def get_location(file_id: FileId) -> Union[raw.types.InputPhotoFileLocation, raw.types.InputDocumentFileLocation, raw.types.InputPeerPhotoFileLocation]:
'''
Returns the file location for the media file.
'''
pass
async def yield_file(self, file_id: FileId, index: int, offset: int, first_part_cut: int, last_part_cut: int, part_count: int, chunk_size: int) -> Union[str, None]:
'''
Custom generator that yields the bytes of the media file.
Modded from <https://github.com/eyaadh/megadlbot_oss/blob/master/mega/telegram/utils/custom_download.py#L20>
Thanks to Eyaadh <https://github.com/eyaadh>
'''
pass
async def clean_cache(self) -> None:
'''
function to clean the cache to reduce memory usage
'''
pass
| 9
| 7
| 30
| 2
| 23
| 5
| 3
| 0.23
| 0
| 6
| 1
| 0
| 6
| 3
| 7
| 7
| 217
| 21
| 160
| 36
| 140
| 36
| 87
| 24
| 79
| 9
| 0
| 4
| 24
|
328,338
|
NBBotz/Auto-Filter-Bot
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/NBBotz_Auto-Filter-Bot/Lucia/server/exceptions.py
|
exceptions.FIleNotFound
|
class FIleNotFound(Exception):
message = 'File Not Found'
|
class FIleNotFound(Exception):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 10
| 2
| 0
| 2
| 2
| 1
| 0
| 2
| 2
| 1
| 0
| 3
| 0
| 0
|
328,339
|
NBBotz/Auto-Filter-Bot
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/NBBotz_Auto-Filter-Bot/Lucia/server/exceptions.py
|
exceptions.InvalidHash
|
class InvalidHash(Exception):
message = 'Invalid Hash'
|
class InvalidHash(Exception):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 10
| 2
| 0
| 2
| 2
| 1
| 0
| 2
| 2
| 1
| 0
| 3
| 0
| 0
|
328,340
|
NBBotz/Auto-Filter-Bot
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/NBBotz_Auto-Filter-Bot/database/ia_filterdb.py
|
ia_filterdb.Media
|
from umongo import Instance, Document, fields
@instance.register
class Media(Document):
file_id = fields.StrField(attribute='_id')
file_ref = fields.StrField(allow_none=True)
file_name = fields.StrField(required=True)
file_size = fields.IntField(required=True)
file_type = fields.StrField(allow_none=True)
mime_type = fields.StrField(allow_none=True)
caption = fields.StrField(allow_none=True)
class Meta:
indexes = ('$file_name',)
collection_name = COLLECTION_NAME
| null | 3
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 11
| 0
| 11
| 11
| 9
| 0
| 11
| 11
| 9
| 0
| 1
| 0
| 0
|
328,341
|
NBBotz/Auto-Filter-Bot
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/NBBotz_Auto-Filter-Bot/database/ia_filterdb.py
|
ia_filterdb.Media2
|
from umongo import Instance, Document, fields
@instance2.register
class Media2(Document):
file_id = fields.StrField(attribute='_id')
file_ref = fields.StrField(allow_none=True)
file_name = fields.StrField(required=True)
file_size = fields.IntField(required=True)
file_type = fields.StrField(allow_none=True)
mime_type = fields.StrField(allow_none=True)
caption = fields.StrField(allow_none=True)
class Meta:
indexes = ('$file_name',)
collection_name = COLLECTION_NAME
| null | 3
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 11
| 0
| 11
| 11
| 9
| 0
| 11
| 11
| 9
| 0
| 1
| 0
| 0
|
328,342
|
NBBotz/Auto-Filter-Bot
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/NBBotz_Auto-Filter-Bot/database/refer.py
|
refer.UserTracker
|
class UserTracker:
def __init__(self):
self.user_collection = mydb['referusers']
self.refer_collection = mydb['refers']
def add_user(self, user_id):
if not self.is_user_in_list(user_id):
self.user_collection.insert_one({'user_id': user_id})
def remove_user(self, user_id):
self.user_collection.delete_one({'user_id': user_id})
def is_user_in_list(self, user_id):
return bool(self.user_collection.find_one({'user_id': user_id}))
def add_refer_points(self, user_id: int, points: int):
self.refer_collection.update_one({'user_id': user_id}, {'$set': {'points': points}}, upsert=True)
def get_refer_points(self, user_id: int):
user = self.refer_collection.find_one({'user_id': user_id})
return user.get('points') if user else 0
|
class UserTracker:
def __init__(self):
pass
def add_user(self, user_id):
pass
def remove_user(self, user_id):
pass
def is_user_in_list(self, user_id):
pass
def add_refer_points(self, user_id: int, points: int):
pass
def get_refer_points(self, user_id: int):
pass
| 7
| 0
| 3
| 0
| 3
| 0
| 1
| 0
| 0
| 2
| 0
| 0
| 6
| 2
| 6
| 6
| 25
| 5
| 20
| 10
| 13
| 0
| 16
| 10
| 9
| 2
| 0
| 1
| 8
|
328,343
|
NBBotz/Auto-Filter-Bot
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/NBBotz_Auto-Filter-Bot/database/topdb.py
|
topdb.Database
|
from motor.motor_asyncio import AsyncIOMotorClient
class Database:
def __init__(self, uri, db_name):
self.client = AsyncIOMotorClient(uri)
self.db = self.client[db_name]
self.col = self.db.user
async def update_top_messages(self, user_id, message_text):
user = await self.col.find_one({'user_id': user_id, 'messages.text': message_text})
if not user:
await self.col.update_one({'user_id': user_id}, {'$push': {'messages': {'text': message_text, 'count': 1}}}, upsert=True)
else:
await self.col.update_one({'user_id': user_id, 'messages.text': message_text}, {'$inc': {'messages.$.count': 1}})
async def get_top_messages(self, limit=30):
pipeline = [{'$unwind': '$messages'}, {'$group': {'_id': '$messages.text', 'count': {'$sum': '$messages.count'}}}, {'$sort': {'count': -1}}, {'$limit': limit}]
results = await self.col.aggregate(pipeline).to_list(limit)
return [result['_id'] for result in results]
async def delete_all_messages(self):
await self.col.delete_many({})
|
class Database:
def __init__(self, uri, db_name):
pass
async def update_top_messages(self, user_id, message_text):
pass
async def get_top_messages(self, limit=30):
pass
async def delete_all_messages(self):
pass
| 5
| 0
| 7
| 0
| 7
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 4
| 3
| 4
| 4
| 33
| 4
| 29
| 11
| 24
| 0
| 16
| 11
| 11
| 2
| 0
| 1
| 5
|
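The aggregation pipeline in get_top_messages above is compact but dense. As a rough, dependency-free sketch (a hypothetical helper, not part of the repository), the same computation in plain Python:
from collections import Counter

def top_messages(users, limit=30):
    totals = Counter()
    for user in users:
        for msg in user.get('messages', []):  # $unwind: one row per message
            totals[msg['text']] += msg['count']  # $group + $sum per text
    # most_common covers both $sort (count desc) and $limit
    return [text for text, _ in totals.most_common(limit)]

users = [
    {'user_id': 1, 'messages': [{'text': 'hello', 'count': 3}]},
    {'user_id': 2, 'messages': [{'text': 'hello', 'count': 1}, {'text': 'bye', 'count': 2}]},
]
print(top_messages(users))  # ['hello', 'bye']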
328,344
|
NBBotz/Auto-Filter-Bot
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/NBBotz_Auto-Filter-Bot/database/users_chats_db.py
|
users_chats_db.Database
|
import pytz
from datetime import timedelta
import datetime
import motor.motor_asyncio
import time
class Database:
def __init__(self, uri, database_name):
self._client = motor.motor_asyncio.AsyncIOMotorClient(uri)
self.db = self._client[database_name]
self.col = self.db.users
self.grp = self.db.groups
self.users = self.db.uersz
self.botcol = self.db.bot_settings
self.misc = self.db.misc
self.verify_id = self.db.verify_id
self.codes = self.db.codes
self.connection = self.db.connections
async def find_join_req(self, id, chnl):
chnl = str(chnl)
return bool(await self.db.request[chnl].find_one({'id': id}))
async def add_join_req(self, id, chnl):
chnl = str(chnl)
await self.db.request[chnl].insert_one({'id': id})
async def del_join_req(self):
if AUTH_REQ_CHANNEL:
for c in AUTH_REQ_CHANNEL:
c = str(c)
result = await self.db.request[c].delete_many({})
print(result)
def new_user(self, id, name):
return dict(id=id, name=name, ban_status=dict(is_banned=False, ban_reason=''))
def new_group(self, id, title):
return dict(id=id, title=title, chat_status=dict(is_disabled=False, reason=''))
async def add_user(self, id, name):
user = self.new_user(id, name)
await self.col.insert_one(user)
async def is_user_exist(self, id):
user = await self.col.find_one({'id': int(id)})
return bool(user)
async def total_users_count(self):
count = await self.col.count_documents({})
return count
async def remove_ban(self, id):
ban_status = dict(is_banned=False, ban_reason='')
await self.col.update_one({'id': id}, {'$set': {'ban_status': ban_status}})
async def ban_user(self, user_id, ban_reason='No Reason'):
ban_status = dict(is_banned=True, ban_reason=ban_reason)
await self.col.update_one({'id': user_id}, {'$set': {'ban_status': ban_status}})
async def get_ban_status(self, id):
default = dict(is_banned=False, ban_reason='')
user = await self.col.find_one({'id': int(id)})
if not user:
return default
return user.get('ban_status', default)
async def get_all_users(self):
return self.col.find({})
async def delete_user(self, user_id):
await self.col.delete_many({'id': int(user_id)})
async def delete_chat(self, id):
await self.grp.delete_many({'id': int(id)})
async def get_banned(self):
users = self.col.find({'ban_status.is_banned': True})
chats = self.grp.find({'chat_status.is_disabled': True})
b_chats = [chat['id'] async for chat in chats]
b_users = [user['id'] async for user in users]
return (b_users, b_chats)
async def add_chat(self, chat, title):
chat = self.new_group(chat, title)
await self.grp.insert_one(chat)
async def get_chat(self, chat):
chat = await self.grp.find_one({'id': int(chat)})
return False if not chat else chat.get('chat_status')
async def re_enable_chat(self, id):
chat_status = dict(is_disabled=False, reason='')
await self.grp.update_one({'id': int(id)}, {'$set': {'chat_status': chat_status}})
async def update_settings(self, id, settings):
await self.grp.update_one({'id': int(id)}, {'$set': {'settings': settings}})
async def get_settings(self, id):
default = {'button': LINK_MODE, 'botpm': P_TTI_SHOW_OFF, 'file_secure': PROTECT_CONTENT, 'imdb': IMDB, 'spell_check': SPELL_CHECK_REPLY, 'welcome': MELCOW_NEW_USERS, 'auto_delete': AUTO_DELETE, 'auto_ffilter': AUTO_FFILTER, 'max_btn': MAX_BTN, 'template': IMDB_TEMPLATE, 'log': LOG_VR_CHANNEL, 'tutorial': TUTORIAL, 'tutorial_2': TUTORIAL_2, 'tutorial_3': TUTORIAL_3, 'shortner': SHORTENER_WEBSITE, 'api': SHORTENER_API, 'shortner_two': SHORTENER_WEBSITE2, 'api_two': SHORTENER_API2, 'shortner_three': SHORTENER_WEBSITE3, 'api_three': SHORTENER_API3, 'is_verify': IS_VERIFY, 'verify_time': TWO_VERIFY_GAP, 'third_verify_time': THREE_VERIFY_GAP, 'caption': CUSTOM_FILE_CAPTION, 'fsub_id': AUTH_CHANNEL}
chat = await self.grp.find_one({'id': int(id)})
if chat and 'settings' in chat:
return chat['settings']
else:
return default.copy()
async def silentx_reset_settings(self):
try:
result = await self.grp.update_many({'settings': {'$exists': True}}, {'$unset': {'settings': ''}})
modified_count = result.modified_count
return modified_count
except Exception as e:
print(f'Error deleting settings for all groups: {str(e)}')
raise
async def disable_chat(self, chat, reason='No Reason'):
chat_status = dict(is_disabled=True, reason=reason)
await self.grp.update_one({'id': int(chat)}, {'$set': {'chat_status': chat_status}})
async def total_chat_count(self):
count = await self.grp.count_documents({})
return count
async def get_all_chats(self):
return self.grp.find({})
async def get_db_size(self):
return (await self.db.command('dbstats'))['dataSize']
async def get_user(self, user_id):
user_data = await self.users.find_one({'id': user_id})
return user_data
async def update_user(self, user_data):
await self.users.update_one({'id': user_data['id']}, {'$set': user_data}, upsert=True)
async def get_notcopy_user(self, user_id):
user_id = int(user_id)
user = await self.misc.find_one({'user_id': user_id})
ist_timezone = pytz.timezone('Asia/Kolkata')
if not user:
res = {'user_id': user_id, 'last_verified': datetime.datetime(2020, 5, 17, 0, 0, 0, tzinfo=ist_timezone), 'second_time_verified': datetime.datetime(2019, 5, 17, 0, 0, 0, tzinfo=ist_timezone)}
user = await self.misc.insert_one(res)
return user
async def update_notcopy_user(self, user_id, value: dict):
user_id = int(user_id)
myquery = {'user_id': user_id}
newvalues = {'$set': value}
return await self.misc.update_one(myquery, newvalues)
async def is_user_verified(self, user_id):
user = await self.get_notcopy_user(user_id)
try:
pastDate = user['last_verified']
except Exception:
user = await self.get_notcopy_user(user_id)
pastDate = user['last_verified']
ist_timezone = pytz.timezone('Asia/Kolkata')
pastDate = pastDate.astimezone(ist_timezone)
current_time = datetime.datetime.now(tz=ist_timezone)
seconds_since_midnight = (current_time - datetime.datetime(current_time.year, current_time.month, current_time.day, 0, 0, 0, tzinfo=ist_timezone)).total_seconds()
time_diff = current_time - pastDate
total_seconds = time_diff.total_seconds()
return total_seconds <= seconds_since_midnight
async def user_verified(self, user_id):
user = await self.get_notcopy_user(user_id)
try:
pastDate = user['second_time_verified']
except Exception:
user = await self.get_notcopy_user(user_id)
pastDate = user['second_time_verified']
ist_timezone = pytz.timezone('Asia/Kolkata')
pastDate = pastDate.astimezone(ist_timezone)
current_time = datetime.datetime.now(tz=ist_timezone)
seconds_since_midnight = (current_time - datetime.datetime(current_time.year, current_time.month, current_time.day, 0, 0, 0, tzinfo=ist_timezone)).total_seconds()
time_diff = current_time - pastDate
total_seconds = time_diff.total_seconds()
return total_seconds <= seconds_since_midnight
async def use_second_shortener(self, user_id, time):
user = await self.get_notcopy_user(user_id)
if not user.get('second_time_verified'):
ist_timezone = pytz.timezone('Asia/Kolkata')
await self.update_notcopy_user(user_id, {'second_time_verified': datetime.datetime(2019, 5, 17, 0, 0, 0, tzinfo=ist_timezone)})
user = await self.get_notcopy_user(user_id)
if await self.is_user_verified(user_id):
try:
pastDate = user['last_verified']
except Exception:
user = await self.get_notcopy_user(user_id)
pastDate = user['last_verified']
ist_timezone = pytz.timezone('Asia/Kolkata')
pastDate = pastDate.astimezone(ist_timezone)
current_time = datetime.datetime.now(tz=ist_timezone)
time_difference = current_time - pastDate
if time_difference > datetime.timedelta(seconds=time):
pastDate = user['last_verified'].astimezone(ist_timezone)
second_time = user['second_time_verified'].astimezone(ist_timezone)
return second_time < pastDate
return False
async def use_third_shortener(self, user_id, time):
user = await self.get_notcopy_user(user_id)
if not user.get('third_time_verified'):
ist_timezone = pytz.timezone('Asia/Kolkata')
await self.update_notcopy_user(user_id, {'third_time_verified': datetime.datetime(2018, 5, 17, 0, 0, 0, tzinfo=ist_timezone)})
user = await self.get_notcopy_user(user_id)
if await self.user_verified(user_id):
try:
pastDate = user['second_time_verified']
except Exception:
user = await self.get_notcopy_user(user_id)
pastDate = user['second_time_verified']
ist_timezone = pytz.timezone('Asia/Kolkata')
pastDate = pastDate.astimezone(ist_timezone)
current_time = datetime.datetime.now(tz=ist_timezone)
time_difference = current_time - pastDate
if time_difference > datetime.timedelta(seconds=time):
pastDate = user['second_time_verified'].astimezone(ist_timezone)
second_time = user['third_time_verified'].astimezone(ist_timezone)
return second_time < pastDate
return False
async def create_verify_id(self, user_id: int, hash):
res = {'user_id': user_id, 'hash': hash, 'verified': False}
return await self.verify_id.insert_one(res)
async def get_verify_id_info(self, user_id: int, hash):
return await self.verify_id.find_one({'user_id': user_id, 'hash': hash})
async def update_verify_id_info(self, user_id, hash, value: dict):
myquery = {'user_id': user_id, 'hash': hash}
newvalues = {'$set': value}
return await self.verify_id.update_one(myquery, newvalues)
async def has_premium_access(self, user_id):
user_data = await self.get_user(user_id)
if user_data:
expiry_time = user_data.get('expiry_time')
if expiry_time is None:
return False
elif isinstance(expiry_time, datetime.datetime) and datetime.datetime.now() <= expiry_time:
return True
else:
await self.users.update_one({'id': user_id}, {'$set': {'expiry_time': None}})
return False
async def update_user(self, user_data):
await self.users.update_one({'id': user_data['id']}, {'$set': user_data}, upsert=True)
async def update_one(self, filter_query, update_data):
try:
result = await self.users.update_one(filter_query, update_data)
return result.matched_count == 1
except Exception as e:
print(f'Error updating document: {e}')
return False
async def get_expired(self, current_time):
expired_users = []
cursor = self.users.find({'expiry_time': {'$lt': current_time}})
async for user in cursor:
expired_users.append(user)
return expired_users
async def get_expiring_soon(self, label, delta):
reminder_key = f'reminder_{label}_sent'
now = datetime.datetime.utcnow()
target_time = now + delta
window = timedelta(seconds=30)
start_range = target_time - window
end_range = target_time + window
reminder_users = []
cursor = self.users.find({'expiry_time': {'$gte': start_range, '$lte': end_range}, reminder_key: {'$ne': True}})
async for user in cursor:
reminder_users.append(user)
await self.users.update_one({'id': user['id']}, {'$set': {reminder_key: True}})
return reminder_users
async def remove_premium_access(self, user_id):
return await self.update_one({'id': user_id}, {'$set': {'expiry_time': None}})
async def check_trial_status(self, user_id):
user_data = await self.get_user(user_id)
if user_data:
return user_data.get('has_free_trial', False)
return False
async def give_free_trial(self, user_id):
user_id = user_id
seconds = 5 * 60
expiry_time = datetime.datetime.now() + datetime.timedelta(seconds=seconds)
user_data = {'id': user_id, 'expiry_time': expiry_time, 'has_free_trial': True}
await self.users.update_one({'id': user_id}, {'$set': user_data}, upsert=True)
async def all_premium_users(self):
count = await self.users.count_documents({'expiry_time': {'$gt': datetime.datetime.now()}})
return count
async def get_bot_setting(self, bot_id, setting_key, default_value):
bot = await self.botcol.find_one({'id': int(bot_id)}, {setting_key: 1, '_id': 0})
return bot[setting_key] if bot and setting_key in bot else default_value
async def update_bot_setting(self, bot_id, setting_key, value):
await self.botcol.update_one({'id': int(bot_id)}, {'$set': {setting_key: value}}, upsert=True)
async def connect_group(self, group_id, user_id):
user = await self.connection.find_one({'_id': user_id})
if user:
if group_id not in user['group_ids']:
await self.connection.update_one({'_id': user_id}, {'$push': {'group_ids': group_id}})
else:
await self.connection.insert_one({'_id': user_id, 'group_ids': [group_id]})
async def get_connected_grps(self, user_id):
user = await self.connection.find_one({'_id': user_id})
if user:
return user['group_ids']
else:
return []
async def get_maintenance_status(self, bot_id):
return await self.get_bot_setting(bot_id, 'MAINTENANCE_MODE', MAINTENANCE_MODE)
async def update_maintenance_status(self, bot_id, enable):
await self.update_bot_setting(bot_id, 'MAINTENANCE_MODE', enable)
async def pm_search_status(self, bot_id):
return await self.get_bot_setting(bot_id, 'PM_SEARCH', PM_SEARCH)
async def update_pm_search_status(self, bot_id, enable):
await self.update_bot_setting(bot_id, 'PM_SEARCH', enable)
async def movie_update_status(self, bot_id):
return await self.get_bot_setting(bot_id, 'MOVIE_UPDATE_NOTIFICATION', MOVIE_UPDATE_NOTIFICATION)
async def update_movie_update_status(self, bot_id, enable):
await self.update_bot_setting(bot_id, 'MOVIE_UPDATE_NOTIFICATION', enable)
|
class Database:
def __init__(self, uri, database_name):
pass
async def find_join_req(self, id, chnl):
pass
async def add_join_req(self, id, chnl):
pass
async def del_join_req(self):
pass
def new_user(self, id, name):
pass
def new_group(self, id, title):
pass
async def add_user(self, id, name):
pass
async def is_user_exist(self, id):
pass
async def total_users_count(self):
pass
async def remove_ban(self, id):
pass
async def ban_user(self, user_id, ban_reason='No Reason'):
pass
async def get_ban_status(self, id):
pass
async def get_all_users(self):
pass
async def delete_user(self, user_id):
pass
async def delete_chat(self, id):
pass
async def get_banned(self):
pass
async def add_chat(self, chat, title):
pass
async def get_chat(self, chat):
pass
async def re_enable_chat(self, id):
pass
async def update_settings(self, id, settings):
pass
async def get_settings(self, id):
pass
async def silentx_reset_settings(self):
pass
async def disable_chat(self, chat, reason='No Reason'):
pass
async def total_chat_count(self):
pass
async def get_all_chats(self):
pass
async def get_db_size(self):
pass
async def get_user(self, user_id):
pass
async def update_user(self, user_data):
pass
async def get_notcopy_user(self, user_id):
pass
async def update_notcopy_user(self, user_id, value: dict):
pass
async def is_user_verified(self, user_id):
pass
async def user_verified(self, user_id):
pass
async def use_second_shortener(self, user_id, time):
pass
async def use_third_shortener(self, user_id, time):
pass
async def create_verify_id(self, user_id: int, hash):
pass
async def get_verify_id_info(self, user_id: int, hash):
pass
async def update_verify_id_info(self, user_id, hash, value: dict):
pass
async def has_premium_access(self, user_id):
pass
async def update_user(self, user_data):
pass
async def update_one(self, filter_query, update_data):
pass
async def get_expired(self, current_time):
pass
async def get_expiring_soon(self, label, delta):
pass
async def remove_premium_access(self, user_id):
pass
async def check_trial_status(self, user_id):
pass
async def give_free_trial(self, user_id):
pass
async def all_premium_users(self):
pass
async def get_bot_setting(self, bot_id, setting_key, default_value):
pass
async def update_bot_setting(self, bot_id, setting_key, value):
pass
async def connect_group(self, group_id, user_id):
pass
async def get_connected_grps(self, user_id):
pass
async def get_maintenance_status(self, bot_id):
pass
async def update_maintenance_status(self, bot_id, enable):
pass
async def pm_search_status(self, bot_id):
pass
async def update_pm_search_status(self, bot_id, enable):
pass
async def movie_update_status(self, bot_id):
pass
async def update_movie_update_status(self, bot_id, enable):
pass
| 57
| 0
| 6
| 0
| 6
| 0
| 2
| 0.01
| 0
| 7
| 0
| 0
| 56
| 10
| 56
| 56
| 414
| 58
| 354
| 147
| 297
| 2
| 274
| 145
| 217
| 5
| 0
| 2
| 84
|
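The verification checks in is_user_verified and user_verified above boil down to one comparison: the stored timestamp must fall after the most recent midnight in Asia/Kolkata. A dependency-free sketch of that comparison (a hypothetical helper; the repository uses pytz, and note that attaching a pytz zone via tzinfo= as the record does yields the historic LMT offset rather than IST, a pitfall zoneinfo avoids):
import datetime
from zoneinfo import ZoneInfo  # stdlib since Python 3.9

IST = ZoneInfo('Asia/Kolkata')

def verified_since_midnight(last_verified: datetime.datetime) -> bool:
    now = datetime.datetime.now(tz=IST)
    midnight = now.replace(hour=0, minute=0, second=0, microsecond=0)
    seconds_since_midnight = (now - midnight).total_seconds()
    elapsed = (now - last_verified.astimezone(IST)).total_seconds()
    # Same comparison as the record: verified iff last_verified >= today's midnight
    return elapsed <= seconds_since_midnight

yesterday = datetime.datetime.now(tz=IST) - datetime.timedelta(days=1)
print(verified_since_midnight(yesterday))                      # False
print(verified_since_midnight(datetime.datetime.now(tz=IST)))  # True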
328,345
|
NBBotz/Auto-Filter-Bot
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/NBBotz_Auto-Filter-Bot/utils.py
|
utils.temp
|
import os
class temp(object):
BANNED_USERS = []
BANNED_CHATS = []
SETTINGS = {}
ME = None
CURRENT = int(os.environ.get('SKIP', 2))
CANCEL = False
B_USERS_CANCEL = False
B_GROUPS_CANCEL = False
MELCOW = {}
U_NAME = None
B_NAME = None
B_LINK = None
GETALL = {}
SHORT = {}
IMDB_CAP = {}
VERIFICATIONS = {}
|
class temp(object):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 17
| 0
| 17
| 17
| 16
| 0
| 17
| 17
| 16
| 0
| 1
| 0
| 0
|
328,346
|
ImperialCollegeLondon/prefect-managedfiletransfer
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/ImperialCollegeLondon_prefect-managedfiletransfer/prefect_managedfiletransfer/AssetDownloadResult.py
|
prefect_managedfiletransfer.AssetDownloadResult.AssetDownloadResult
|
from pathlib import Path
from datetime import datetime
class AssetDownloadResult:
"""
Represents the result of an asset download operation.
"""
def __init__(self, success: bool, file_path: Path | None, download_skipped: bool=False, last_modified: datetime | None=None, size: int=0, error: str | None=None):
"""
Represents the result of an asset download operation.
:param success: True if the download was successful, False otherwise.
:param file_path: The path to the downloaded file, or None if the download failed.
:param download_skipped: True if the download was skipped because the file was not newer than the destination file.
:param last_modified: The last modified time of the file, if available.
"""
self.last_modified = last_modified
self.success = success
self.file_path = file_path
self.download_skipped = download_skipped
self.error = error
self.size = size
if not success and file_path is not None:
raise ValueError('If success is False, file_path must be None')
def __repr__(self):
if self.error:
return f'AssetDownloadResult(success={self.success}, error={self.error})'
return f'AssetDownloadResult(success={self.success}, file_path={self.file_path}, download_skipped={self.download_skipped}, last_modified={self.last_modified})'
|
class AssetDownloadResult:
'''
Represents the result of an asset download operation.
'''
def __init__(self, success: bool, file_path: Path | None, download_skipped: bool=False, last_modified: datetime | None=None, size: int=0, error: str | None=None):
'''
Represents the result of an asset download operation.
:param success: True if the download was successful, False otherwise.
:param file_path: The path to the downloaded file, or None if the download failed.
:param download_skipped: True if the download was skipped because the file was not newer than the destination file.
:param last_modified: The last modified time of the file, if available.
    :param size: The size of the downloaded file in bytes, if known.
    :param error: An error message explaining the failure, if any.
    '''
pass
def __repr__(self):
pass
| 3
| 2
| 15
| 1
| 11
| 4
| 2
| 0.45
| 0
| 6
| 0
| 0
| 2
| 6
| 2
| 2
| 36
| 4
| 22
| 17
| 11
| 10
| 14
| 9
| 11
| 2
| 0
| 1
| 4
|
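A minimal usage sketch for AssetDownloadResult above, assuming the package is importable under the module path shown in the record:
from datetime import datetime
from pathlib import Path
from prefect_managedfiletransfer.AssetDownloadResult import AssetDownloadResult

ok = AssetDownloadResult(success=True, file_path=Path('data.csv'), last_modified=datetime.now(), size=1024)
print(ok)       # AssetDownloadResult(success=True, file_path=data.csv, ...)

failed = AssetDownloadResult(success=False, file_path=None, error='404 not found')
print(failed)   # AssetDownloadResult(success=False, error=404 not found)

# Invariant enforced by __init__: a failed result may not carry a file path.
try:
    AssetDownloadResult(success=False, file_path=Path('data.csv'))
except ValueError as e:
    print(e)    # If success is False, file_path must be None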
328,347
|
ImperialCollegeLondon/prefect-managedfiletransfer
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/ImperialCollegeLondon_prefect-managedfiletransfer/prefect_managedfiletransfer/FileMatcher.py
|
prefect_managedfiletransfer.FileMatcher.FileMatcher
|
from pathlib import Path
from prefect_managedfiletransfer.SortFilesBy import SortFilesBy
from datetime import timedelta
from pydantic import BaseModel, Field
class FileMatcher(BaseModel):
"""
Represents a file matcher with a source path and a pattern to match files.
This is used to find files in a directory that match a specific pattern.
"""
source_folder: Path = Field(default=Path('.'), description='Path to the source directory to look for files.')
pattern_to_match: str = Field(default='*', description="Pattern to match files in the source directory. Supports glob patterns like '*.txt' or 'file_*.csv'.")
minimum_age: str | int | timedelta | None = Field(default=None, description='Only transfer files older than this in secs (or other time with suffix s|m|h|d|w|month|year). Default off.')
maximum_age: str | int | timedelta | None = Field(default=None, description='Only transfer files newer than this in secs (or other time with suffix s|m|h|d|w|month|year). Default off.')
sort: SortFilesBy = Field(default=SortFilesBy.PATH_ASC, description='Sort files by a specific attribute. Default is PATH_ASC.')
skip: int = Field(default=0, description='Number of files to skip in the sorted list. Default is 0.')
take: int | None = Field(default=None, description='Number of files to take from the sorted list. If None, all files are taken. Default is None.')
|
class FileMatcher(BaseModel):
'''
Represents a file matcher with a source path and a pattern to match files.
This is used to find files in a directory that match a specific pattern.
'''
pass
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0.14
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 82
| 34
| 1
| 29
| 8
| 28
| 4
| 8
| 8
| 7
| 0
| 5
| 0
| 0
|
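A short construction sketch for the FileMatcher model above (field names, defaults, and the age-suffix format are taken from the record; assumes the package is installed):
from pathlib import Path
from prefect_managedfiletransfer.FileMatcher import FileMatcher
from prefect_managedfiletransfer.SortFilesBy import SortFilesBy

matcher = FileMatcher(
    source_folder=Path('/data/incoming'),
    pattern_to_match='report_*.csv',
    minimum_age='5m',              # only transfer files older than five minutes
    sort=SortFilesBy.DATE_DESC,    # newest first
    take=10,                       # at most ten matches
)
print(matcher)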
328,348
|
ImperialCollegeLondon/prefect-managedfiletransfer
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/ImperialCollegeLondon_prefect-managedfiletransfer/prefect_managedfiletransfer/FileToFolderMapping.py
|
prefect_managedfiletransfer.FileToFolderMapping.FileToFolderMapping
|
from pydantic import BaseModel, Field
from prefect_managedfiletransfer.RemoteAsset import RemoteAsset
from pathlib import Path
class FileToFolderMapping(BaseModel):
source_path_pattern_to_match: str = Field(default='*', description="Pattern to match files against in the source directory. Supports glob patterns. E.g. 'folder/*.txt' or '*path*/file.*'")
destination_folder: Path = Field(default=Path('.'), description='Path of the destination folder files matching the pattern should be placed into')
def is_match(self, file_path: Path) -> bool:
"""
Check if the given file path matches the source path pattern.
"""
if not self.source_path_pattern_to_match:
return False
if not file_path:
return False
return file_path.match(self.source_path_pattern_to_match)
def __init__(self, source_path_pattern_to_match: str='*', destination_folder: str='.'):
super().__init__(source_path_pattern_to_match=source_path_pattern_to_match, destination_folder=Path(destination_folder))
@staticmethod
def apply_mappings(mappings: list['FileToFolderMapping'], source_files: list[RemoteAsset], destination_path) -> list[tuple[RemoteAsset, Path]]:
source_destination_pairs = []
for remote_asset in source_files:
logger.info(f'Found file: {remote_asset}')
target_file_path: Path | None = None
for mapping in mappings:
if mapping.is_match(remote_asset.path):
if not destination_path:
target_file_path = mapping.destination_folder / remote_asset.path.name
elif not mapping.destination_folder or mapping.destination_folder == Path('.'):
target_file_path = destination_path / remote_asset.path.name
else:
target_file_path = destination_path / mapping.destination_folder / remote_asset.path.name
logger.info(f'File {remote_asset.path} matched {mapping.source_path_pattern_to_match} -> {mapping.destination_folder}')
break
if not target_file_path:
logger.info(f'No mapping found for {remote_asset.path}, using default destination path')
target_file_path = destination_path / remote_asset.path.name
assert target_file_path is not None
source_destination_pairs.append((remote_asset, target_file_path))
return source_destination_pairs
|
class FileToFolderMapping(BaseModel):
def is_match(self, file_path: Path) -> bool:
'''
Check if the given file path matches the source path pattern.
'''
pass
def __init__(self, source_path_pattern_to_match: str='*', destination_folder: str='.'):
pass
@staticmethod
def apply_mappings(mappings: list['FileToFolderMapping'], source_files: list[RemoteAsset], destination_path) -> list[tuple[RemoteAsset, Path]]:
pass
| 5
| 1
| 22
| 3
| 18
| 1
| 4
| 0.08
| 1
| 7
| 1
| 0
| 2
| 0
| 3
| 85
| 81
| 13
| 63
| 19
| 50
| 5
| 30
| 10
| 26
| 7
| 5
| 4
| 11
|
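A sketch of how apply_mappings above routes files; RemoteAsset comes from record 328,354 below, and the import paths follow the record's file paths (apply_mappings logs via a module-level logger defined outside the extract):
from datetime import datetime
from pathlib import Path
from prefect_managedfiletransfer.FileToFolderMapping import FileToFolderMapping
from prefect_managedfiletransfer.RemoteAsset import RemoteAsset

mappings = [
    FileToFolderMapping('*.csv', 'tables'),
    FileToFolderMapping('*.log', 'logs'),
]
assets = [RemoteAsset(Path('daily.csv'), datetime.now(), size=10)]

# Each asset is paired with destination/<mapped folder>/<file name>;
# unmatched files fall back to the destination path itself.
pairs = FileToFolderMapping.apply_mappings(mappings, assets, Path('/archive'))
print(pairs)  # [(RemoteAsset(...daily.csv...), PosixPath('/archive/tables/daily.csv'))]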
328,349
|
ImperialCollegeLondon/prefect-managedfiletransfer
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/ImperialCollegeLondon_prefect-managedfiletransfer/prefect_managedfiletransfer/PathUtil.py
|
prefect_managedfiletransfer.PathUtil.PathUtil
|
from prefect_managedfiletransfer.RemoteConnectionType import RemoteConnectionType
from pathlib import Path
class PathUtil:
@staticmethod
def resolve_path(path_type: RemoteConnectionType, basepath: Path | str | None, path: Path | str) -> Path:
if path_type == RemoteConnectionType.LOCAL:
remote_source_path = PathUtil._local_resolve_path(basepath, path, validate=True)
else:
remote_source_path = PathUtil._resolve_remote_path(basepath, path, validate=True)
return remote_source_path
@staticmethod
def _local_resolve_path(basepath: str | Path | None, path: str | Path, validate: bool=False) -> Path:
resolved_basepath = Path(basepath).expanduser().resolve() if basepath else Path('.').resolve()
if path is None:
return resolved_basepath
resolved_path: Path = Path(path).expanduser()
if not resolved_path.is_absolute():
resolved_path = resolved_basepath / resolved_path
else:
resolved_path = resolved_path.resolve()
if validate:
if resolved_basepath not in resolved_path.parents and resolved_basepath != resolved_path:
raise ValueError(f'Provided path {resolved_path} is outside of the base path {resolved_basepath}.')
return resolved_path
@staticmethod
def _resolve_remote_path(basepath: str | Path | None, path: str | Path, validate: bool=False) -> Path:
if path is None and basepath is None:
return Path('.')
elif path is None:
return basepath
path = str(path).strip()
basepath = str(basepath).strip() if basepath else ''
if not path.startswith('/') and len(basepath) > 0:
resolved_path = f"{basepath.rstrip('/')}/{path}"
else:
resolved_path = path
if validate:
if len(basepath) > 0 and Path(basepath) not in Path(resolved_path).parents and (basepath != resolved_path):
raise ValueError(f'Provided path {resolved_path} is outside of the base path {basepath}.')
if len(basepath) > 0 and '..' in resolved_path:
raise ValueError(f"Provided path {resolved_path} contains '..' so cannot be validated to be within basepath. Remove basepath if .. is required")
return Path(resolved_path)
|
class PathUtil:
@staticmethod
def resolve_path(path_type: RemoteConnectionType, basepath: Path | str | None, path: Path | str) -> Path:
pass
@staticmethod
def _local_resolve_path(basepath: str | Path | None, path: str | Path, validate: bool=False) -> Path:
pass
@staticmethod
def _resolve_remote_path(basepath: str | Path | None, path: str | Path, validate: bool=False) -> Path:
pass
| 7
| 0
| 24
| 3
| 21
| 1
| 5
| 0.05
| 0
| 5
| 1
| 0
| 0
| 0
| 3
| 3
| 79
| 10
| 66
| 19
| 51
| 3
| 33
| 8
| 29
| 8
| 0
| 2
| 16
|
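A sketch of PathUtil above: relative paths are anchored under basepath, and validation rejects results that escape it. The underscore-prefixed helper is private; it is called here only to illustrate the traversal guard:
from pathlib import Path
from prefect_managedfiletransfer.PathUtil import PathUtil
from prefect_managedfiletransfer.RemoteConnectionType import RemoteConnectionType

# Local paths are expanded, resolved, and validated against the base path.
print(PathUtil.resolve_path(RemoteConnectionType.LOCAL, '/srv/data', 'reports/a.txt'))
# -> /srv/data/reports/a.txt

# Remote paths are joined textually and guarded against '..' traversal.
print(PathUtil._resolve_remote_path('/remote/base', 'a/b.txt'))  # /remote/base/a/b.txt
try:
    PathUtil._resolve_remote_path('/remote/base', '../outside.txt', validate=True)
except ValueError as e:
    print(e)  # contains '..' so cannot be validated to be within basepath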
328,350
|
ImperialCollegeLondon/prefect-managedfiletransfer
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/ImperialCollegeLondon_prefect-managedfiletransfer/prefect_managedfiletransfer/RCloneCommandBuilder.py
|
prefect_managedfiletransfer.RCloneCommandBuilder.RCloneCommandBuilder
|
from pathlib import Path
import platform
import subprocess
from prefect_managedfiletransfer.RCloneConfig import RCloneConfig
import importlib
import importlib.resources
class RCloneCommandBuilder:
def __init__(self, rclone_config_file: Path | None=None, rclone_config: RCloneConfig | None=None):
self.rclone_config = rclone_config
self.rclone_config_file = rclone_config_file
self._rclone_executable = 'rclone'
packaged_folder = Path(str(importlib.resources.files('prefect_managedfiletransfer').joinpath('rclone')))
packaged_executable = packaged_folder / self._rclone_executable
if platform.system() == 'Windows':
self._rclone_executable = 'rclone.exe'
packaged_executable = packaged_folder / self._rclone_executable
logger.debug(f'rclone windows packaged executable is at {packaged_executable}')
elif platform.system() == 'Darwin':
packaged_executable = packaged_folder / 'osx' / self._rclone_executable
logger.debug(f'rclone macOS packaged executable is at {packaged_executable}')
else:
logger.debug(f'rclone linux packaged executable is at {packaged_executable}')
try:
subprocess.run(f'{self._rclone_executable} version', shell=True, check=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
logger.debug(f"Using rclone executable on PATH: '{self._rclone_executable}'")
except subprocess.CalledProcessError:
logger.warning('rclone is not available on PATH - using packaged version at ' + str(packaged_executable))
self._rclone_executable = str(packaged_executable)
try:
subprocess.run([self._rclone_executable, 'version'], check=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
except subprocess.CalledProcessError:
logger.critical(f'rclone executable {self._rclone_executable} is not available or not executable')
raise FileNotFoundError(f'rclone executable {self._rclone_executable} is not available or not executable')
self.rclone_command = [self._rclone_executable]
if rclone_config_file is not None and rclone_config is not None:
raise ValueError('rclone_config_file and rclone_config_contents cannot be used at the same time')
if rclone_config_file is not None and (not rclone_config_file.exists()):
logger.critical(f'rclone config file {rclone_config_file} does not exist')
raise FileNotFoundError(f'rclone config file {rclone_config_file} does not exist')
if rclone_config_file is not None and rclone_config_file.exists():
logger.info(f'Using rclone config file {rclone_config_file.absolute()}')
self.rclone_command.insert(1, '--config')
self.rclone_command.insert(2, str(rclone_config_file.absolute()))
elif rclone_config is not None:
logger.info(f'Using rclone config for remote {rclone_config.remote_name}')
else:
logger.info('No rclone config file or rclone config object provided, using default rclone config, probably [HOME]/.config/rclone/rclone.conf')
def uploadTo(self, source_file, destination_file: Path, update_only_if_newer_mode=False) -> 'RCloneCommandBuilder':
rclone_remote_pathstr = str(destination_file).lstrip()
rclone_remote_pathstr = self._apply_remote_prefix(rclone_remote_pathstr)
return self.copyTo(source_file, update_only_if_newer_mode, rclone_remote_pathstr)
def downloadTo(self, source_file, destination_file, update_only_if_newer_mode=False) -> 'RCloneCommandBuilder':
rclone_remote_pathstr = str(source_file).lstrip()
rclone_remote_pathstr = self._apply_remote_prefix(rclone_remote_pathstr)
return self.copyTo(rclone_remote_pathstr, update_only_if_newer_mode, destination_file)
def deleteFile(self, remote_file) -> 'RCloneCommandBuilder':
rclone_remote_pathstr = str(remote_file).lstrip()
rclone_remote_pathstr = self._apply_remote_prefix(rclone_remote_pathstr)
logger.info(f'Using rclone to delete {str(remote_file)}')
self.rclone_command += ['deletefile', rclone_remote_pathstr]
return self
def copyTo(self, source_file, update_only_if_newer_mode, rclone_remote_pathstr):
if update_only_if_newer_mode:
logger.info('Using rclone with --update flag to skip files that are newer on the destination')
self.rclone_command.append('--update')
logger.info(f'Using rclone to copy {str(source_file)} to {rclone_remote_pathstr}')
self.rclone_command += ['copyto', str(source_file), rclone_remote_pathstr, '--check-first', '--checksum', '--max-duration', '30m']
return self
def _apply_remote_prefix(self, rclone_remote_pathstr):
if self.rclone_config is not None and (not rclone_remote_pathstr.startswith(f'{self.rclone_config.remote_name}:')):
logger.debug(f'Destination {rclone_remote_pathstr} does not start with {self.rclone_config.remote_name}:, adding it')
rclone_remote_pathstr = f'{self.rclone_config.remote_name}:{rclone_remote_pathstr}'
return rclone_remote_pathstr
def lsf(self, remote_folder: Path, pattern_to_match: str | None=None) -> 'RCloneCommandBuilder':
rclone_remote_pathstr = str(remote_folder).rstrip('/')
rclone_remote_pathstr = self._apply_remote_prefix(rclone_remote_pathstr)
self.rclone_command += ['lsf', '--files-only', '--format', 'tsp', rclone_remote_pathstr]
if pattern_to_match:
logger.info(f'Using rclone to list files in {rclone_remote_pathstr} matching pattern {pattern_to_match}')
self.rclone_command += ['--include', pattern_to_match]
return self
def build(self, custom_executable_path: str | Path=None) -> list[str]:
"""
Build the rclone command as a list of strings.
"""
if custom_executable_path:
logger.info(f'Using custom rclone executable at {custom_executable_path}')
self._rclone_executable = custom_executable_path
self.rclone_command[0] = str(self._rclone_executable)
return self.rclone_command.copy()
|
class RCloneCommandBuilder:
def __init__(self, rclone_config_file: Path | None=None, rclone_config: RCloneConfig | None=None):
pass
def uploadTo(self, source_file, destination_file: Path, update_only_if_newer_mode=False) -> 'RCloneCommandBuilder':
pass
def downloadTo(self, source_file, destination_file, update_only_if_newer_mode=False) -> 'RCloneCommandBuilder':
pass
def deleteFile(self, remote_file) -> 'RCloneCommandBuilder':
pass
def copyTo(self, source_file, update_only_if_newer_mode, rclone_remote_pathstr):
pass
def _apply_remote_prefix(self, rclone_remote_pathstr):
pass
def lsf(self, remote_folder: Path, pattern_to_match: str | None=None) -> 'RCloneCommandBuilder':
pass
def build(self, custom_executable_path: str | Path=None) -> list[str]:
'''
Build the rclone command as a list of strings.
'''
pass
| 9
| 1
| 25
| 3
| 21
| 1
| 3
| 0.05
| 0
| 7
| 1
| 0
| 8
| 4
| 8
| 8
| 206
| 32
| 170
| 38
| 142
| 9
| 77
| 19
| 68
| 9
| 0
| 1
| 20
|
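A sketch of the chainable builder above. Instantiating it probes `rclone version`, so this assumes rclone is on PATH or shipped with the package:
from pathlib import Path
from prefect_managedfiletransfer.RCloneCommandBuilder import RCloneCommandBuilder

cmd = (
    RCloneCommandBuilder()  # raises FileNotFoundError if no rclone is usable
    .uploadTo(Path('local/report.csv'), Path('remote:backups/report.csv'), update_only_if_newer_mode=True)
    .build()
)
print(cmd)
# ['rclone', '--update', 'copyto', 'local/report.csv', 'remote:backups/report.csv',
#  '--check-first', '--checksum', '--max-duration', '30m']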
328,351
|
ImperialCollegeLondon/prefect-managedfiletransfer
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/ImperialCollegeLondon_prefect-managedfiletransfer/prefect_managedfiletransfer/RCloneConfig.py
|
prefect_managedfiletransfer.RCloneConfig.RCloneConfig
|
class RCloneConfig:
def __init__(self, remote_name: str):
self.remote_name = remote_name
self._config_contents: str
def get_config(self):
return self._config_contents
async def update_config(self, config_contents: str):
self._config_contents = config_contents
|
class RCloneConfig:
def __init__(self, remote_name: str):
pass
def get_config(self):
pass
async def update_config(self, config_contents: str):
pass
| 4
| 0
| 2
| 0
| 2
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 3
| 2
| 3
| 3
| 10
| 2
| 8
| 6
| 4
| 0
| 8
| 6
| 4
| 1
| 0
| 0
| 3
|
328,352
|
ImperialCollegeLondon/prefect-managedfiletransfer
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/ImperialCollegeLondon_prefect-managedfiletransfer/prefect_managedfiletransfer/RCloneConfigFileBlock.py
|
prefect_managedfiletransfer.RCloneConfigFileBlock.RCloneConfigFileBlock
|
from pydantic import Field
from prefect.blocks.core import Block
class RCloneConfigFileBlock(Block):
"""
Block for storing RClone configuration file contents.
This block is used to store the contents of an RClone configuration file, which can be used to configure RClone for file transfers.
    The block is updated with tokens when they are refreshed, allowing for dynamic updates to the RClone configuration.
    Generate a config locally with `rclone config create my_sharepoint onedrive` as below, then save the contents in a block with remote_name=my_sharepoint, config_file_contents=
[my_sharepoint]
type = onedrive
token = {"access_token":"...","token_type":"Bearer","refresh_token":"...","expiry":"2000-00-00T00:00:00.000000000Z"}
drive_id = b!-aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
drive_type = documentLibrary
Attributes:
remote_name (str): The name of the remote connection.
config_file_contents (str): The contents of the RClone configuration file.
Example:
Load a stored value:
```python
from prefect_managedfiletransfer import RCloneConfigFileBlock
block = RCloneConfigFileBlock.load("BLOCK_NAME")
```
Creating a block:
```python
from prefect_managedfiletransfer import RCloneConfigFileBlock
block = RCloneConfigFileBlock(
remote_name="my_sharepoint",
config_file_contents="..."
)
block.save("my_sharepoint_block", overwrite=True)
```
"""
_block_type_name = 'RClone Remote Config File [ManagedFileTransfer]'
_logo_url = 'https://github.com/rclone/rclone/blob/master/graphics/logo/logo_symbol/logo_symbol_color_64px.png?raw=true'
_documentation_url = 'https://ImperialCollegeLondon.github.io/prefect-managedfiletransfer/blocks/#prefect-managedfiletransfer.blocks.RCloneConfigFileBlock'
remote_name: str = Field(title='Remote Name', description='The name of the remote connection in the RClone configuration.')
config_file_contents: str = Field(title='Config File Contents', description='The contents of the RClone configuration file.')
@classmethod
def seed_value_for_example(cls):
"""
Seeds the field, value, so the block can be loaded.
"""
block = cls(remote_name='my_sharepoint', config_file_contents='\n [my_sharepoint]\n type = onedrive\n token = {"access_token":"...","token_type":"Bearer","refresh_token":"...","expiry":"2000-00-00T00:00:00.000000000Z"}\n drive_id = b!-aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n drive_type = documentLibrary\n ')
block.save('sample-block', overwrite=True)
|
class RCloneConfigFileBlock(Block):
'''
Block for storing RClone configuration file contents.
This block is used to store the contents of an RClone configuration file, which can be used to configure RClone for file transfers.
    The block is updated with tokens when they are refreshed, allowing for dynamic updates to the RClone configuration.
    Generate a config locally with `rclone config create my_sharepoint onedrive` as below, then save the contents in a block with remote_name=my_sharepoint, config_file_contents=
[my_sharepoint]
type = onedrive
token = {"access_token":"...","token_type":"Bearer","refresh_token":"...","expiry":"2000-00-00T00:00:00.000000000Z"}
drive_id = b!-aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
drive_type = documentLibrary
Attributes:
remote_name (str): The name of the remote connection.
config_file_contents (str): The contents of the RClone configuration file.
Example:
Load a stored value:
```python
from prefect_managedfiletransfer import RCloneConfigFileBlock
block = RCloneConfigFileBlock.load("BLOCK_NAME")
```
Creating a block:
```python
from prefect_managedfiletransfer import RCloneConfigFileBlock
block = RCloneConfigFileBlock(
remote_name="my_sharepoint",
config_file_contents="..."
)
block.save("my_sharepoint_block", overwrite=True)
```
'''
@classmethod
def seed_value_for_example(cls):
'''
Seeds the field, value, so the block can be loaded.
'''
pass
| 3
| 2
| 16
| 1
| 12
| 3
| 1
| 1.32
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 65
| 8
| 25
| 9
| 22
| 33
| 9
| 8
| 7
| 1
| 1
| 0
| 1
|
328,353
|
ImperialCollegeLondon/prefect-managedfiletransfer
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/ImperialCollegeLondon_prefect-managedfiletransfer/prefect_managedfiletransfer/RCloneConfigSavedInPrefect.py
|
prefect_managedfiletransfer.RCloneConfigSavedInPrefect.RCloneConfigSavedInPrefect
|
from prefect_managedfiletransfer import RCloneConfigFileBlock
from prefect_managedfiletransfer.RCloneConfig import RCloneConfig
class RCloneConfigSavedInPrefect(RCloneConfig):
"""
    An RCloneConfig that uses a Prefect block to store the rclone config file contents. It is saved after successful uploads so that the refreshed token is persisted.
"""
def __init__(self, block: RCloneConfigFileBlock):
self._block = block
self.remote_name = block.remote_name
def get_config(self):
return self._block.config_file_contents
async def update_config(self, config_contents: str):
self._block.config_file_contents = config_contents
await self._block.save(overwrite=True)
|
class RCloneConfigSavedInPrefect(RCloneConfig):
'''
    An RCloneConfig that uses a Prefect block to store the rclone config file contents. It is saved after successful uploads so that the refreshed token is persisted.
'''
def __init__(self, block: RCloneConfigFileBlock):
pass
def get_config(self):
pass
async def update_config(self, config_contents: str):
pass
| 4
| 1
| 3
| 0
| 3
| 0
| 1
| 0.33
| 1
| 1
| 0
| 0
| 3
| 2
| 3
| 6
| 15
| 3
| 9
| 6
| 5
| 3
| 9
| 6
| 5
| 1
| 1
| 0
| 3
|
328,354
|
ImperialCollegeLondon/prefect-managedfiletransfer
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/ImperialCollegeLondon_prefect-managedfiletransfer/prefect_managedfiletransfer/RemoteAsset.py
|
prefect_managedfiletransfer.RemoteAsset.RemoteAsset
|
from datetime import datetime
from pathlib import Path
class RemoteAsset:
"""
Represents a remote asset with its path and last modified time.
"""
def __init__(self, path: Path, last_modified: datetime, size: int | None=None):
self.path: Path = path
self.last_modified: datetime = last_modified
self.size: int | None = size
def __repr__(self):
return f'RemoteAsset(path={self.path}, last_modified={self.last_modified}, size={self.size})'
|
class RemoteAsset:
'''
Represents a remote asset with its path and last modified time.
'''
def __init__(self, path: Path, last_modified: datetime, size: int | None=None):
pass
def __repr__(self):
pass
| 3
| 1
| 3
| 0
| 3
| 0
| 1
| 0.43
| 0
| 3
| 0
| 0
| 2
| 3
| 2
| 2
| 12
| 2
| 7
| 6
| 4
| 3
| 7
| 6
| 4
| 1
| 0
| 0
| 2
|
328,355
|
ImperialCollegeLondon/prefect-managedfiletransfer
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/ImperialCollegeLondon_prefect-managedfiletransfer/prefect_managedfiletransfer/RemoteConnectionType.py
|
prefect_managedfiletransfer.RemoteConnectionType.RemoteConnectionType
|
from enum import Enum
class RemoteConnectionType(Enum):
LOCAL = 'local'
SFTP = 'sftp'
RCLONE = 'rclone'
|
class RemoteConnectionType(Enum):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 49
| 4
| 0
| 4
| 4
| 3
| 0
| 4
| 4
| 3
| 0
| 4
| 0
| 0
|
328,356
|
ImperialCollegeLondon/prefect-managedfiletransfer
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/ImperialCollegeLondon_prefect-managedfiletransfer/prefect_managedfiletransfer/ServerWithBasicAuthBlock.py
|
prefect_managedfiletransfer.ServerWithBasicAuthBlock.ServerWithBasicAuthBlock
|
from prefect_managedfiletransfer.constants import CONSTANTS
from pydantic import Field, SecretStr
from prefect.blocks.core import Block
from typing import Optional
class ServerWithBasicAuthBlock(Block):
"""
A connection to a remote server with basic authentication.
Attributes:
username (str): The username for authentication.
password (SecretStr): The password for authentication.
host (str): The host of the server.
port (int): The port of the server.
Example:
Load a stored value:
```python
from prefect_managedfiletransfer import ServerWithBasicAuthBlock
block = ServerWithBasicAuthBlock.load("BLOCK_NAME")
```
"""
_block_type_name = 'Server - Basic Auth [ManagedFileTransfer]'
_logo_url = CONSTANTS.SERVER_LOGO_URL
_documentation_url = 'https://ImperialCollegeLondon.github.io/prefect-managedfiletransfer/blocks/#prefect-managedfiletransfer.blocks.ServerWithBasicAuthBlock'
username: str = Field(title='The username for authentication.', description='The username for authentication.')
password: Optional[SecretStr] = Field(default=None, title='The password for authentication.', description='The password for authentication.')
host: str = Field(title='The host of the server.', description='The host of the server.')
port: int = Field(default=22, description='The port of the server.')
@classmethod
def seed_value_for_example(cls):
"""
Seeds the field, value, so the block can be loaded.
"""
block = cls(username='example_user', password=SecretStr('example_password'), host='example.com', port=22)
block.save('sample-block', overwrite=True)
def isValid(self) -> bool:
"""Checks if the server credentials are available and valid."""
return self.username and self.password.get_secret_value() and self.host and (self.port > 0)
|
class ServerWithBasicAuthBlock(Block):
'''
A connection to a remote server with basic authentication.
Attributes:
username (str): The username for authentication.
password (SecretStr): The password for authentication.
host (str): The host of the server.
port (int): The port of the server.
Example:
Load a stored value:
```python
from prefect_managedfiletransfer import ServerWithBasicAuthBlock
block = ServerWithBasicAuthBlock.load("BLOCK_NAME")
```
'''
@classmethod
def seed_value_for_example(cls):
'''
Seeds the field, value, so the block can be loaded.
'''
pass
def isValid(self) -> bool:
'''Checks if the server credentials are available and valid.'''
pass
| 4
| 3
| 10
| 0
| 8
| 2
| 1
| 0.58
| 1
| 2
| 0
| 0
| 1
| 0
| 2
| 2
| 57
| 6
| 33
| 12
| 29
| 19
| 13
| 11
| 10
| 1
| 1
| 0
| 2
|
328,357
|
ImperialCollegeLondon/prefect-managedfiletransfer
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/ImperialCollegeLondon_prefect-managedfiletransfer/prefect_managedfiletransfer/ServerWithPublicKeyAuthBlock.py
|
prefect_managedfiletransfer.ServerWithPublicKeyAuthBlock.ServerWithPublicKeyAuthBlock
|
from pydantic import Field
from pydantic import SecretStr
from prefect.blocks.core import Block
from prefect_managedfiletransfer.constants import CONSTANTS
class ServerWithPublicKeyAuthBlock(Block):
"""
Block for storing SFTP server details with public key authentication.
Attributes:
username: The username for SFTP authentication.
private_key: The private key for SFTP authentication, stored as a SecretStr.
host: The hostname or IP address of the SFTP server.
port: The port number for SFTP, default is 22.
Example:
Load a stored value:
```python
from prefect_managedfiletransfer import ServerWithPublicKeyAuthBlock
block = ServerWithPublicKeyAuthBlock.load("BLOCK_NAME")
```
Creating a block:
```python
from prefect_managedfiletransfer import ServerWithPublicKeyAuthBlock
block = ServerWithPublicKeyAuthBlock(
username="example_user",
private_key=SecretStr("example_private_key"),
host="example.com",
port=22,
)
"""
_logo_url = CONSTANTS.SERVER_LOGO_URL
_block_type_name = 'Server - Public Key Auth [ManagedFileTransfer]'
_documentation_url = 'https://ImperialCollegeLondon.github.io/prefect-managedfiletransfer/blocks/#prefect-managedfiletransfer.blocks.ServerWithPublicKeyAuthBlock'
username: str = Field(title='The username for authentication.', description='The username for authentication.')
private_key: SecretStr = Field(title='The private key for authentication.', description='The private key for authentication.')
host: str = Field(title='The host of the server.', description='The host of the server.')
port: int = Field(default=22, description='The port of the server.')
def is_valid(self) -> bool:
"""Checks if the server credentials are available and valid."""
return self.username and self.private_key.get_secret_value() and self.host and (self.port > 0)
def get_temp_key_file(self) -> _TemporaryKeyFile:
"""
Returns a context manager that provides a temporary file with the private key.
The file is automatically deleted when the context is exited.
"""
return _TemporaryKeyFile(self.private_key)
@classmethod
def seed_value_for_example(cls):
"""
Seeds the field, value, so the block can be loaded.
"""
block = cls(username='example_user', private_key=SecretStr('example_private_key'), host='example.com', port=22)
block.save('sample-block', overwrite=True)
|
class ServerWithPublicKeyAuthBlock(Block):
'''
Block for storing SFTP server details with public key authentication.
Attributes:
username: The username for SFTP authentication.
private_key: The private key for SFTP authentication, stored as a SecretStr.
host: The hostname or IP address of the SFTP server.
port: The port number for SFTP, default is 22.
Example:
Load a stored value:
```python
from prefect_managedfiletransfer import ServerWithPublicKeyAuthBlock
block = ServerWithPublicKeyAuthBlock.load("BLOCK_NAME")
```
Creating a block:
```python
from prefect_managedfiletransfer import ServerWithPublicKeyAuthBlock
block = ServerWithPublicKeyAuthBlock(
username="example_user",
private_key=SecretStr("example_private_key"),
host="example.com",
port=22,
)
'''
def is_valid(self) -> bool:
'''Checks if the server credentials are available and valid.'''
pass
def get_temp_key_file(self) -> _TemporaryKeyFile:
'''
Returns a context manager that provides a temporary file with the private key.
The file is automatically deleted when the context is exited.
'''
pass
@classmethod
def seed_value_for_example(cls):
'''
Seeds the field, value, so the block can be loaded.
'''
pass
| 5
| 4
| 9
| 0
| 6
| 3
| 1
| 0.94
| 1
| 3
| 1
| 0
| 2
| 0
| 3
| 3
| 72
| 7
| 34
| 13
| 29
| 32
| 15
| 12
| 11
| 1
| 1
| 0
| 3
|
328,358
|
ImperialCollegeLondon/prefect-managedfiletransfer
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/ImperialCollegeLondon_prefect-managedfiletransfer/prefect_managedfiletransfer/ServerWithPublicKeyAuthBlock.py
|
prefect_managedfiletransfer.ServerWithPublicKeyAuthBlock._TemporaryKeyFile
|
import tempfile
import logging
from pydantic import SecretStr
from pathlib import Path
class _TemporaryKeyFile:
def __init__(self, private_key: SecretStr):
self.private_key = private_key
self._tempfile = None
def __enter__(self):
self._tempfile = tempfile.NamedTemporaryFile('w')
self._tempfile.write(self.private_key.get_secret_value())
self._tempfile.flush()
logging.debug(f'Created temp key file {self._tempfile.name}')
return self
def __exit__(self, exc, value, tb):
result = self._tempfile.__exit__(exc, value, tb)
self.close()
return result
def close(self):
if self._tempfile is None:
return
self._tempfile.close()
def get_path(self):
if self._tempfile is None:
raise ValueError('Temporary key file has not been created yet.')
return Path(self._tempfile.name)
|
class _TemporaryKeyFile:
def __init__(self, private_key: SecretStr):
pass
def __enter__(self):
pass
def __exit__(self, exc, value, tb):
pass
def close(self):
pass
def get_path(self):
pass
| 6
| 0
| 5
| 0
| 4
| 0
| 1
| 0
| 0
| 3
| 0
| 0
| 5
| 2
| 5
| 5
| 28
| 6
| 22
| 9
| 16
| 0
| 22
| 9
| 16
| 2
| 0
| 1
| 7
|
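A sketch tying ServerWithPublicKeyAuthBlock and _TemporaryKeyFile together: the key material only touches disk inside the with-block and the file is deleted on exit (assumes prefect and pydantic are installed; the key string is a placeholder):
from pydantic import SecretStr
from prefect_managedfiletransfer.ServerWithPublicKeyAuthBlock import ServerWithPublicKeyAuthBlock

block = ServerWithPublicKeyAuthBlock(
    username='example_user',
    private_key=SecretStr('-----BEGIN OPENSSH PRIVATE KEY-----...'),  # placeholder
    host='example.com',
    port=22,
)
with block.get_temp_key_file() as key_file:
    print(key_file.get_path())  # e.g. /tmp/tmpXXXXXXXX, removed when the block exits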
328,359
|
ImperialCollegeLondon/prefect-managedfiletransfer
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/ImperialCollegeLondon_prefect-managedfiletransfer/prefect_managedfiletransfer/SortFilesBy.py
|
prefect_managedfiletransfer.SortFilesBy.SortFilesBy
|
from enum import Enum
class SortFilesBy(Enum):
PATH_ASC = 'path_asc'
PATH_DESC = 'path_desc'
SIZE_ASC = 'size_asc'
SIZE_DESC = 'size_desc'
DATE_ASC = 'date_asc'
DATE_DESC = 'date_desc'
def get_sort_by_lambda_tuple(self) -> tuple:
"""
Returns a tuple of (lambda function, reverse boolean) for sorting.
The lambda function is used to extract the attribute to sort by,
and the boolean indicates whether to sort in reverse order.
"""
switcher = {SortFilesBy.PATH_ASC: (lambda x: x.path, False), SortFilesBy.PATH_DESC: (lambda x: x.path, True), SortFilesBy.SIZE_ASC: (lambda x: x.size, False), SortFilesBy.SIZE_DESC: (lambda x: x.size, True), SortFilesBy.DATE_ASC: (lambda x: x.last_modified, False), SortFilesBy.DATE_DESC: (lambda x: x.last_modified, True)}
return switcher[self]
|
class SortFilesBy(Enum):
def get_sort_by_lambda_tuple(self) -> tuple:
'''
Returns a tuple of (lambda function, reverse boolean) for sorting.
The lambda function is used to extract the attribute to sort by,
and the boolean indicates whether to sort in reverse order.
'''
pass
| 2
| 1
| 15
| 0
| 10
| 5
| 1
| 0.29
| 1
| 1
| 0
| 0
| 1
| 0
| 1
| 50
| 23
| 1
| 17
| 9
| 15
| 5
| 10
| 9
| 8
| 1
| 4
| 0
| 1
|
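A sketch of get_sort_by_lambda_tuple above, sorting RemoteAsset objects (record 328,354) newest-first:
from datetime import datetime
from pathlib import Path
from prefect_managedfiletransfer.RemoteAsset import RemoteAsset
from prefect_managedfiletransfer.SortFilesBy import SortFilesBy

assets = [
    RemoteAsset(Path('a.txt'), datetime(2024, 1, 1), size=5),
    RemoteAsset(Path('b.txt'), datetime(2024, 6, 1), size=3),
]
# The enum member yields a (key function, reverse flag) pair for sorted().
key_fn, reverse = SortFilesBy.DATE_DESC.get_sort_by_lambda_tuple()
for asset in sorted(assets, key=key_fn, reverse=reverse):
    print(asset.path, asset.last_modified)
# b.txt 2024-06-01 00:00:00
# a.txt 2024-01-01 00:00:00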
328,360
|
ImperialCollegeLondon/prefect-managedfiletransfer
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/ImperialCollegeLondon_prefect-managedfiletransfer/prefect_managedfiletransfer/TransferType.py
|
prefect_managedfiletransfer.TransferType.TransferType
|
from enum import Enum
class TransferType(str, Enum):
Copy = 'COPY'
Move = 'MOVE'
|
class TransferType(str, Enum):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2
| 0
| 0
| 0
| 0
| 0
| 0
| 115
| 3
| 0
| 3
| 3
| 2
| 0
| 3
| 3
| 2
| 0
| 4
| 0
| 0
|
328,361
|
ImperialCollegeLondon/prefect-managedfiletransfer
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/ImperialCollegeLondon_prefect-managedfiletransfer/prefect_managedfiletransfer/constants.py
|
prefect_managedfiletransfer.constants.CONSTANTS
|
class CONSTANTS:
SKIPPED_STATE_NAME = 'Skipped'
SERVER_LOGO_URL = 'https://cdn.sanity.io/images/3ugk85nk/production/fb3f4debabcda1c5a3aeea4f5b3f94c28845e23e-250x250.png'
ONE_GIGABYTE = 1024 * 1024 * 1024
class ENV_VAR_NAMES:
PMFTUPLOAD_USERNAME = 'PMFTUPLOAD_USERNAME'
PMFTUPLOAD_PASSWORD = 'PMFTUPLOAD_PASSWORD'
class FLOW_NAMES:
UPLOAD_FILE = 'upload-file'
TRANSFER_FILES = 'transfer-files'
UNPACK_FILES = 'unpack-files'
class DEPLOYMENT_NAMES:
UPLOAD_FILE = 'upload_file'
TRANSFER_FILES = 'transfer_files'
UNPACK_FILES = 'unpack_files'
| null | 4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 18
| 3
| 15
| 15
| 11
| 0
| 15
| 15
| 11
| 0
| 0
| 0
| 0
|
328,362
|
e-dyn/gloria
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/e-dyn_gloria/gloria/interface.py
|
gloria.interface.Gloria
|
from matplotlib.dates import AutoDateFormatter, AutoDateLocator
from matplotlib import pyplot as plt
import seaborn as sns
import pandas as pd
from gloria.utilities.constants import _DELIM, _DTYPE_KIND, _FIT_DEFAULTS, _GLORIA_DEFAULTS, _LOAD_DATA_DEFAULTS, _PREDICT_DEFAULTS, _T_INT
import numpy as np
from gloria.utilities.configuration import assemble_config, model_from_toml
from gloria.regressors import EventRegressor, ExternalRegressor, Regressor, Seasonality
from gloria.protocols.protocol_base import Protocol
from typing import Any, Collection, Literal, Optional, Type, Union, cast
from pathlib import Path
from gloria.profiles import Profile
from gloria.plot import add_changepoints_to_plot, plot_event_component, plot_seasonality_component, plot_trend_component
from gloria.models import MODEL_MAP, ModelInputData, get_model_backend
from typing_extensions import Self
import gloria.utilities.serialize as gs
from gloria.utilities.types import Distribution, SeriesData, Timedelta
from gloria.utilities.logging import get_logger
from pydantic import BaseModel, ConfigDict, Field, field_validator
from gloria.utilities.misc import cast_series_to_kind, time_to_integer
from matplotlib.ticker import Formatter, Locator
from gloria.utilities.errors import FittedError, NotFittedError
class Gloria(BaseModel):
"""
The Gloria forecaster object is the central hub for the entire modeling
workflow.
Gloria objects are initialized with parameters controlling the fit and
    prediction behaviour. Features such as ``seasonalities``,
    ``external regressors``, and ``events`` (or collections of these via
    ``protocols``) are added to Gloria objects. Once set up, :meth:`~Gloria.fit`,
:meth:`~Gloria.predict`, or :meth:`~Gloria.plot` methods are available to
fit the model to input data and visualize the results.
Parameters
----------
model : str
The distribution model to be used. Can be any of ``"poisson"``,
``"binomial"``, ``"negative binomial"``, ``"gamma"``, ``"beta"``,
``"beta-binomial"``, or ``"normal"``. See :ref:`Model Selection
<ref-model-selection>` tutorial for further information.
sampling_period : Union[pd.Timedelta, str]
Minimum spacing between two adjacent samples either as ``pd.Timedelta``
or a compatible string such as ``"1d"`` or ``"20 min"``.
timestamp_name : str, optional
The name of the timestamp column as expected in the input data frame
for :meth:`~Gloria.fit`.
metric_name : str, optional
The name of the expected metric column of the input data frame for
:meth:`~Gloria.fit`.
capacity_name : str, optional
The name of the column containing capacity data for the models
``"binomial"`` and ``"beta-binomial"``.
changepoints : pd.Series, optional
List of timestamps at which to include potential changepoints. If not
specified (default), potential changepoints are selected automatically.
n_changepoints : int, optional
Number of potential changepoints to include. Not used if input
'changepoints' is supplied. If ``changepoints`` is not supplied, then
``n_changepoints`` potential changepoints are selected uniformly from
the first ``changepoint_range`` proportion of the history. Must be a
non-negative integer.
changepoint_range : float, optional
Proportion of history in which trend changepoints will be estimated.
Must be in range (0, 1). Not used if ``changepoints`` is specified.
seasonality_prior_scale : float, optional
Parameter modulating the strength of the seasonality model. Larger
values allow the model to fit larger seasonal fluctuations, smaller
values dampen the seasonality. Can be specified for individual
seasonalities using :meth:`add_seasonality`. Must be larger than 0.
event_prior_scale : float, optional
Parameter modulating the strength of additional event regressors.
Larger values allow the model to fit larger event impact, smaller
values dampen the event impact. Can be specified for individual
events using :meth:`add_event`. Must be larger than 0.
changepoint_prior_scale : float, optional
Parameter modulating the flexibility of the automatic changepoint
selection. Large values will allow many changepoints, small values will
allow few changepoints. Must be larger than 0.
dispersion_prior_scale : float, optional
Parameter controlling the flexibility of the dispersion (i.e. allowed
variance) of the model. Larger values allow more dispersion. This
parameter does not affect the binomial and poisson model. Must be
larger than 0.
interval_width : float, optional
Width of the uncertainty intervals provided for the prediction. It is
used for both uncertainty intervals of the expected value (fit) as
well as the observed values (observed). Must be in range (0, 1).
trend_samples : int, optional
Number of simulated draws used to estimate uncertainty intervals of the
*trend* in prediction periods that were not included in the historical
data. Setting this value to 0 will disable uncertainty estimation.
Must be greater than or equal to 0.
"""
model_config = ConfigDict(extra='allow', arbitrary_types_allowed=True, validate_assignment=True)
model: Distribution = _GLORIA_DEFAULTS['model']
sampling_period: Timedelta = _GLORIA_DEFAULTS['sampling_period']
timestamp_name: str = _GLORIA_DEFAULTS['timestamp_name']
metric_name: str = _GLORIA_DEFAULTS['metric_name']
capacity_name: str = _GLORIA_DEFAULTS['capacity_name']
changepoints: Optional[pd.Series] = _GLORIA_DEFAULTS['changepoints']
n_changepoints: int = Field(ge=0, default=_GLORIA_DEFAULTS['n_changepoints'])
changepoint_range: float = Field(gt=0, lt=1, default=_GLORIA_DEFAULTS['changepoint_range'])
seasonality_prior_scale: float = Field(gt=0, default=_GLORIA_DEFAULTS['seasonality_prior_scale'])
event_prior_scale: float = Field(gt=0, default=_GLORIA_DEFAULTS['event_prior_scale'])
changepoint_prior_scale: float = Field(gt=0, default=_GLORIA_DEFAULTS['changepoint_prior_scale'])
dispersion_prior_scale: float = Field(gt=0, default=_GLORIA_DEFAULTS['dispersion_prior_scale'])
interval_width: float = Field(gt=0, lt=1, default=_GLORIA_DEFAULTS['interval_width'])
trend_samples: int = Field(ge=0, default=_GLORIA_DEFAULTS['trend_samples'])
@field_validator('sampling_period')
@classmethod
def validate_sampling_period(cls: Type[Self], sampling_period: pd.Timedelta) -> pd.Timedelta:
"""
Validates that the sampling period is a positive, nonzero
``pd.Timedelta``.
"""
if sampling_period <= pd.Timedelta(0):
msg = 'Sampling period must be positive and nonzero.'
get_logger().error(msg)
raise ValueError(msg)
return sampling_period
@field_validator('changepoints', mode='before')
@classmethod
def validate_changepoints(cls: Type[Self], changepoints: Optional[SeriesData]) -> Optional[pd.Series]:
"""
Converts changepoints input to pd.Series
"""
if changepoints is None:
return changepoints
try:
changepoints = pd.Series(changepoints)
except Exception as e:
raise ValueError('Input changepoints cannot be converted to a pandas Series.') from e
return changepoints
def __init__(self: Self, *args: tuple[Any, ...], **kwargs: dict[str, Any]) -> None:
"""
Initializes Gloria model.
Parameters
----------
*args : tuple[Any, ...]
Positional arguments passed through to Pydantic Model __init__()
**kwargs : dict[str, Any]
Keyword arguments passed through to Pydantic Model __init__()
"""
super().__init__(*args, **kwargs)
if self.changepoints is not None:
self.changepoints = pd.Series(pd.to_datetime(self.changepoints), name=self.timestamp_name)
self.n_changepoints = len(self.changepoints)
self.model_backend = get_model_backend(model=self.model)
self.vectorized = self.capacity_name != '' and self.model in ('binomial', 'beta-binomial')
self.external_regressors: dict[str, ExternalRegressor] = dict()
self.seasonalities: dict[str, Seasonality] = dict()
self.events: dict[str, dict[str, Any]] = dict()
self.prior_scales: dict[str, float] = dict()
self.protocols: list[Protocol] = []
self.history: pd.DataFrame = pd.DataFrame()
self.first_timestamp: pd.Timestamp = pd.Timestamp(0)
self.X: pd.DataFrame = pd.DataFrame()
self.fit_kwargs: dict[str, Any] = {}
self._config: dict[str, Any] = {'fit': _FIT_DEFAULTS.copy(), 'predict': _PREDICT_DEFAULTS.copy(), 'load_data': _LOAD_DATA_DEFAULTS.copy()}
@property
def is_fitted(self: Self) -> bool:
"""
Determines whether the present :class:`Gloria` model is fitted.
This property is *read-only*.
Returns
-------
bool
``True`` if fitted, ``False`` otherwise.
"""
return self.model_backend.fit_params != dict()
def validate_column_name(self: Self, name: str, check_seasonalities: bool=True, check_events: bool=True, check_external_regressors: bool=True) -> None:
"""
Validates the name of a seasonality, an event or an external regressor.
Parameters
----------
name : str
The name to validate.
check_seasonalities : bool, optional
Check if name already used for seasonality. The default is True.
check_events : bool, optional
Check if name already used as an event. The default is True.
check_external_regressors : bool, optional
Check if name already used for a regressor. The default is True.
Raises
------
TypeError
If the passed ``name`` is not a string
ValueError
Raised in case the ``name`` is not valid for any reason.
"""
if not isinstance(name, str):
raise TypeError('Name must be a string')
if _DELIM in name:
raise ValueError(f"Name cannot contain '{_DELIM}'")
reserved_names = ['fit', 'observed', 'trend']
rn_l = [n + '_lower' for n in reserved_names]
rn_u = [n + '_upper' for n in reserved_names]
reserved_names.extend(rn_l)
reserved_names.extend(rn_u)
reserved_names.extend([self.timestamp_name, self.metric_name, _T_INT, _DTYPE_KIND])
if self.vectorized:
reserved_names.append(self.capacity_name)
if name in reserved_names:
raise ValueError(f'Name {name} is reserved.')
if check_seasonalities and name in self.seasonalities:
raise ValueError(f'Name {name} already used for a seasonality.')
if check_events and name in self.events:
raise ValueError(f'Name {name} already used for an event.')
if check_external_regressors and name in self.external_regressors:
raise ValueError(f'Name {name} already used for a regressor.')
def add_seasonality(self: Self, name: str, period: str, fourier_order: int, prior_scale: Optional[float]=None) -> Self:
"""
Adds a seasonality to the Gloria object.
From the seasonality, even and odd Fourier series components up to a
user-defined maximum order will be generated and used as regressors
during fitting and predicting.
Parameters
----------
name : str
A descriptive name of the seasonality.
period : str
Fundamental period of the seasonality component. Should be a string
compatible with ``pd.Timedelta`` (e.g. ``"1d"`` or ``"12 h"``).
fourier_order : int
All Fourier terms from fundamental up to ``fourier_order`` will be
used as regressors.
prior_scale : float, optional
The regression coefficient is given a prior with the specified
scale parameter. Decreasing the prior scale will add additional
regularization. If ``None`` (default), ``self.seasonality_prior_scale``
is used. Must be greater than 0.
Raises
------
:class:`~gloria.utilities.errors.FittedError`
Raised in case the method is called on a fitted ``Gloria`` model.
ValueError
Raised when ``prior scale`` or ``period`` are not allowed values.
Returns
-------
Gloria
Updated Gloria object
"""
if self.is_fitted:
raise FittedError('Seasonalities must be added prior to model fitting.')
self.validate_column_name(name, check_seasonalities=False)
if name in self.seasonalities:
get_logger().info(f"'{name}' is an existing seasonality. Overwriting with new configuration.")
if prior_scale is None:
prior_scale = self.seasonality_prior_scale
prior_scale = float(prior_scale)
if prior_scale <= 0:
raise ValueError('Prior scale must be > 0')
if fourier_order <= 0 or not isinstance(fourier_order, int):
raise ValueError('Fourier Order must be an integer > 0')
self.seasonalities[name] = Seasonality(name=name, period=pd.to_timedelta(period) / self.sampling_period, fourier_order=fourier_order, prior_scale=prior_scale)
return self
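# Usage sketch (illustrative; the column names 'ts' and 'value' are assumed,
# not from the source). Periods are any pd.Timedelta-compatible strings and
# are stored internally as multiples of the sampling period, e.g. '1d' at an
# hourly sampling period becomes 24:
#   m = Gloria(sampling_period='1h', timestamp_name='ts', metric_name='value')
#   m.add_seasonality(name='daily', period='1d', fourier_order=4)
#   m.add_seasonality(name='weekly', period='7d', fourier_order=3)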
def add_event(self: Self, name: str, regressor_type: str, profile: Union[Profile, dict[str, Any]], prior_scale: Optional[float]=None, include: Union[bool, Literal['auto']]='auto', **regressor_kwargs: Any) -> Self:
"""
Adds an event to the Gloria object.
The event will be treated as a regressor during fitting and predicting.
Parameters
----------
name : str
A descriptive name of the event.
regressor_type : str
Type of the underlying event regressor. Must be any of
``"SingleEvent"``, ``"IntermittentEvent"``, ``"PeriodicEvent"``,
``"Holiday"``
profile : Union[Profile, dict[str, Any]]
The base profile used by the event regressor. Must be of type
:class:`Profile` or a dictionary from which a profile can be
constructed using :meth:`Profile.from_dict`.
prior_scale : float, optional
The regression coefficient is given a prior with the specified
scale parameter. Decreasing the prior scale will add additional
regularization. If ``None`` (default), ``self.event_prior_scale`` is
used. Must be larger than 0.
include : Union[bool, Literal["auto"]]
If set to ``"auto"`` (default), the event regressor will be
excluded from the model during :meth:`fit`, if its overlap with the
data is negligible. This behaviour can be overridden by setting
``include`` to ``True`` or ``False``.
**regressor_kwargs : Any
Additional keyword arguments necessary to create the event
regressor specified by ``regressor_type``.
Raises
------
:class:`~gloria.utilities.errors.FittedError`
Raised in case the method is called on a fitted ``Gloria`` model.
ValueError
Raised in case of invalid ``prior_scale`` or ``include`` values.
Returns
-------
Gloria
The ``Gloria`` model updated with the new event
"""
if self.is_fitted:
raise FittedError('Event must be added prior to model fitting.')
self.validate_column_name(name, check_events=False)
if name in self.events:
get_logger().info(f"'{name}' is an existing event. Overwriting with new configuration.")
if prior_scale is None:
prior_scale = self.event_prior_scale
prior_scale = float(prior_scale)
if prior_scale <= 0:
raise ValueError('Prior scale must be > 0')
if not (isinstance(include, bool) or include == 'auto'):
raise ValueError("include must be True, False, or 'auto'.")
if isinstance(profile, Profile):
profile = profile.to_dict()
regressor_dict = {'name': name, 'prior_scale': prior_scale, 'regressor_type': regressor_type, 'profile': profile, **regressor_kwargs}
new_regressor = Regressor.from_dict(regressor_dict)
if isinstance(new_regressor, EventRegressor):
regressor_info = {'regressor': new_regressor, 'include': include}
self.events[name] = regressor_info
else:
raise TypeError(f'The created regressor must be an EventRegressor but is {type(new_regressor)}.')
return self
def add_external_regressor(self: Self, name: str, prior_scale: float) -> Self:
"""
Add an external regressor to the Gloria object.
The external regressor will be used for fitting and predicting.
Parameters
----------
name : str
A descriptive name of the regressor. The dataframes passed to
:meth:`fit` and :meth:`predict` must have a column with the
specified name. The values in these columns are used for the
regressor.
prior_scale : float
The regression coefficient is given a prior with the specified
scale parameter. Decreasing the prior scale will add additional
regularization. Must be greater than 0.
Raises
------
:class:`~gloria.utilities.errors.FittedError`
Raised in case the method is called on a fitted Gloria model.
ValueError
Raised in case of an invalid ``prior_scale`` value.
Returns
-------
Gloria
Updated Gloria object
"""
if self.is_fitted:
raise FittedError('Regressors must be added prior to model fitting.')
self.validate_column_name(name, check_external_regressors=False)
if name in self.external_regressors:
get_logger().info(f"'{name}' is an existing external regressor. Overwriting with new configuration.")
prior_scale = float(prior_scale)
if prior_scale <= 0:
raise ValueError('Prior scale must be > 0')
self.external_regressors[name] = ExternalRegressor(name=name, prior_scale=prior_scale)
return self
def add_protocol(self: Self, protocol: Protocol) -> Self:
"""
Add a protocol to the Gloria object.
Protocols provide additional, automated routines for setting up the
model during :meth:`fit`. As of now, only the
:class:`~gloria.CalendricData` protocol is implemented.
Parameters
----------
protocol : Protocol
The Protocol object to be added.
Raises
------
:class:`~gloria.utilities.errors.FittedError`
Raised in case the method is called on a fitted Gloria model.
TypeError
Raised when the provided ``protocol`` is not a valid Protocol
object.
Returns
-------
Gloria
Updated Gloria model.
"""
if self.is_fitted:
raise FittedError('Protocols must be added prior to model fitting.')
if not isinstance(protocol, Protocol):
raise TypeError(f"The protocol must be of type 'Protocol', but is {type(protocol)}.")
p_type = protocol._protocol_type
existing_types = set((p._protocol_type for p in self.protocols))
if p_type in existing_types:
get_logger().warning(f'The model already has a protocol of type {p_type}. Adding another one may lead to unexpected behaviour due to interferences.')
self.protocols.append(protocol)
return self
def validate_metric_column(self: Self, df: pd.DataFrame, name: str, col_type: Literal['Metric', 'Capacity']='Metric') -> None:
"""
Validate that the metric column exists and contains only valid values.
Parameters
----------
df : pd.DataFrame
Input pandas DataFrame of data to be fitted.
name : str
The metric column name
col_type : Literal["Metric", "Capacity"], optional
Specifies whether the metric column or capacity column is to be
validated. The default is "Metric".
Raises
------
KeyError
Raised if the metric column doesn't exist in the DataFrame
TypeError
Raised if the metric column's dtype does not fit the model
ValueError
Raised if there are any NaNs in the metric column
"""
if name not in df:
raise KeyError(f"{col_type} column '{name}' is missing from DataFrame.")
m_dtype_kind = df[name].dtype.kind
allowed_types = list(MODEL_MAP[self.model].kind)
if m_dtype_kind not in allowed_types:
type_list = ', '.join([f"'{s}'" for s in allowed_types])
raise TypeError(f"{col_type} column '{name}' type is '{m_dtype_kind}', but must be any of {type_list} for model '{self.model}'.")
if df[name].isnull().any():
raise ValueError(f"Found NaN in {col_type.lower()} column '{name}'.")
def validate_dataframe(self: Self, df: pd.DataFrame) -> pd.DataFrame:
"""
Validates that the input data frame of the fitting-method adheres to
all requirements.
Parameters
----------
df : pd.DataFrame
DataFrame that contains at the very least a timestamp column with
name self.timestamp_name and a numeric column with name
self.metric_name. If the Gloria model 'binomial' or
'beta-binomial' is to be used in vectorized capacity form, a
column with name self.capacity_name must exist. If external
regressors were added to the model, the respective columns must be
present as well.
Returns
-------
pd.DataFrame
Validated DataFrame that is reduced to timestamp, metric and
external regressor columns.
"""
if df.shape[0] < 2:
raise ValueError('Dataframe has less than 2 non-NaN rows.')
if self.timestamp_name not in df:
raise KeyError(f"Timestamp column '{self.timestamp_name}' is missing from DataFrame.")
if df.index.name == self.timestamp_name:
raise KeyError(f"Timestamp '{self.timestamp_name}' is set as index but expected to be a column")
time = df[self.timestamp_name]
if time.dtype.kind != 'M':
raise TypeError(f"Timestamp column '{self.timestamp_name}' is not of type datetime.")
if time.isnull().any():
raise ValueError(f"Found NaN in timestamp column '{self.timestamp_name}'.")
if time.dt.tz is not None:
raise NotImplementedError(f"Timestamp column '{self.timestamp_name}' has timezone specified, which is not supported. Remove timezone.")
if not time.is_monotonic_increasing:
raise ValueError(f"Timestamp column '{self.timestamp_name}' is not sorted.")
sample_multiples = (time - time.min()) / self.sampling_period
sampling_is_valid = sample_multiples.apply(float.is_integer).all()
if not sampling_is_valid:
raise ValueError(f"Timestamp column '{self.timestamp_name}' is not sampled with expected sampling period '{self.sampling_period}'")
if (sample_multiples.diff() > 1).any():
get_logger().debug('All timestamps are multiples of the sampling period, but gaps were found.')
self.validate_metric_column(df=df, name=self.metric_name, col_type='Metric')
if self.vectorized:
self.validate_metric_column(df=df, name=self.capacity_name, col_type='Capacity')
if (df[self.capacity_name] < df[self.metric_name]).any():
raise ValueError("There are values in the metric column that exceed the corresponding values in the capacity column, which is not allowed for models 'binomial' and 'beta-binomial'.")
for name in self.external_regressors:
if name not in df:
raise KeyError(f"Regressor column '{name}' is missing from DataFrame.")
if df[name].dtype.kind not in 'biuf':
raise TypeError(f"Regressor column '{name}' is non-numeric.")
if df[name].isnull().any():
raise ValueError(f"Regressor column '{name}' contains NaN.")
history = df.loc[:, [self.timestamp_name, self.metric_name, *self.external_regressors.keys()]].copy()
if self.vectorized:
history[self.capacity_name] = df[self.capacity_name].copy()
return history
def set_changepoints(self: Self) -> Self:
"""
Sets changepoints
Sets changepoints to the dates and corresponding integer values of
changepoints. The following cases are handled:
1. The changepoints were passed in explicitly.
a. They are empty.
b. They are not empty, and need validation.
2. We are generating a grid of them.
3. The user prefers no changepoints be used.
"""
if self.changepoints is not None:
if len(self.changepoints) == 0:
pass
else:
too_low = self.changepoints.min() < self.history[self.timestamp_name].min()
too_high = self.changepoints.max() > self.history[self.timestamp_name].max()
if too_low or too_high:
raise ValueError('Changepoints must fall within training data.')
else:
hist_size = int(np.floor(self.history.shape[0] * self.changepoint_range))
if self.n_changepoints + 1 > hist_size:
get_logger().warning(f'Provided number of changepoints {self.n_changepoints} greater than number of observations in changepoint range. Using {hist_size - 1} instead. Consider reducing n_changepoints.')
self.n_changepoints = hist_size - 1
get_logger().debug(f'Distributing {self.n_changepoints} equidistant changepoints.')
if self.n_changepoints > 0:
cp_indexes = np.linspace(0, hist_size - 1, self.n_changepoints + 1).round().astype(int)
self.changepoints = self.history.iloc[cp_indexes][self.timestamp_name].tail(-1)
else:
self.changepoints = pd.Series(pd.to_datetime([]), name=self.timestamp_name, dtype='<M8[ns]')
if len(self.changepoints) > 0:
changepoints_int_loc = time_to_integer(self.changepoints, self.first_timestamp, self.sampling_period)
self.changepoints_int = pd.Series(changepoints_int_loc, name=_T_INT, dtype=int).sort_values().reset_index(drop=True)
else:
self.changepoints_int = pd.Series([0], name=_T_INT, dtype=int)
return self
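# Worked example of the automatic grid above (a sketch, not library output):
# with 100 history rows, changepoint_range=0.8 and n_changepoints=4,
# hist_size = floor(100 * 0.8) = 80 and
#   np.linspace(0, 79, 4 + 1).round().astype(int)  # -> [ 0, 20, 40, 59, 79]
# The leading index 0 is then dropped by .tail(-1), leaving the 4 interior
# candidate changepoints.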
def time_to_integer(self: Self, history: pd.DataFrame) -> pd.DataFrame:
"""
Create a new column from timestamp column of input data frame that
contains corresponding integer values with respect to the sampling period.
Parameters
----------
history : pd.DataFrame
Validated input data frame of fit method
Returns
-------
history : pd.DataFrame
Updated data frame
"""
time = history[self.timestamp_name]
self.first_timestamp = time.min()
self.last_timestamp = time.max()
time_as_int = time_to_integer(time, self.first_timestamp, self.sampling_period)
history[_T_INT] = time_as_int
return history
def make_all_features(self: Self, data: Optional[pd.DataFrame]=None) -> tuple[pd.DataFrame, dict[str, float]]:
"""
Creates the feature matrix X containing all regressors used in the fit
and for prediction. Also returns prior scales for all features.
Parameters
----------
data : Optional[pd.DataFrame], optional
Input dataframe. It must contain at least a column with integer
timestamps (for column name cf. to _T_INT constant) as well as
the external regressor columns associated with the model. Default
of data is None, in which case the model history will be used.
Returns
-------
X : pd.DataFrame
Feature matrix with columns for the different features and rows
corresponding to the timestamps
prior_scales : dict[str,float]
A dictionary mapping feature -> prior scale
"""
if data is None:
data = self.history
make_features = [lambda s=s: s.make_feature(data[_T_INT]) for s in self.seasonalities.values()]
make_features.extend([lambda er=er: er.make_feature(data[_T_INT], data[er.name]) for er in self.external_regressors.values()])
for name in list(self.events.keys()):
regressor = self.events[name]['regressor']
if not self.is_fitted:
include = self.events[name]['include']
if include is False:
continue
impact = regressor.get_impact(data[self.timestamp_name])
if impact < 0.1 and include is True:
get_logger().warning(f"Event '{regressor.name}' hardly occurs during timerange of interest, which may lead to unreliable or failing fits. Consider setting include='auto'.")
elif impact < 0.1:
get_logger().warning(f"Event '{regressor.name}' hardly occurs during timerange of interest. Removing it from model. Set include=True to overwrite this.")
del self.events[name]
continue
make_features.append(lambda reg=regressor: reg.make_feature(data[self.timestamp_name]))
X_lst = []
prior_scales: dict[str, float] = dict()
for make_feature in make_features:
X_loc, prior_scales_loc = make_feature()
X_lst.append(X_loc)
prior_scales = {**prior_scales, **prior_scales_loc}
if X_lst:
X = pd.concat(X_lst, axis=1)
else:
X = pd.DataFrame()
return (X, prior_scales)
def preprocess(self: Self, data: pd.DataFrame) -> ModelInputData:
"""
Validates input data and prepares the model with respect to the data.
Parameters
----------
data : pd.DataFrame
DataFrame that contains at the very least a timestamp column with
name self.timestamp_name and a numeric column with name
self.metric_name. If external regressors were added to the model,
the respective columns must be present as well.
Returns
-------
ModelInputData
Data as used by the model backend. Contains the inputs that all
stan models have in common.
"""
self.history = self.validate_dataframe(data)
self.history.sort_values(by=self.timestamp_name, inplace=True)
self.history.reset_index(drop=True, inplace=True)
self.history = self.time_to_integer(self.history)
for protocol in self.protocols:
protocol.set_events(model=self, timestamps=self.history[self.timestamp_name])
protocol.set_seasonalities(model=self, timestamps=self.history[self.timestamp_name])
self.X, self.prior_scales = self.make_all_features()
self.set_changepoints()
input_data = ModelInputData(T=self.history.shape[0], S=len(self.changepoints_int), K=self.X.shape[1], tau=self.changepoint_prior_scale, gamma=self.dispersion_prior_scale, y=np.asarray(self.history[self.metric_name]), t=np.asarray(self.history[_T_INT]), t_change=np.asarray(self.changepoints_int), X=self.X.values, sigmas=np.array(list(self.prior_scales.values())))
if self.vectorized:
input_data.capacity = np.asarray(self.history[self.capacity_name])
return input_data
def fit(self: Self, data: pd.DataFrame, toml_path: Optional[Union[str, Path]]=None, **kwargs: dict[str, Any]) -> Self:
"""
Fits the Gloria object.
The fitting routine validates input data, sets up the model based on
all input parameters, added regressors or protocols, and eventually
calls the model backend for the actual fitting.
Parameters
----------
data : pd.DataFrame
A pandas DataFrame containing timestamp and metric columns named
according to ``self.timestamp_name`` and ``self.metric_name``,
respectively. If *external regressors* were added to the model, the
respective columns must be present as well.
toml_path : Optional[Union[str, Path]], optional
Path to an optional configuration TOML file that contains a section
keyed by ``[fit]``. If *None* (default), TOML configuration is
skipped. TOML configuration precedes model settings saved in
``self._config`` as well as default settings.
optimize_mode : str, optional
If ``"MAP"`` (default), the optimization step yields the Maximum A
Posteriori estimation; if ``"MLE"``, a maximum likelihood estimation.
use_laplace : bool, optional
If ``True`` (default), the optimization is followed by a sampling
over the Laplace approximation around the posterior mode.
capacity : int, optional
An upper bound used for ``binomial`` and ``beta-binomial`` models.
Note that ``capacity`` must be >= all values in the metric column
of ``data``. Default is ``None``. Specifying ``capacity`` is
mutually exclusive with providing a ``capacity_mode`` and
``capacity_value`` pair.
capacity_mode : str, optional
A method used to estimate the capacity. Three modes are available:
- ``"constant"``: The provided ``capacity_value`` equals the
capacity.
- ``"factor"``: The capacity is the maximum of the response
variable times ``capacity_value``.
- ``"scale"``: The capacity is optimized such that the response
variable is distributed around the expectation value
:math:`N \\times p` with :math:`N=` capacity and :math:`p=`
``capacity_value``. This mode is the default using
``capacity_value=0.5``.
capacity_value : float, optional
A value associated with the selected ``capacity_mode``:
- If ``capacity_mode="constant"``, ``capacity_value`` must be an
integer :math:`\\ge` the maximum of the response variable.
- If ``capacity_mode="factor"``, ``capacity_value`` must be
:math:`\\ge` 1.
- If ``capacity_mode="scale"``, ``capacity_value`` must be in
:math:`[0,1]`.
Raises
------
:class:`~gloria.utilities.errors.FittedError`
Raised in case the method is called on a fitted ``Gloria`` model.
Returns
-------
Gloria
Updated Gloria object.
Notes
-----
The configuration of the ``fit`` method is composed of four layers,
each one overriding the previous:
1. **Model defaults** - baseline configuration with default values.
2. **Global TOML file** - key-value pairs in the ``[fit]`` section of
the TOML file passed to :meth:`Gloria.from_toml`, if the instance
was created that way.
3. **Local TOML file** - key-value pairs in the ``[fit]`` section of
the TOML file provided via ``toml_path``.
4. **Keyword overrides** - additional arguments provided via
``**kwargs`` take highest precedence.
"""
if self.is_fitted:
raise FittedError('Gloria object can only be fit once. Instantiate a new object.')
config = assemble_config(method='fit', model=self, toml_path=toml_path, **kwargs)
self.fit_kwargs = dict(optimize_mode=config['optimize_mode'], use_laplace=config['use_laplace'], capacity=config['capacity'], capacity_mode=config['capacity_mode'], capacity_value=config['capacity_value'])
get_logger().debug('Starting to preprocess input data.')
input_data = self.preprocess(data)
get_logger().debug('Handing over preprocessed data to model backend.')
self.model_backend.fit(input_data, optimize_mode=config['optimize_mode'], use_laplace=config['use_laplace'], capacity=config['capacity'], capacity_mode=config['capacity_mode'], capacity_value=config['capacity_value'], vectorized=self.vectorized)
return self
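# End-to-end sketch (the CSV path and column names 'ds'/'y' are assumptions):
#   m = Gloria(model='normal', sampling_period='1d', timestamp_name='ds', metric_name='y')
#   df = pd.read_csv('history.csv', parse_dates=['ds'])
#   m.fit(df, optimize_mode='MAP', use_laplace=True)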
def predict(self: Self, data: Optional[pd.DataFrame]=None, toml_path: Optional[Union[str, Path]]=None, **kwargs: dict[str, Any]) -> pd.DataFrame:
"""
Generate forecasts from a *fitted* :class:`Gloria` model.
Two usage patterns are supported:
1. **Explicit input dataframe** - ``data`` contains future
(or historical) timestamps plus any required external-regressor
columns.
2. **Auto-generated future dataframe** - leave ``data`` as ``None`` and
supply the helper kwargs ``periods`` and/or ``include_history``.
This shortcut only works when the model has *no* external
regressors.
Parameters
----------
data : Optional[pd.DataFrame], optional
A pandas DataFrame containing timestamp and metric columns named
according to ``self.timestamp_name`` and ``self.metric_name``,
respectively. If *external regressors* were added to the model,
the respective columns must be present as well. If ``None``, a
future dataframe is produced with :meth:`make_future_dataframe`.
toml_path : Optional[Union[str, Path]], optional
Path to a TOML file whose ``[predict]`` section should be merged
into the configuration. Ignored when ``None``.
periods : int
Number of future steps to generate. Must be a positive integer.
Measured in units of ``self.sampling_period``. The default is ``1``.
include_history : bool, optional
If ``True`` (default), the returned frame includes the historical
dates that were seen during fitting; if ``False`` it contains only
the future portion.
Returns
-------
prediction : pd.DataFrame
A dataframe containing timestamps, predicted metric, trend, and
lower and upper bounds.
Notes
-----
The configuration of the predict method via ``periods`` and ``include_history``
is composed in four layers, each one overriding the previous:
1. **Model defaults** - the baseline configuration with defaults given
above.
2. **Global TOML file** - key-value pairs in the ``[predict]`` table of
the TOML file passed to :meth:`Gloria.from_toml` if the current
Gloria instance was created this way.
3. **Local TOML file** - key-value pairs in the ``[predict]`` table of
the TOML file provided for ``toml_path``.
4. **Keyword overrides** - additional arguments supplied directly to
the method take highest precedence.
"""
if not self.is_fitted:
raise NotFittedError('Can only predict using a fitted model.')
if data is None:
if len(self.external_regressors) > 0:
raise ValueError('If the model has external regressors, data must be explicitly provided.')
config = assemble_config(method='predict', model=self, toml_path=toml_path, **kwargs)
data = self.make_future_dataframe(**config)
data = cast(pd.DataFrame, data)
capacity_vec = None
if self.vectorized:
self.validate_metric_column(df=data, name=self.capacity_name, col_type='Capacity')
capacity_vec = data[self.capacity_name].copy()
missing_regressors = [f"'{name}'" for name in self.external_regressors if name not in data]
if missing_regressors:
missing_regressors_str = ', '.join(missing_regressors)
raise KeyError(f'Prediction input data is missing the external regressor column(s) {missing_regressors_str}.')
for name in self.external_regressors:
if data[name].dtype.kind not in 'biuf':
raise TypeError(f"Regressor column '{name}' is non-numeric.")
if data[name].isnull().any():
raise ValueError(f"Regressor column '{name}' contains NaN.")
data = data.copy().reset_index(drop=True)
data[_T_INT] = time_to_integer(data[self.timestamp_name], self.first_timestamp, self.sampling_period)
X, _ = self.make_all_features(data)
prediction = self.model_backend.predict(t=np.asarray(data[_T_INT]), X=np.asarray(X), interval_width=self.interval_width, trend_samples=self.trend_samples, capacity_vec=capacity_vec)
prediction.insert(0, self.timestamp_name, np.asarray(data[self.timestamp_name]))
return prediction
def make_future_dataframe(self: Self, periods: int=1, include_history: bool=True) -> pd.DataFrame:
"""
Build a timestamp skeleton that extends the training horizon.
This helper is typically used when you plan to call :meth:`predict`.
It produces a frame with a single column, named according to
``self.timestamp_name``, whose values
* start one sampling step after the last training timestamp, and
* continue for ``periods`` intervals spaced by
``self.sampling_period``.
If ``include_history`` is ``True`` the original training timestamps are
prepended, yielding a contiguous timeline from the first observed point
up to the requested forecast horizon.
Parameters
----------
periods : int
Number of future steps to generate. Must be a positive integer.
Measured in units of ``self.sampling_period``. The default is
``1``.
include_history : bool, optional
If ``True`` (default), the returned frame includes the historical
dates that were seen during fitting; if ``False`` it contains only
the future portion.
Raises
------
NotFittedError
The model has not been fitted yet.
TypeError
If ``periods`` is not an integer.
ValueError
If ``periods`` is < 1.
Returns
-------
future_df : pd.DataFrame
A dataframe with a single column ``self.timestamp_name`` containing
``pd.Timestamps``. It can be passed directly to :meth:`predict`
if the model has no external regressors. When the model relies on
external regressors you must merge the appropriate regressor
columns into ``future_df`` before forecasting.
"""
if not self.is_fitted:
raise NotFittedError()
if not isinstance(periods, int):
raise TypeError(f"Argument 'periods' must be an integer but is {type(periods)}.")
if periods < 1:
raise ValueError("Argument 'periods' must be >= 1.")
new_timestamps = pd.Series(pd.date_range(start=self.last_timestamp + self.sampling_period, periods=periods, freq=self.sampling_period))
if include_history:
new_timestamps = pd.concat([self.history[self.timestamp_name], new_timestamps])
return pd.DataFrame({self.timestamp_name: new_timestamps}).reset_index(drop=True)
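# Horizon sketch (dates assumed): with daily sampling and a last training
# timestamp of 2024-01-31,
#   m.make_future_dataframe(periods=3, include_history=False)
# yields the timestamps 2024-02-01, 2024-02-02 and 2024-02-03 — the frame
# starts one sampling step after the end of history and can be passed straight
# to predict() when no external regressors are present.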
def load_data(self: Self, toml_path: Optional[Union[str, Path]]=None, **kwargs: dict[str, Any]) -> pd.DataFrame:
"""
Load and configure the time-series input data for fit method.
Reads a .csv-file that must contain at least two columns: a timestamp
and a metric column named according to ``self.timestamp_name`` and
``self.metric_name``, respectively. The timestamp column is converted
to a series of ``pd.Timestamps`` and the metric column is cast to
``dtype_kind``.
Parameters
----------
toml_path : Optional[Union[str, Path]], optional
Path to a TOML file whose ``[load_data]`` section overrides the
model defaults. Ignored when ``None``.
source : Union[str, Path]
Location of the CSV file to load the input data from. This key must
be provided.
dtype_kind : str, optional
Desired *kind* of the metric column as accepted by NumPy
(``"u"`` unsigned int, ``"i"`` signed int, ``"f"`` float, ``"b"``
boolean). If omitted, the metric dtype is cast to float.
Returns
-------
data : pandas.DataFrame
The preprocessed dataframe ready for modelling
Notes
-----
The configuration of the ``load_data`` method via ``source`` and
``dtype_kind`` is composed in four layers, each one overriding the
previous:
1. **Model defaults** - the baseline configuration with defaults given
above.
2. **Global TOML file** - key-value pairs in the ``[load_data]`` table
of the TOML file passed to :meth:`Gloria.from_toml` if the current
Gloria instance was created this way.
3. **Local TOML file** - key-value pairs in the ``[load_data]`` table
of the TOML file provided for ``toml_path``.
4. **Keyword overrides** - additional arguments supplied directly to
the method take highest precedence.
"""
config = assemble_config(method='load_data', model=self, toml_path=toml_path, **kwargs)
data = pd.read_csv(config['source'])
data[self.timestamp_name] = pd.to_datetime(data[self.timestamp_name])
data[self.metric_name] = cast_series_to_kind(data[self.metric_name], config['dtype_kind'])
return data
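# Hypothetical call (the path and dtype kind are assumptions): keyword
# overrides take highest precedence, so this reads 'data.csv' and casts the
# metric column to unsigned integers regardless of any [load_data] TOML table:
#   df = m.load_data(source='data.csv', dtype_kind='u')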
def to_dict(self: Self) -> dict[str, Any]:
"""
Converts Gloria object to a dictionary of JSON serializable types.
Only works on fitted Gloria objects.
The method calls :func:`model_to_dict` on ``self``.
Returns
-------
dict[str, Any]
JSON serializable dictionary containing data of Gloria model.
"""
return gs.model_to_dict(self)
def to_json(self: Self, filepath: Optional[Path]=None, **kwargs: Any) -> str:
"""
Converts Gloria object to a JSON string.
Only works on fitted Gloria objects. If desired the model is
additionally dumped to a .json-file.
The method calls :func:`model_to_json` on ``self``.
Parameters
----------
filepath : Optional[Path], optional
Filepath of the target .json-file. If ``None`` (default), no output
file will be written.
**kwargs : Any
Keyword arguments which are passed through to :func:`json.dump` and
:func:`json.dumps`
Raises
------
ValueError
In case the given filepath does not have .json extension.
Returns
-------
str
JSON string containing the model data of the fitted Gloria object.
"""
return gs.model_to_json(self, filepath=filepath, **kwargs)
@staticmethod
def from_dict(model_dict: dict[str, Any]) -> 'Gloria':
"""
Restores a fitted Gloria model from a dictionary.
The input dictionary must be the output of :func:`model_to_dict` or
:meth:`Gloria.to_dict`.
The method calls :func:`model_from_dict` on ``self``.
Parameters
----------
model_dict : dict[str, Any]
Dictionary containing the Gloria object data.
Returns
-------
Gloria
Input data converted to a fitted Gloria object.
"""
return gs.model_from_dict(model_dict)
@staticmethod
def from_json(model_json: Union[Path, str], return_as: Literal['dict', 'model']='model') -> Union[dict[str, Any], 'Gloria']:
"""
Restores a fitted Gloria model from a json string or file.
The input json string must be the output of :func:`model_to_json` or
:meth:`Gloria.to_json`. If the input is a json-file, its contents are
first read into a JSON string.
The method calls :func:`model_from_json` on ``self``.
Parameters
----------
model_json : Union[Path, str]
Filepath of .json-model file or string containing the data.
return_as : Literal['dict', 'model'], optional
If ``dict`` (default), the model is returned in dictionary format,
if ``model`` as fitted Gloria object.
Raises
------
ValueError
Two ValueErrors are possible:
1. In case the given filepath does not have .json extension
2. If ``return_as`` is neither ``"dict"`` nor ``"model"``
Returns
-------
Union[dict[str, Any], Gloria]
Gloria object or dictionary representing the Gloria object based on
input json data.
"""
return gs.model_from_json(model_json, return_as)
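# Round-trip sketch (the file name is an assumption): a fitted model
# serializes to JSON and restores either as a model or a plain dictionary.
#   payload = m.to_json(filepath=Path('model.json'))
#   m2 = Gloria.from_json(payload)                              # Gloria object
#   d = Gloria.from_json(Path('model.json'), return_as='dict')  # dictionary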
@staticmethod
def from_toml(toml_path: Union[str, Path], ignore: Union[Collection[str], str]=set(), **kwargs: dict[str, Any]) -> 'Gloria':
"""
Instantiate and configure a Gloria object from a TOML configuration
file.
The TOML file is expected to have the following top-level tables /
arrays-of-tables (all are optional except ``[model]``):
* ``[model]`` - keyword arguments passed directly to the
:class:`Gloria` constructor.
* ``[[external_regressors]]`` - one table per regressor; each is
forwarded to :meth:`~Gloria.add_external_regressor`.
* ``[[seasonalities]]`` - one table per seasonality; each is
forwarded to :meth:`~Gloria.add_seasonality`.
* ``[[events]]`` - one table per event; each is forwarded to
:meth:`~Gloria.add_event`.
* ``[[protocols]]`` - one table per protocol. Each table **must**
contain a ``type`` key that maps to a protocol class name; the
remaining keys are passed to that class before calling
:meth:`~Gloria.add_protocol`.
Defaults as defined in :class:`Gloria` constructor or respective
methods are used for all keys not provided in the TOML file. ``kwargs``
can be used to overwrite keys found in the ``[model]`` table.
Parameters
----------
toml_path : Union[str, Path]
Path to the TOML file containing the model specification.
ignore : Union[Collection[str],str], optional
Which top-level sections of the file to skip. Valid values are
``"external_regressors"``, ``"seasonalities"``, ``"events"``, and
``"protocols"``. The special value ``"all"`` suppresses every
optional section. May be given as a single string or any iterable
of strings.
**kwargs : dict[str, Any]
Keyword arguments that override or extend the ``[model]`` table.
Only keys that are valid fields of Gloria (i.e. that appear in
Gloria.model_fields) are retained; others are silently dropped.
Returns
-------
Gloria
A fully initialized Gloria instance.
.. seealso::
:func:`model_from_toml`
An alias of this method.
Notes
-----
Precedence order for :class:`Gloria` constructor arguments from highest
to lowest is:
1. Values supplied via ``kwargs``
2. Values found in the TOML ``[model]`` table
3. Gloria's own defaults
"""
return model_from_toml(toml_path, ignore, **kwargs)
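# A minimal TOML sketch matching the layout described above (all values are
# illustrative assumptions):
#   [model]
#   model = "poisson"
#   sampling_period = "1d"
#
#   [[seasonalities]]
#   name = "weekly"
#   period = "7d"
#   fourier_order = 3
#
#   m = Gloria.from_toml('model.toml', ignore='protocols')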
@staticmethod
def Foster():
with open(Path(__file__).parent / 'foster.txt', 'r', encoding='utf8') as f:
print(f.read(), end='\n\n')
print(' -------------------- '.center(70))
print('| Here, take a cookie. |'.center(70))
print(' ==================== '.center(70))
def plot(self: Self, fcst: pd.DataFrame, ax: Optional[plt.Axes]=None, uncertainty: Optional[bool]=True, show_changepoints: Optional[bool]=False, include_legend: Optional[bool]=False, mark_anomalies: Optional[bool]=False, show_capacity: Optional[bool]=False, plot_kwargs: Optional[dict[str, Any]]=None, rcparams_kwargs: Optional[dict[str, Any]]=None, style_kwargs: Optional[dict[str, Any]]=None, scatter_kwargs: Optional[dict[str, Any]]=None, trend_kwargs: Optional[dict[str, Any]]=None, forecast_kwargs: Optional[dict[str, Any]]=None, interval_kwargs: Optional[dict[str, Any]]=None, xlabel_kwargs: Optional[dict[str, Any]]=None, ylabel_kwargs: Optional[dict[str, Any]]=None, grid_y_kwargs: Optional[dict[str, Any]]=None, despine_kwargs: Optional[dict[str, Any]]=None, ticklabel_kwargs: Optional[dict[str, Any]]=None, anomaly_kwargs: Optional[dict[str, Any]]=None, capacity_kwargs: Optional[dict[str, Any]]=None, date_locator: Optional[Locator]=None, date_formatter: Optional[Formatter]=None) -> None:
"""
Plot the forecast, trend, and observed data with extensive
customization options.
Parameters
----------
fcst : pandas.DataFrame
Forecast DataFrame containing:
- Timestamp column (matching `self.timestamp_name`)
- 'yhat' (predicted values)
- 'trend' (trend component)
- 'observed_lower' and 'observed_upper' (confidence intervals)
ax : matplotlib.axes.Axes, optional
Existing matplotlib axis to draw on.
If None, a new figure and axis will be created.
uncertainty : bool, default=True
Whether to plot the confidence interval bands.
show_changepoints : bool, default=False
Whether to annotate changepoints in the forecast.
include_legend : bool, default=False
Whether to include a legend in the plot.
mark_anomalies : bool, default=False
Whether to highlight observed points that fall outside the
confidence interval.
show_capacity : bool, default=False
Whether to draw the capacity line; only applies to the
``binomial`` and ``beta-binomial`` models.
plot_kwargs : dict, optional
Arguments for `plt.subplots()` if creating a new figure.
rcparams_kwargs : dict, optional
Overrides for matplotlib rcParams to control global styling.
style_kwargs : dict, optional
Keyword arguments passed to `sns.set()` for Seaborn style
configuration.
scatter_kwargs : dict, optional
Styling for the historical data scatter plot (`sns.scatterplot`).
trend_kwargs : dict, optional
Styling for the trend line (`ax.plot`).
forecast_kwargs : dict, optional
Styling for the forecast line (`ax.plot`).
interval_kwargs : dict, optional
Styling for the confidence interval area (`ax.fill_between`).
xlabel_kwargs : dict, optional
Settings for the x-axis label (`ax.set_xlabel`).
ylabel_kwargs : dict, optional
Settings for the y-axis label (`ax.set_ylabel`).
grid_y_kwargs : dict, optional
Settings for the y-axis gridlines (`ax.grid`).
despine_kwargs : dict, optional
Arguments to `sns.despine()` for removing spines.
ticklabel_kwargs : dict, optional
Settings for customizing tick labels (rotation, alignment,
fontsize).
anomaly_kwargs: dict, optional
Styling for the anomaly data scatter plot (`sns.scatterplot`).
capacity_kwargs: dict, optional
Styling for the capacity line (`ax.plot`).
date_locator : matplotlib.ticker.Locator, optional
Locator for x-axis ticks. Defaults to `AutoDateLocator`.
date_formatter : matplotlib.ticker.Formatter, optional
Formatter for x-axis tick labels. Defaults to `AutoDateFormatter`.
Returns
-------
None
Raises
------
NotFittedError
If the model has not been fitted before calling this method.
Notes
-----
This method is designed for flexible, reproducible, and highly
customizable visualization of forecasts and their uncertainty
intervals. You can control nearly every aspect of the figure appearance
via the provided keyword argument dictionaries.
Examples
--------
Basic usage:
>>> model.plot(fcst)
Custom scatter point styling:
>>> model.plot(fcst, scatter_kwargs={"s": 40, "color": "purple"})
Specifying figure size and dpi:
>>> model.plot(fcst, plot_kwargs={"figsize": (12, 8), "dpi": 200})
"""
if not self.is_fitted:
raise NotFittedError()
fcst = fcst.copy()
plot_kwargs = plot_kwargs or {}
rcparams_kwargs = rcparams_kwargs or {}
style_kwargs = style_kwargs or {}
scatter_kwargs = scatter_kwargs or {}
trend_kwargs = trend_kwargs or {}
forecast_kwargs = forecast_kwargs or {}
interval_kwargs = interval_kwargs or {}
xlabel_kwargs = xlabel_kwargs or {}
ylabel_kwargs = ylabel_kwargs or {}
grid_y_kwargs = grid_y_kwargs or {}
despine_kwargs = despine_kwargs or {}
ticklabel_kwargs = ticklabel_kwargs or {}
anomaly_kwargs = anomaly_kwargs or {}
capacity_kwargs = capacity_kwargs or {}
style_defaults = dict(style='whitegrid')
style_defaults.update(style_kwargs)
sns.set(**style_defaults)
rcparams_defaults = {'font.size': 14, 'font.family': 'DejaVu Sans', 'axes.titlesize': 18, 'axes.labelsize': 16, 'legend.fontsize': 14, 'axes.edgecolor': '#333333', 'axes.linewidth': 1.2}
rcparams_defaults.update(rcparams_kwargs)
with plt.rc_context(rc=rcparams_defaults):
with sns.axes_style(style_defaults):
if 'xlabel' not in xlabel_kwargs:
xlabel_kwargs['xlabel'] = self.timestamp_name
if 'ylabel' not in ylabel_kwargs:
ylabel_kwargs['ylabel'] = self.metric_name
user_provided_ax = ax is not None
if ax is None:
plot_defaults = dict(figsize=(10, 6), dpi=150, facecolor='w')
plot_defaults.update(plot_kwargs)
fig, ax = plt.subplots(**plot_defaults)
else:
fig = ax.get_figure()
scatter_defaults = dict(color='#016a86', edgecolor='w', s=20, alpha=0.7, label='Observed', ax=ax)
scatter_defaults.update(scatter_kwargs)
trend_defaults = dict(color='#264653', linewidth=1.0, alpha=0.8, label='Trend')
trend_defaults.update(trend_kwargs)
forecast_defaults = dict(color='#e6794a', linewidth=1.5, label='Forecast')
forecast_defaults.update(forecast_kwargs)
interval_defaults = dict(color='#819997', alpha=0.3, label='Confidence Interval')
interval_defaults.update(interval_kwargs)
sns.scatterplot(x=self.history[self.timestamp_name], y=self.history[self.metric_name], **scatter_defaults)
ax.plot(fcst[self.timestamp_name], fcst['trend'], **trend_defaults)
ax.plot(fcst[self.timestamp_name], fcst['yhat'], **forecast_defaults)
if uncertainty:
ax.fill_between(fcst[self.timestamp_name], fcst['observed_lower'], fcst['observed_upper'], **interval_defaults)
if show_changepoints:
add_changepoints_to_plot(self, fcst, ax)
locator = date_locator or AutoDateLocator(interval_multiples=False)
formatter = date_formatter or AutoDateFormatter(locator)
ax.xaxis.set_major_locator(locator)
ax.xaxis.set_major_formatter(formatter)
ax.set_xlabel(**xlabel_kwargs)
ax.set_ylabel(**ylabel_kwargs)
grid_y_defaults = dict(visible=True, axis='y', linestyle='--', alpha=0.3)
grid_y_defaults.update(grid_y_kwargs)
ax.grid(**grid_y_defaults)
ax.grid(visible=False, axis='x')
sns.despine(ax=ax, **despine_kwargs)
ticklabel_defaults = dict(rotation=45, horizontalalignment='right')
ticklabel_defaults.update(ticklabel_kwargs)
for label in ax.get_xticklabels():
label.set_rotation(ticklabel_defaults.get('rotation', 0))
label.set_horizontalalignment(ticklabel_defaults.get('horizontalalignment', 'center'))
if 'fontsize' in ticklabel_defaults:
label.set_fontsize(ticklabel_defaults['fontsize'])
if 'color' in ticklabel_defaults:
label.set_color(ticklabel_defaults['color'])
if not user_provided_ax:
fig.tight_layout()
if mark_anomalies:
anomaly_defaults = dict(color='#ff0000', edgecolor='w', s=20, alpha=0.7, label='Anomalies', ax=ax)
anomaly_defaults.update(anomaly_kwargs)
mask_history = fcst[self.timestamp_name].isin(self.history[self.timestamp_name])
anomaly_mask = (fcst.loc[mask_history, 'observed_upper'] < self.history[self.metric_name]) | (fcst.loc[mask_history, 'observed_lower'] > self.history[self.metric_name])
sns.scatterplot(x=self.history.loc[anomaly_mask, self.timestamp_name], y=self.history.loc[anomaly_mask, self.metric_name], **anomaly_defaults)
if show_capacity and self.model in ('binomial', 'beta-binomial'):
if self.vectorized:
capacity_label = self.capacity_name
capacity_values = self.history[self.capacity_name]
else:
capacity_label = 'capacity'
capacity_values = [self.model_backend.stan_data.capacity] * len(self.history)
capacity_defaults = {'color': 'grey', 'linestyle': '--', 'label': capacity_label}
capacity_defaults.update(capacity_kwargs)
ax.plot(self.history[self.timestamp_name], capacity_values, **capacity_defaults)
elif show_capacity and self.model not in ('binomial', 'beta-binomial'):
get_logger().warning(f"Ignoring 'show_capacity=True' as model '{self.model}' does not have a capacity.")
try:
ax.get_legend().remove()
except AttributeError:
pass
if include_legend:
ax.legend(frameon=True, shadow=True, loc='best', fontsize=10)
plt.show()
return
def plot_components(self: Self, fcst: pd.DataFrame, uncertainty: bool=True, weekly_start: int=0, plot_kwargs: Optional[dict[str, Any]]=None, line_kwargs: Optional[dict[str, Any]]=None, interval_kwargs: Optional[dict[str, Any]]=None, xlabel_kwargs: Optional[dict[str, Any]]=None, ylabel_kwargs: Optional[dict[str, Any]]=None, grid_y_kwargs: Optional[dict[str, Any]]=None, despine_kwargs: Optional[dict[str, Any]]=None, ticklabel_kwargs: Optional[dict[str, Any]]=None, rcparams_kwargs: Optional[dict[str, Any]]=None, style_kwargs: Optional[dict[str, Any]]=None) -> None:
"""
Plot forecast components of a Gloria model using a modern Seaborn
style, with global kwargs applied to all subplots.
Parameters
----------
fcst : :class:`pandas.DataFrame`
Forecast DataFrame from the model, used for plotting trend and
uncertainty.
uncertainty : bool, default True
Whether to include uncertainty intervals in the trend component
plot.
weekly_start : int, default 0
Starting day of the week (0=Monday) for weekly seasonal plots.
plot_kwargs : dict, optional
Keyword arguments passed to matplotlib.subplots() for figure and
axes creation (e.g., figsize, dpi).
line_kwargs : dict, optional
Styling kwargs for lines in all components (e.g., color, linewidth)
interval_kwargs : dict, optional
Styling kwargs for uncertainty intervals in all components
(e.g., alpha, color).
xlabel_kwargs : dict, optional
Keyword arguments for x-axis labels in all components.
ylabel_kwargs : dict, optional
Keyword arguments for y-axis labels in all components.
grid_y_kwargs : dict, optional
Keyword arguments for customizing the y-axis grid appearance.
despine_kwargs : dict, optional
Keyword arguments passed to seaborn.despine() for spine removal.
ticklabel_kwargs : dict, optional
Keyword arguments to customize tick labels in all components.
rcparams_kwargs : dict, optional
Matplotlib rcParams overrides for all components (e.g., font sizes)
style_kwargs : dict, optional
Seaborn style kwargs for all components (e.g., style presets).
Returns
-------
None
"""
plot_kwargs = plot_kwargs or {}
line_kwargs = line_kwargs or {}
interval_kwargs = interval_kwargs or {}
xlabel_kwargs = xlabel_kwargs or {}
ylabel_kwargs = ylabel_kwargs or {}
grid_y_kwargs = grid_y_kwargs or {}
despine_kwargs = despine_kwargs or {}
ticklabel_kwargs = ticklabel_kwargs or {}
rcparams_kwargs = rcparams_kwargs or {}
style_kwargs = style_kwargs or {}
style_defaults = {'style': 'whitegrid'}
style_defaults.update(style_kwargs)
sns.set(**style_defaults)
rcparams_defaults = {'font.size': 14, 'font.family': 'DejaVu Sans', 'axes.titlesize': 18, 'axes.labelsize': 16, 'legend.fontsize': 14, 'axes.edgecolor': '#333333', 'axes.linewidth': 1.2}
rcparams_defaults.update(rcparams_kwargs)
plt.rcParams.update(rcparams_defaults)
with plt.rc_context(rc=rcparams_defaults):
with sns.axes_style(style_defaults):
components = ['trend']
components.extend(self.model_extra['seasonalities'].keys())
if self.model_extra['events'].keys():
components.append('events')
if self.model_extra['external_regressors'].keys():
components.append('external_regressors')
npanel = len(components)
ncols = int(np.floor(np.sqrt(npanel)))
nrows = int(np.ceil(npanel / ncols))
plot_defaults = {'nrows': nrows, 'ncols': ncols, 'figsize': (int(4.5 * ncols), int(3.2 * nrows)), 'facecolor': 'w', 'dpi': 150}
plot_defaults.update(plot_kwargs)
fig, axes = plt.subplots(**plot_defaults)
if isinstance(axes, np.ndarray):
axes = axes.flatten()
else:
axes = [axes]
for ax, comp in zip(axes, components):
if comp == 'trend':
plot_trend_component(m=self, fcst=fcst, component='trend', ax=ax, uncertainty=uncertainty, plot_kwargs=plot_kwargs, line_kwargs=line_kwargs, interval_kwargs=interval_kwargs, xlabel_kwargs=xlabel_kwargs, ylabel_kwargs=ylabel_kwargs, grid_y_kwargs=grid_y_kwargs, ticklabel_kwargs=ticklabel_kwargs, rcparams_kwargs=rcparams_kwargs, style_kwargs=style_kwargs)
elif comp in self.model_extra['seasonalities'].keys():
plot_seasonality_component(m=self, component=comp, start_offset=weekly_start, period=int(np.rint(self.model_extra['seasonalities'][comp].period)), ax=ax, plot_kwargs=plot_kwargs, line_kwargs=line_kwargs, interval_kwargs=interval_kwargs, xlabel_kwargs=xlabel_kwargs, ylabel_kwargs=ylabel_kwargs, grid_y_kwargs=grid_y_kwargs, ticklabel_kwargs=ticklabel_kwargs, rcparams_kwargs=rcparams_kwargs, style_kwargs=style_kwargs)
elif comp in ['events', 'external_regressors']:
plot_event_component(m=self, component=comp, ax=ax, plot_kwargs=plot_kwargs, line_kwargs=line_kwargs, interval_kwargs=interval_kwargs, xlabel_kwargs=xlabel_kwargs, ylabel_kwargs=ylabel_kwargs, grid_y_kwargs=grid_y_kwargs, ticklabel_kwargs=ticklabel_kwargs, rcparams_kwargs=rcparams_kwargs, style_kwargs=style_kwargs)
sns.despine(ax=ax, **despine_kwargs)
for ax in axes[npanel:]:
ax.set_visible(False)
fig.tight_layout()
plt.show()
return
...
|
class Gloria(BaseModel):
'''
The Gloria forecaster object is the central hub for the entire modeling
workflow.
Gloria objects are initialized with parameters controlling the fit and
prediction behaviour. Features such as ``seasonalities``, ``external
regressors``, and ``events`` (or collection of such using ``protocols``)
are added to Gloria objects. Once set up, :meth:`~Gloria.fit`,
:meth:`~Gloria.predict`, or :meth:`~Gloria.plot` methods are available to
fit the model to input data and visualize the results.
Parameters
----------
model : str
The distribution model to be used. Can be any of ``"poisson"``,
``"binomial"``, ``"negative binomial"``, ``"gamma"``, ``"beta"``,
``"beta-binomial"``, or ``"normal"``. See :ref:`Model Selection
<ref-model-selection>` tutorial for further information.
sampling_period : Union[pd.Timedelta, str]
Minimum spacing between two adjacent samples either as ``pd.Timedelta``
or a compatible string such as ``"1d"`` or ``"20 min"``.
timestamp_name : str, optional
The name of the timestamp column as expected in the input data frame
for :meth:`~Gloria.fit`.
metric_name : str, optional
The name of the expected metric column of the input data frame for
:meth:`~Gloria.fit`.
capacity_name : str, optional
The name of the column containing capacity data for the models
``"binomial"`` and ``"beta-binomial"``.
changepoints : pd.Series, optional
List of timestamps at which to include potential changepoints. If not
specified (default), potential changepoints are selected automatically.
n_changepoints : int, optional
Number of potential changepoints to include. Not used if input
'changepoints' is supplied. If ``changepoints`` is not supplied, then
``n_changepoints`` potential changepoints are selected uniformly from
the first ``changepoint_range`` proportion of the history. Must be a
non-negative integer.
changepoint_range : float, optional
Proportion of history in which trend changepoints will be estimated.
Must be in range [0,1]. Not used if ``changepoints`` is specified.
seasonality_prior_scale : float, optional
Parameter modulating the strength of the seasonality model. Larger
values allow the model to fit larger seasonal fluctuations, smaller
values dampen the seasonality. Can be specified for individual
seasonalities using :meth:`add_seasonality`. Must be larger than 0.
event_prior_scale : float, optional
Parameter modulating the strength of additional event regressors.
Larger values allow the model to fit larger event impact, smaller
values dampen the event impact. Can be specified for individual
events using :meth:`add_event`. Must be larger than 0.
changepoint_prior_scale : float, optional
Parameter modulating the flexibility of the automatic changepoint
selection. Large values will allow many changepoints, small values will
allow few changepoints. Must be larger than 0.
dispersion_prior_scale : float, optional
Parameter controlling the flexibility of the dispersion (i.e. allowed
variance) of the model. Larger values allow more dispersion. This
parameter does not affect the binomial and poisson model. Must be
larger than one.
interval_width : float, optional
Width of the uncertainty intervals provided for the prediction. It is
used for both uncertainty intervals of the expected value (fit) as
well as the observed values (observed). Must be in range [0,1].
trend_samples : int, optional
Number of simulated draws used to estimate uncertainty intervals of the
*trend* in prediction periods that were not included in the historical
data. Setting this value to 0 will disable uncertainty estimation.
Must be greater than or equal to 0.
'''
@field_validator('sampling_period')
@classmethod
def validate_sampling_period(cls: Type[Self], sampling_period: pd.Timedelta) -> pd.Timedelta:
'''
Converts sampling period to a pandas Timedelta if it was passed as a
string instead.
'''
pass
@field_validator('changepoints', mode='before')
@classmethod
def validate_changepoints(cls: Type[Self], changepoints: Optional[SeriesData]) -> pd.Series:
'''
Converts changepoints input to pd.Series
'''
pass
def __init__(self: Self, *args: tuple[Any, ...], **kwargs: dict[str, Any]) -> None:
'''
Initializes Gloria model.
Parameters
----------
*args : tuple[Any, ...]
Positional arguments passed through to Pydantic Model __init__()
**kwargs : dict[str, Any]
Keyword arguments passed through to Pydantic Model __init__()
'''
pass
@property
def is_fitted(self: Self) -> bool:
'''
Determines whether the present :class:`Gloria` model is fitted.
This property is *read-only*.
Returns
-------
bool
``True`` if fitted, ``False`` otherwise.
'''
pass
def validate_column_name(self: Self, name: str, check_seasonalities: bool=True, check_events: bool=True, check_external_regressors: bool=True) -> None:
'''
Validates the name of a seasonality, an event or an external regressor.
Parameters
----------
name : str
The name to validate.
check_seasonalities : bool, optional
Check if name already used for seasonality. The default is True.
check_events : bool, optional
Check if name already used as an event. The default is True.
check_external_regressors : bool, optional
Check if name already used for regressor. The default is True.
Raises
------
TypeError
If the passed ``name`` is not a string
ValueError
Raised in case the ``name`` is not valid for any reason.
'''
pass
def add_seasonality(self: Self, name: str, period: str, fourier_order: int, prior_scale: Optional[float]=None) -> Self:
'''
Adds a seasonality to the Gloria object.
From the seasonality, even and odd Fourier series components up to a
user-defined maximum order will be generated and used as regressors
during fitting and predicting.
Parameters
----------
name : str
A descriptive name of the seasonality.
period : str
Fundamental period of the seasonality component. Should be a string
compatible with ``pd.Timedelta`` (e.g. ``"1d"`` or ``"12 h"``).
fourier_order : int
All Fourier terms from fundamental up to ``fourier_order`` will be
used as regressors.
prior_scale : float, optional
The regression coefficient is given a prior with the specified
scale parameter. Decreasing the prior scale will add additional
regularization. If None is given self.seasonality_prior_scale will
be used (default). Must be greater than 0.
Raises
------
:class:`~gloria.utilities.errors.FittedError`
Raised in case the method is called on a fitted ``Gloria`` model.
ValueError
Raised when ``prior scale`` or ``period`` are not allowed values.
Returns
-------
Gloria
Updated Gloria object
'''
pass
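# Hedged usage sketch (not from the source); the constructor arguments are
# assumptions based on the class docstring:
#   m = Gloria(model="poisson", sampling_period="1d")
#   m = m.add_seasonality(name="weekly", period="7d", fourier_order=3)
#   # Generates sine/cosine regressors for orders 1..3 of a 7-day period.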
def add_event(self: Self, name: str, regressor_type: str, profile: Union[Profile, dict[str, Any]], prior_scale: Optional[float]=None, include: Union[bool, Literal['auto']]='auto', **regressor_kwargs: Any) -> Self:
'''
Adds an event to the Gloria object.
The event will be treated as a regressor during fitting and predicting.
Parameters
----------
name : str
A descriptive name of the event.
regressor_type : str
Type of the underlying event regressor. Must be any of
``"SingleEvent"``, ``"IntermittentEvent"``, ``"PeriodicEvent"``,
``"Holiday"``
profile : Union[Profile, dict[str, Any]]
The base profile used by the event regressor. Must be either of
type :class:`Profile` or a dictionary from which an event can be
constructed using :meth:`Profile.from_dict`
prior_scale : float
The regression coefficient is given a prior with the specified
scale parameter. Decreasing the prior scale will add additional
regularization. Must be larger than 0.
include : Union[bool, Literal["auto"]]
If set to ``"auto"`` (default), the event regressor will be
excluded from the model during :meth:`fit`, if its overlap with the
data is negligible. This behaviour can be overridden by setting
``include`` to ``True`` or ``False``
**regressor_kwargs : Any
Additional keyword arguments necessary to create the event
regressor specified by ``regressor_type``.
Raises
------
:class:`~gloria.utilities.errors.FittedError`
Raised in case the method is called on a fitted ``Gloria`` model.
ValueError
Raised in case of invalid ``prior_scale`` or ``include`` values.
Returns
-------
Gloria
The ``Gloria`` model updated with the new event
'''
pass
def add_external_regressor(self: Self, name: str, prior_scale: float) -> Self:
'''
Add an external regressor to the Gloria object.
The external regressor will be used for fitting and predicting.
Parameters
----------
name : str
A descriptive name of the regressor. The dataframes passed to
:meth:`fit` and :meth:`predict` must have a column with the
specified name. The values in these columns are used for the
regressor.
prior_scale : float
The regression coefficient is given a prior with the specified
scale parameter. Decreasing the prior scale will add additional
regularization. Must be greater than 0.
Raises
------
:class:`~gloria.utilities.errors.FittedError`
Raised in case the method is called on a fitted Gloria model.
ValueError
Raised in case of an invalid ``prior_scale`` value.
Returns
-------
Gloria
Updated Gloria object
'''
pass
def add_protocol(self: Self, protocol: Protocol) -> Self:
'''
Add a protocol to the Gloria object.
Protocols provide additional, automated routines for setting up the
model during :meth:`fit`. As of now, only the
:class:`~gloria.CalendricData` protocol is implemented.
Parameters
----------
protocol : Protocol
The Protocol object to be added.
Raises
------
:class:`~gloria.utilities.errors.FittedError`
Raised in case the method is called on a fitted Gloria model.
TypeError
Raised when the provided ``protocol`` is not a valid Protocol
object.
Returns
-------
Gloria
Updated Gloria model.
'''
pass
def validate_metric_column(self: Self, df: pd.DataFrame, name: str, col_type: Literal['Metric', 'Capacity']='Metric') -> None:
'''
Validate that the metric column exists and contains only valid values.
Parameters
----------
df : pd.DataFrame
Input pandas DataFrame of data to be fitted.
name : str
The metric column name
col_type : Literal["Metric", "Capacity"], optional
Specifies whether the metric column or capacity column is to be
validated. The default is "Metric".
Raises
------
KeyError
Raised if the metric column doesn't exist in the DataFrame
TypeError
Raised if the metric column's dtype does not fit the model
ValueError
Raised if there are any NaNs in the metric column
'''
pass
def validate_dataframe(self: Self, df: pd.DataFrame) -> pd.DataFrame:
'''
Validates that the input data frame of the fitting-method adheres to
all requirements.
Parameters
----------
df : pd.DataFrame
DataFrame that contains at the very least a timestamp column with
name self.timestamp_name and a numeric column with name
self.metric_name. If the 'binomial' or 'beta-binomial' models are
to be used with vectorized capacities, a column with name
self.capacity_name must exist. If external
regressors were added to the model, the respective columns must be
present as well.
Returns
-------
pd.DataFrame
Validated DataFrame that is reduced to timestamp, metric and
external regressor columns.
'''
pass
def set_changepoints(self: Self) -> Self:
'''
Sets changepoints
Sets changepoints to the dates and corresponding integer values of
changepoints. The following cases are handled:
1. The changepoints were passed in explicitly.
a. They are empty.
b. They are not empty, and need validation.
2. We are generating a grid of them.
3. The user prefers no changepoints be used.
'''
pass
def time_to_integer(self: Self, history: pd.DataFrame) -> pd.DataFrame:
'''
Create a new column from timestamp column of input data frame that
contains corresponding integer values with respect to sampling_delta.
Parameters
----------
history : pd.DataFrame
Validated input data frame of fit method
Returns
-------
history : pd.DataFrame
Updated data frame
'''
pass
def make_all_features(self: Self, data: Optional[pd.DataFrame]=None) -> tuple[pd.DataFrame, dict[str, float]]:
'''
Creates the feature matrix X containing all regressors used in the fit
and for prediction. Also returns prior scales for all features.
Parameters
----------
data : Optional[pd.DataFrame], optional
Input dataframe. It must contain at least a column with integer
timestamps (for column name cf. to _T_INT constant) as well as
the external regressor columns associated with the model. Default
of data is None, in which case the model history will be used.
Returns
-------
X : pd.DataFrame
Feature matrix with columns for the different features and rows
corresponding to the timestamps
prior_scales : dict[str,float]
A dictionary mapping feature -> prior scale
'''
pass
def preprocess(self: Self, data: pd.DataFrame) -> ModelInputData:
'''
Validates input data and prepares the model with respect to the data.
Parameters
----------
data : pd.DataFrame
DataFrame that contains at the very least a timestamp column with
name self.timestamp_name and a numeric column with name
self.metric_name. If external regressors were added to the model,
the respective columns must be present as well.
Returns
-------
ModelInputData
Data as used by the model backend. Contains the inputs that all
stan models have in common.
'''
pass
def fit(self: Self, data: pd.DataFrame, toml_path: Optional[Union[str, Path]]=None, **kwargs: dict[str, Any]) -> Self:
'''
Fits the Gloria object.
The fitting routine validates input data, sets up the model based on
all input parameters, added regressors or protocols, and eventually
calls the model backend for the actual fitting.
Parameters
----------
data : pd.DataFrame
A pandas DataFrame containing timestamp and metric columns named
according to ``self.timestamp_name`` and ``self.metric_name``,
respectively. If *external regressors* were added to the model, the
respective columns must be present as well.
toml_path : Optional[Union[str, Path]], optional
Path to an optional configuration TOML file that contains a section
keyed by ``[fit]``. If *None* (default), TOML configuration is
skipped. TOML configuration takes precedence over model settings saved
in ``self._config`` as well as over default settings.
optimize_mode : str, optional
If ``"MAP"`` (default), the optimization step yields the Maximum A
Posteriori estimation, if ``"MLE"`` a Maximum Likelihood estimation.
use_laplace : bool, optional
If ``True`` (default), the optimization is followed by a sampling
over the Laplace approximation around the posterior mode.
capacity : int, optional
An upper bound used for ``binomial`` and ``beta-binomial`` models.
Note that ``capacity`` must be >= all values in the metric column
of ``data``. Default is ``None``. Specifying ``capacity`` is
mutually exclusive with providing a ``capacity_mode`` and
``capacity_value`` pair.
capacity_mode : str, optional
A method used to estimate the capacity. Three modes are available:
- ``"constant"``: The provided ``capacity_value`` equals the
capacity.
- ``"factor"``: The capacity is the maximum of the response
variable times ``capacity_value``.
- ``"scale"``: The capacity is optimized such that the response
variable is distributed around the expectation value
:math:`N \times p` with :math:`N=` capacity and :math:`p=`
``capacity_value``. This mode is the default using
``capacity_value=0.5``.
capacity_value : float, optional
A value associated with the selected ``capacity_mode``:
- If ``capacity_mode="constant"``, ``capacity_value`` must be an
integer :math:`\ge` the maximum of the response variable.
- If ``capacity_mode="factor"``, ``capacity_value`` must be
:math:`\ge` 1.
- If ``capacity_mode="scale"``, ``capacity_value`` must be in
:math:`[0,1]`.
Raises
------
:class:`~gloria.utilities.errors.FittedError`
Raised in case the method is called on a fitted ``Gloria`` model.
Returns
-------
Gloria
Updated Gloria object.
Notes
-----
The configuration of the ``fit`` method is composed of four layers,
each one overriding the previous:
1. **Model defaults** - baseline configuration with default values.
2. **Global TOML file** - key-value pairs in the ``[fit]`` section of
the TOML file passed to :meth:`Gloria.from_toml`, if the instance
was created that way.
3. **Local TOML file** - key-value pairs in the ``[fit]`` section of
the TOML file provided via ``toml_path``.
4. **Keyword overrides** - additional arguments provided via
``**kwargs`` take highest precedence.
'''
pass
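# Hedged numeric sketch (assumption, not the library's code) of the three
# capacity modes documented above, for a metric column y:
#   import numpy as np
#   y = np.array([3, 7, 5])
#   cap_constant = 10                          # "constant": value as-is
#   cap_factor = int(np.ceil(y.max() * 1.5))   # "factor": max(y) * value
#   cap_scale = int(np.round(y.mean() / 0.5))  # "scale": y ~ N * p, p=0.5
#   # (the "scale" line is only illustrative; the source optimizes N)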
def predict(self: Self, data: Optional[pd.DataFrame]=None, toml_path: Optional[Union[str, Path]]=None, **kwargs: dict[str, Any]) -> pd.DataFrame:
'''
Generate forecasts from a *fitted* :class:`Gloria` model.
Two usage patterns are supported:
1. **Explicit input dataframe** - ``data`` contains future
(or historical) timestamps plus any required external-regressor
columns.
2. **Auto-generated future dataframe** - leave ``data`` as ``None`` and
supply the helper kwargs ``periods`` and/or ``include_history``.
This shortcut only works when the model has *no* external
regressors.
Parameters
----------
data : Optional[pd.DataFrame], optional
A pandas DataFrame containing timestamp and metric columns named
according to ``self.timestamp_name`` and ``self.metric_name``,
respectively. If *external regressors* were added to the model,
the respective columns must be present as well. If ``None``, a
future dataframe is produced with :meth:`make_future_dataframe`.
toml_path : Optional[Union[str, Path]], optional
Path to a TOML file whose ``[predict]`` section should be merged
into the configuration. Ignored when ``None``.
periods : int
Number of future steps to generate. Must be a positive integer.
Measured in units of ``self.sampling_period``. The default is ``1``.
include_history : bool, optional
If ``True`` (default), the returned frame includes the historical
dates that were seen during fitting; if ``False`` it contains only
the future portion.
Returns
-------
prediction : pd.DataFrame
A dataframe containing timestamps, predicted metric, trend, and
lower and upper bounds.
Notes
-----
The configuration of the predict method via ``periods`` and ``include_history``
is composed in four layers, each one overriding the previous:
1. **Model defaults** - the baseline configuration with defaults given
above.
2. **Global TOML file** - key-value pairs in the ``[predict]`` table of
the TOML file passed to :meth:`Gloria.from_toml` if the current
Gloria instance was created this way.
3. **Local TOML file** - key-value pairs in the ``[predict]`` table of
the TOML file provided for ``toml_path``.
4. **Keyword overrides** - additional arguments supplied directly to
the method take highest precedence.
'''
pass
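# Hedged end-to-end sketch based on the docstrings above; column names and
# data values are made up:
#   import pandas as pd
#   df = pd.DataFrame({
#       "ds": pd.date_range("2024-01-01", periods=60, freq="D"),
#       "y": range(60),
#   })
#   m = Gloria(model="normal", sampling_period="1d",
#              timestamp_name="ds", metric_name="y")
#   m.fit(df)
#   forecast = m.predict(periods=14, include_history=True)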
def make_future_dataframe(self: Self, periods: int=1, include_history: bool=True) -> pd.DataFrame:
'''
Build a timestamp skeleton that extends the training horizon.
This helper is typically used when you plan to call :meth:`predict`.
It produces a frame with a single column, named according to
``self.timestamp_name``, whose values
* start one sampling step after the last training timestamp, and
* continue for ``periods`` intervals spaced by
``self.sampling_period``.
If ``include_history`` is ``True`` the original training timestamps are
prepended, yielding a contiguous timeline from the first observed point
up to the requested forecast horizon.
Parameters
----------
periods : int
Number of future steps to generate. Must be a positive integer.
Measured in units of ``self.sampling_period``. The default is
``1``.
include_history : bool, optional
If ``True`` (default), the returned frame includes the historical
dates that were seen during fitting; if ``False`` it contains only
the future portion.
Raises
------
NotFittedError
The model has not been fitted yet.
TypeError
If ``periods`` is not an integer.
ValueError
If ``periods`` is < 1.
Returns
-------
future_df : pd.DataFrame
A dataframe with a single column ``self.timestamp_name`` containing
``pd.Timestamps``. It can be passed directly to :meth:`predict`
if the model has no external regressors. When the model relies on
external regressors you must merge the appropriate regressor
columns into ``future_df`` before forecasting.
'''
pass
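# Minimal sketch (assumption) of how such a timestamp skeleton can be built
# with pandas, mirroring the documented behaviour:
#   import pandas as pd
#   last, step = pd.Timestamp("2024-03-01"), pd.Timedelta("1d")
#   future = pd.date_range(start=last + step, periods=7, freq=step)
#   future_df = pd.DataFrame({"ds": future})  # "ds" stands for timestamp_name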
def load_data(self: Self, toml_path: Optional[Union[str, Path]]=None, **kwargs: dict[str, Any]) -> pd.DataFrame:
'''
Load and configure the time-series input data for fit method.
Reads a .csv-file that must contain at least two columns: a timestamp
and a metric column named according to ``self.timestamp_name`` and
``self.metric_name``, respectively. The timestamp column is converted
to a series of ``pd.Timestamps`` and the metric column is cast to
``dtype_kind``.
Parameters
----------
toml_path : Optional[Union[str, Path]], optional
Path to a TOML file whose ``[load_data]`` section overrides the
model defaults. Ignored when ``None``.
source : Union[str, Path]
Location of the CSV file to load the input data from. This key must
be provided.
dtype_kind : str, optional
Desired *kind* of the metric column as accepted by NumPy
(``"u"`` unsigned int, ``"i"`` signed int, ``"f"`` float, ``"b"``
boolean). If omitted, the metric dtype is cast to float.
Returns
-------
data : pandas.DataFrame
The preprocessed dataframe ready for modelling
Notes
-----
The configuration of the ``load_data`` method via ``source`` and
``dtype_kind`` is composed in four layers, each one overriding the
previous:
1. **Model defaults** - the baseline configuration with defaults given
above.
2. **Global TOML file** - key-value pairs in the ``[load_data]`` table
of the TOML file passed to :meth:`Gloria.from_toml` if the current
Gloria instance was created this way.
3. **Local TOML file** - key-value pairs in the ``[load_data]`` table
of the TOML file provided for ``toml_path``.
4. **Keyword overrides** - additional arguments supplied directly to
the method take highest precedence.
'''
pass
def to_dict(self: Self) -> dict[str, Any]:
'''
Converts Gloria object to a dictionary of JSON serializable types.
Only works on fitted Gloria objects.
The method calls :func:`model_to_dict` on ``self``.
Returns
-------
dict[str, Any]
JSON serializable dictionary containing data of Gloria model.
'''
pass
def to_json(self: Self, filepath: Optional[Path]=None, **kwargs: Any) -> str:
'''
Converts Gloria object to a JSON string.
Only works on fitted Gloria objects. If desired the model is
additionally dumped to a .json-file.
The method calls :func:`model_to_json` on ``self``.
Parameters
----------
filepath : Optional[Path], optional
Filepath of the target .json-file. If ``None`` (default) no output-
file will be written.
**kwargs : Any
Keyword arguments which are passed through to :func:`json.dump` and
:func:`json.dumps`
Raises
------
ValueError
In case the given filepath does not have .json extension.
Returns
-------
str
JSON string containing the model data of the fitted Gloria object.
'''
pass
@staticmethod
def from_dict(model_dict: dict[str, Any]) -> 'Gloria':
'''
Restores a fitted Gloria model from a dictionary.
The input dictionary must be the output of :func:`model_to_dict` or
:meth:`Gloria.to_dict`.
The method calls :func:`model_from_dict` on ``self``.
Parameters
----------
model_dict : dict[str, Any]
Dictionary containing the Gloria object data.
Returns
-------
Gloria
Input data converted to a fitted Gloria object.
'''
pass
@staticmethod
def from_json(model_json: Union[Path, str], return_as: Literal['dict', 'model']='model') -> Union[dict[str, Any], 'Gloria']:
'''
Restores a fitted Gloria model from a json string or file.
The input json string must be the output of :func:`model_to_json` or
:meth:`Gloria.to_json`. If the input is a json-file, its contents is
first read to a json string.
The method calls :func:`model_from_json` on ``self``.
Parameters
----------
model_json : Union[Path, str]
Filepath of .json-model file or string containing the data.
return_as : Literal['dict', 'model'], optional
If ``dict`` (default), the model is returned in dictionary format,
if ``model`` as fitted Gloria object.
Raises
------
ValueError
Two ValueErrors are possible:
1. In case the given filepath does not have .json extension
2. If ``return_as`` is neither ``"dict"`` nor ``"model"``
Returns
-------
Union[dict[str, Any], Gloria]
Gloria object or dictionary representing the Gloria object based on
input json data.
'''
pass
@staticmethod
def from_toml(toml_path: Union[str, Path], ignore: Union[Collection[str], str]=set(), **kwargs: dict[str, Any]) -> 'Gloria':
'''
Instantiate and configure a Gloria object from a TOML configuration
file.
The TOML file is expected to have the following top-level tables /
arrays-of-tables (all are optional except ``[model]``):
* ``[model]`` - keyword arguments passed directly to the
:class:`Gloria` constructor.
* ``[[external_regressors]]`` - one table per regressor; each is
forwarded to :meth:`~Gloria.add_external_regressor`.
* ``[[seasonalities]]`` - one table per seasonality; each is
forwarded to :meth:`~Gloria.add_seasonality`.
* ``[[events]]`` - one table per event; each is forwarded to
:meth:`~Gloria.add_event`.
* ``[[protocols]]`` - one table per protocol. Each table **must**
contain a ``type`` key that maps to a protocol class name; the
remaining keys are passed to that class before calling
:meth:`~Gloria.add_protocol`.
Defaults as defined in :class:`Gloria` constructor or respective
methods are used for all keys not provided in the TOML file. ``kwargs``
can be used to overwrite keys found in the ``[model]`` table.
Parameters
----------
toml_path : Union[str, Path]
Path to the TOML file containing the model specification.
ignore : Union[Collection[str],str], optional
Which top-level sections of the file to skip. Valid values are
``"external_regressors"``, ``"seasonalities"``, ``"events"``, and
``"protocols"``. The special value ``"all"`` suppresses every
optional section. May be given as a single string or any iterable
of strings.
**kwargs : dict[str, Any]
Keyword arguments that override or extend the ``[model]`` table.
Only keys that are valid fields of Gloria (i.e. that appear in
Gloria.model_fields) are retained; others are silently dropped.
Returns
-------
Gloria
A fully initialized Gloria instance.
.. seealso::
:func:`model_from_toml`
An alias
Notes
-----
Precedence order for :class:`Gloria` constructor arguments from highest
to lowest is:
1. Values supplied via ``kwargs``
2. Values found in the TOML ``[model]`` table
3. Gloria's own defaults
'''
pass
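# Hedged usage sketch; the TOML layout follows the docstring above and the
# file name is hypothetical:
#   # config.toml:
#   #   [model]
#   #   model = "poisson"
#   #   sampling_period = "1d"
#   #   [[seasonalities]]
#   #   name = "weekly"
#   #   period = "7d"
#   #   fourier_order = 3
#   m = Gloria.from_toml("config.toml", ignore={"events"})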
@staticmethod
def Foster():
pass
def plot(self: Self, fcst: pd.DataFrame, ax: Optional[plt.Axes]=None, uncertainty: Optional[bool]=True, show_changepoints: Optional[bool]=False, include_legend: Optional[bool]=False, mark_anomalies: Optional[bool]=False, show_capacity: Optional[bool]=False, plot_kwargs: Optional[dict[str, Any]]=None, rcparams_kwargs: Optional[dict[str, Any]]=None, style_kwargs: Optional[dict[str, Any]]=None, scatter_kwargs: Optional[dict[str, Any]]=None, trend_kwargs: Optional[dict[str, Any]]=None, forecast_kwargs: Optional[dict[str, Any]]=None, interval_kwargs: Optional[dict[str, Any]]=None, xlabel_kwargs: Optional[dict[str, Any]]=None, ylabel_kwargs: Optional[dict[str, Any]]=None, grid_y_kwargs: Optional[dict[str, Any]]=None, despine_kwargs: Optional[dict[str, Any]]=None, ticklabel_kwargs: Optional[dict[str, Any]]=None, anomaly_kwargs: Optional[dict[str, Any]]=None, capacity_kwargs: Optional[dict[str, Any]]=None, date_locator: Optional[Locator]=None, date_formatter: Optional[Formatter]=None) -> plt.Figure:
'''
Plot the forecast, trend, and observed data with extensive
customization options.
Parameters
----------
fcst : pandas.DataFrame
Forecast DataFrame containing:
- Timestamp column (matching `self.timestamp_name`)
- 'yhat' (predicted values)
- 'trend' (trend component)
- 'observed_lower' and 'observed_upper' (confidence intervals)
ax : matplotlib.axes.Axes, optional
Existing matplotlib axis to draw on.
If None, a new figure and axis will be created.
uncertainty : bool, default=True
Whether to plot the confidence interval bands.
show_changepoints : bool, default=False
Whether to annotate changepoints in the forecast.
include_legend : bool, default=False
Whether to include a legend in the plot.
plot_kwargs : dict, optional
Arguments for `plt.subplots()` if creating a new figure.
rcparams_kwargs : dict, optional
Overrides for matplotlib rcParams to control global styling.
style_kwargs : dict, optional
Keyword arguments passed to `sns.set()` for Seaborn style
configuration.
scatter_kwargs : dict, optional
Styling for the historical data scatter plot (`sns.scatterplot`).
trend_kwargs : dict, optional
Styling for the trend line (`ax.plot`).
forecast_kwargs : dict, optional
Styling for the forecast line (`ax.plot`).
interval_kwargs : dict, optional
Styling for the confidence interval area (`ax.fill_between`).
xlabel_kwargs : dict, optional
Settings for the x-axis label (`ax.set_xlabel`).
ylabel_kwargs : dict, optional
Settings for the y-axis label (`ax.set_ylabel`).
grid_y_kwargs : dict, optional
Settings for the y-axis gridlines (`ax.grid`).
despine_kwargs : dict, optional
Arguments to `sns.despine()` for removing spines.
ticklabel_kwargs : dict, optional
Settings for customizing tick labels (rotation, alignment,
fontsize).
anomaly_kwargs: dict, optional
Styling for the anomaly data scatter plot (`sns.scatterplot`).
capacity_kwargs: dict, optional
Styling for the capacity line (`ax.plot`).
date_locator : matplotlib.ticker.Locator, optional
Locator for x-axis ticks. Defaults to `AutoDateLocator`.
date_formatter : matplotlib.ticker.Formatter, optional
Formatter for x-axis tick labels. Defaults to `AutoDateFormatter`.
Returns
-------
matplotlib.figure.Figure
The figure containing the forecast plot.
Raises
------
NotFittedError
If the model has not been fitted before calling this method.
Notes
-----
This method is designed for flexible, reproducible, and highly
customizable visualization of forecasts and their uncertainty
intervals. You can control nearly every aspect of the figure appearance
via the provided keyword argument dictionaries.
Examples
--------
Basic usage:
>>> model.plot(fcst)
Custom scatter point styling:
>>> model.plot(fcst, scatter_kwargs={"s": 40,
"color": "purple"})
Specifying figure size and dpi:
>>> model.plot(fcst, plot_kwargs={"figsize": (12, 8),
"dpi": 200})
'''
pass
def plot_components(self: Self, fcst: pd.DataFrame, uncertainty: bool=True, weekly_start: int=0, plot_kwargs: Optional[dict[str, Any]]=None, line_kwargs: Optional[dict[str, Any]]=None, interval_kwargs: Optional[dict[str, Any]]=None, xlabel_kwargs: Optional[dict[str, Any]]=None, ylabel_kwargs: Optional[dict[str, Any]]=None, grid_y_kwargs: Optional[dict[str, Any]]=None, despine_kwargs: Optional[dict[str, Any]]=None, ticklabel_kwargs: Optional[dict[str, Any]]=None, rcparams_kwargs: Optional[dict[str, Any]]=None, style_kwargs: Optional[dict[str, Any]]=None) -> plt.Figure:
'''
Plot forecast components of a Gloria model using a modern Seaborn
style, with global kwargs applied to all subplots.
Parameters
----------
fcst : :class:`pandas.DataFrame`
Forecast DataFrame from the model, used for plotting trend and
uncertainty.
uncertainty : bool, default True
Whether to include uncertainty intervals in the trend component
plot.
weekly_start : int, default 0
Starting day of the week (0=Monday) for weekly seasonal plots.
plot_kwargs : dict, optional
Keyword arguments passed to matplotlib.subplots() for figure and
axes creation (e.g., figsize, dpi).
line_kwargs : dict, optional
Styling kwargs for lines in all components (e.g., color, linewidth)
interval_kwargs : dict, optional
Styling kwargs for uncertainty intervals in all components
(e.g., alpha, color).
xlabel_kwargs : dict, optional
Keyword arguments for x-axis labels in all components.
ylabel_kwargs : dict, optional
Keyword arguments for y-axis labels in all components.
grid_y_kwargs : dict, optional
Keyword arguments for customizing the y-axis grid appearance.
despine_kwargs : dict, optional
Keyword arguments passed to seaborn.despine() for spine removal.
ticklabel_kwargs : dict, optional
Keyword arguments to customize tick labels in all components.
rcparams_kwargs : dict, optional
Matplotlib rcParams overrides for all components (e.g., font sizes)
style_kwargs : dict, optional
Seaborn style kwargs for all components (e.g., style presets).
Returns
-------
matplotlib.figure.Figure
The figure containing the component plots.
'''
pass
| 37
| 27
| 77
| 9
| 35
| 32
| 5
| 0.95
| 1
| 28
| 9
| 0
| 21
| 14
| 27
| 109
| 2,219
| 280
| 999
| 249
| 857
| 949
| 464
| 136
| 433
| 17
| 5
| 4
| 128
|
328,363
|
e-dyn/gloria
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/e-dyn_gloria/gloria/models.py
|
gloria.models.Beta
|
from typing_extensions import Self, TypeAlias
from typing import Any, Callable, Literal, Mapping, Optional, Type, Union, cast
from scipy.stats import beta, betabinom, binom, gamma, nbinom, norm, poisson
import numpy as np
class Beta(ModelBackendBase):
"""
Implementation of model backend for beta distribution
"""
stan_file = BASEPATH / 'stan_models/beta.stan'
kind = 'f'
link_pair = LINK_FUNC_MAP['logit']
def quant_func(self: Self, level: float, yhat: np.ndarray, scale: float=1, **kwargs: Any) -> np.ndarray:
"""
Quantile function of the underlying distribution
Parameters
----------
level : float
Level of confidence in (0,1)
yhat : np.ndarray
Predicted values.
scale : Union[float, np.ndarray, None]
Scale parameter of the distribution.
Returns
-------
np.ndarray
Quantile at given level
"""
a = yhat * scale
b = (1 - yhat) * scale
return beta.ppf(level, a, b)
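# Quick numeric check (not from the source) of the parametrization above:
# with a = yhat*scale and b = (1-yhat)*scale the beta mean equals yhat, so
# scale only controls dispersion:
#   from scipy.stats import beta
#   yhat, scale = 0.3, 20.0
#   a, b = yhat * scale, (1 - yhat) * scale
#   assert abs(beta.mean(a, b) - yhat) < 1e-12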
def preprocess(self: Self, stan_data: ModelInputData, **kwargs: Any) -> tuple[ModelInputData, ModelParams]:
"""
Augment the input data for the stan model with model dependent data
and calculate initial guesses for model parameters.
Parameters
----------
stan_data : ModelInputData
Model agnostic input data provided by the forecaster interface
Returns
-------
ModelInputData
Updated stan_data
ModelParams
Guesses for the model parameters depending on the data
"""
y_scaled, self.linked_offset, self.linked_scale = self.normalize_data(y=stan_data.y, lower_bound=True, upper_bound=True)
stan_data.linked_offset = self.linked_offset
stan_data.linked_scale = self.linked_scale
ini_params = self.initial_trend_parameters(y_scaled, stan_data)
stan_data.variance_max = self.estimate_variance(stan_data, ini_params)
return (stan_data, ini_params)
|
class Beta(ModelBackendBase):
'''
Implementation of model backend for beta distribution
'''
def quant_func(self: Self, level: float, yhat: np.ndarray, scale: float=1, **kwargs: Any) -> np.ndarray:
'''
Quantile function of the underlying distribution
Parameters
----------
level : float
Level of confidence in (0,1)
yhat : np.ndarray
Predicted values.
scale : Union[float, np.ndarray, None]
Scale parameter of the distribution.
Returns
-------
np.ndarray
Quantile at given level
'''
pass
def preprocess(self: Self, stan_data: ModelInputData, **kwargs: Any) -> tuple[ModelInputData, ModelParams]:
'''
Augment the input data for the stan model with model dependent data
and calculate initial guesses for model parameters.
Parameters
----------
stan_data : ModelInputData
Model agnostic input data provided by the forecaster interface
Returns
-------
ModelInputData
Updated stan_data
ModelParams
Guesses for the model parameters depending on the data
'''
pass
| 3
| 3
| 36
| 6
| 11
| 20
| 1
| 1.88
| 1
| 5
| 2
| 0
| 2
| 2
| 2
| 36
| 85
| 14
| 25
| 18
| 14
| 47
| 15
| 10
| 12
| 1
| 5
| 0
| 2
|
328,364
|
e-dyn/gloria
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/e-dyn_gloria/gloria/models.py
|
gloria.models.BetaBinomial
|
import numpy as np
from typing_extensions import Self, TypeAlias
from typing import Any, Callable, Literal, Mapping, Optional, Type, Union, cast
from scipy.stats import beta, betabinom, binom, gamma, nbinom, norm, poisson
class BetaBinomial(ModelBackendBase):
"""
Implementation of model backend for beta-binomial distribution
"""
stan_file = BASEPATH / 'stan_models/beta_binomial.stan'
kind = 'bu'
link_pair = LINK_FUNC_MAP['logit']
def yhat_func(self: Self, linked_arg: np.ndarray, scale: float=1, capacity_vec: Optional[np.ndarray]=None, **kwargs: Any) -> np.ndarray:
"""
Produces the predicted values yhat
Parameters
----------
linked_arg : np.ndarray
Linked GLM output
scale : Union[float, np.ndarray, None]
Scale parameter of the distribution.
Returns
-------
np.ndarray
Predicted values
"""
capacity = self.stan_data.capacity if capacity_vec is None else capacity_vec
return capacity * linked_arg
def quant_func(self: Self, level: float, yhat: np.ndarray, scale: float=1, capacity_vec: Optional[np.ndarray]=None, **kwargs: Any) -> np.ndarray:
"""
Quantile function of the underlying distribution
Parameters
----------
level : float
Level of confidence in (0,1)
yhat : np.ndarray
Predicted values.
scale : Union[float, np.ndarray, None]
Scale parameter of the distribution.
Returns
-------
np.ndarray
Quantile at given level
"""
capacity = self.stan_data.capacity if capacity_vec is None else capacity_vec
if self.use_laplace:
kappa = self.fit_params['kappa'].mean()
else:
kappa = self.fit_params['kappa']
scale = 4 * (capacity - 1) / (capacity * kappa ** 2) - 1
p = yhat / capacity
a = p * scale
b = (1 - p) * scale
return betabinom.ppf(level, capacity, a, b)
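# Numeric sanity check (assumption, not the source) of the moment matching
# above: with p = yhat/capacity, a = p*s, b = (1-p)*s the beta-binomial
# mean n*a/(a+b) recovers yhat for any dispersion s:
#   from scipy.stats import betabinom
#   capacity, yhat, s = 50, 20.0, 7.5
#   p = yhat / capacity
#   a, b = p * s, (1 - p) * s
#   assert abs(betabinom.mean(capacity, a, b) - yhat) < 1e-9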
def preprocess(self: Self, stan_data: ModelInputData, capacity: Optional[int]=None, capacity_mode: Optional[str]=None, capacity_value: Optional[float]=None, vectorized: bool=False, **kwargs: Any) -> tuple[ModelInputData, ModelParams]:
"""
Augment the input data for the stan model with model dependent data
and calculate initial guesses for model parameters.
Parameters
----------
stan_data : ModelInputData
Model agnostic input data provided by the forecaster interface.
capacity : int, optional
An upper bound used for ``binomial`` and ``beta-binomial`` models.
Specifying ``capacity`` is mutually exclusive with providing a
``capacity_mode`` and ``capacity_value`` pair.
capacity_mode : str, optional
A method used to estimate the capacity. Must be either ``"scale"``
or ``"factor"``.
capacity_value : float, optional
A value associated with the selected ``capacity_mode``.
vectorized : bool
If True, the capacity is already part of stan_data as a numpy
array. If False the capacity must be constructed from capacity
parameters
Returns
-------
ModelInputData
Updated stan_data
ModelParams
Guesses for the model parameters depending on the data
"""
if not vectorized:
capacity_settings = BinomialCapacity.from_parameters(capacity=capacity, capacity_mode=capacity_mode, capacity_value=capacity_value)
stan_data.capacity = get_capacity(y=stan_data.y, mode=capacity_settings.mode, value=capacity_settings.value)
y_scaled, self.linked_offset, self.linked_scale = self.normalize_data(y=stan_data.y, capacity=stan_data.capacity, lower_bound=True, upper_bound=True)
stan_data.linked_offset = self.linked_offset
stan_data.linked_scale = self.linked_scale
ini_params = self.initial_trend_parameters(y_scaled, stan_data)
return (stan_data, ini_params)
|
class BetaBinomial(ModelBackendBase):
'''
Implementation of model backend for beta-binomial distribution
'''
def yhat_func(self: Self, linked_arg: np.ndarray, scale: float=1, capacity_vec: Optional[np.ndarray]=None, **kwargs: Any) -> np.ndarray:
'''
Produces the predicted values yhat
Parameters
----------
linked_arg : np.ndarray
Linked GLM output
scale : Union[float, np.ndarray, None]
Scale parameter of the distribution.
Returns
-------
np.ndarray
Predicted values
'''
pass
def quant_func(self: Self, level: float, yhat: np.ndarray, scale: float=1, capacity_vec: Optional[np.ndarray]=None, **kwargs: Any) -> np.ndarray:
'''
Quantile function of the underlying distribution
Parameters
----------
level : float
Level of confidence in (0,1)
yhat : np.ndarray
Predicted values.
scale : Union[float, np.ndarray, None]
Scale parameter of the distribution.
Returns
-------
np.ndarray
Quantile at given level
'''
pass
def preprocess(self: Self, stan_data: ModelInputData, capacity: Optional[int]=None, capacity_mode: Optional[str]=None, capacity_value: Optional[float]=None, vectorized: bool=False, **kwargs: Any) -> tuple[ModelInputData, ModelParams]:
'''
Augment the input data for the stan model with model dependent data
and calculate initial guesses for model parameters.
Parameters
----------
stan_data : ModelInputData
Model agnostic input data provided by the forecaster interface.
capacity : int, optional
An upper bound used for ``binomial`` and ``beta-binomial`` models.
Specifying ``capacity`` is mutually exclusive with providing a
``capacity_mode`` and ``capacity_value`` pair.
capacity_mode : str, optional
A method used to estimate the capacity. Must be either ``"scale"``
or ``"factor"``.
capacity_value : float, optional
A value associated with the selected ``capacity_mode``.
vectorized : bool
If True, the capacity is already part of stan_data as a numpy
array. If False the capacity must be constructed from capacity
parameters
Returns
-------
ModelInputData
Updated stan_data
ModelParams
Guesses for the model parameters depending on the data
'''
pass
| 4
| 4
| 51
| 7
| 20
| 25
| 2
| 1.26
| 1
| 9
| 3
| 0
| 3
| 2
| 3
| 37
| 169
| 24
| 65
| 37
| 40
| 82
| 26
| 16
| 22
| 3
| 5
| 1
| 7
|
328,365
|
e-dyn/gloria
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/e-dyn_gloria/gloria/models.py
|
gloria.models.Binomial
|
import numpy as np
from scipy.stats import beta, betabinom, binom, gamma, nbinom, norm, poisson
from typing import Any, Callable, Literal, Mapping, Optional, Type, Union, cast
from typing_extensions import Self, TypeAlias
class Binomial(ModelBackendBase):
"""
Implementation of model backend for binomial distribution
"""
stan_file = BASEPATH / 'stan_models/binomial.stan'
kind = 'bu'
link_pair = LINK_FUNC_MAP['logit']
def yhat_func(self: Self, linked_arg: np.ndarray, capacity_vec: Optional[np.ndarray]=None, **kwargs: Any) -> np.ndarray:
"""
Produces the predicted values yhat
Parameters
----------
linked_arg : np.ndarray
Linked GLM output
capacity_vec : np.ndarray
An array containing the capacity for each timestamp. Only used for
prediction.
Returns
-------
np.ndarray
Predicted values
"""
capacity = self.stan_data.capacity if capacity_vec is None else capacity_vec
return capacity * linked_arg
def quant_func(self: Self, level: float, yhat: np.ndarray, capacity_vec: Optional[np.ndarray]=None, **kwargs: Any) -> np.ndarray:
"""
Quantile function of the underlying distribution
Parameters
----------
level : float
Level of confidence in (0,1)
yhat : np.ndarray
Predicted values.
capacity_vec : np.ndarray
An array containing the capacity for each timestamp. Only used for
prediction.
Returns
-------
np.ndarray
Quantile at given level
"""
capacity = self.stan_data.capacity if capacity_vec is None else capacity_vec
return binom.ppf(level, capacity, yhat / capacity)
def preprocess(self: Self, stan_data: ModelInputData, capacity: Optional[int]=None, capacity_mode: Optional[str]=None, capacity_value: Optional[float]=None, vectorized: bool=False, **kwargs: Any) -> tuple[ModelInputData, ModelParams]:
"""
Augment the input data for the stan model with model dependent data
and calculate initial guesses for model parameters.
Parameters
----------
stan_data : ModelInputData
Model agnostic input data provided by the forecaster interface
capacity : int, optional
An upper bound used for ``binomial`` and ``beta-binomial`` models.
Specifying ``capacity`` is mutually exclusive with providing a
``capacity_mode`` and ``capacity_value`` pair.
capacity_mode : str, optional
A method used to estimate the capacity. Must be either ``"scale"``
or ``"factor"``.
capacity_value : float, optional
A value associated with the selected ``capacity_mode``.
vectorized : bool
If True, the capacity is already part of stan_data as a numpy
array. If False the capacity must be constructed from capacity
parameters
Returns
-------
ModelInputData
Updated stan_data
ModelParams
Guesses for the model parameters depending on the data
"""
if not vectorized:
capacity_settings = BinomialCapacity.from_parameters(capacity=capacity, capacity_mode=capacity_mode, capacity_value=capacity_value)
stan_data.capacity = get_capacity(y=stan_data.y, mode=capacity_settings.mode, value=capacity_settings.value)
y_scaled, self.linked_offset, self.linked_scale = self.normalize_data(y=stan_data.y, capacity=stan_data.capacity, lower_bound=True, upper_bound=True)
stan_data.linked_offset = self.linked_offset
stan_data.linked_scale = self.linked_scale
ini_params = self.initial_trend_parameters(y_scaled, stan_data)
return (stan_data, ini_params)
|
class Binomial(ModelBackendBase):
'''
Implementation of model backend for binomial distribution
'''
def yhat_func(self: Self, linked_arg: np.ndarray, capacity_vec: Optional[np.ndarray]=None, **kwargs: Any) -> np.ndarray:
'''
Produces the predicted values yhat
Parameters
----------
linked_arg : np.ndarray
Linked GLM output
capacity_vec : np.ndarray
An array containing the capacity for each timestamp. Only used for
prediction.
Returns
-------
np.ndarray
Predicted values
'''
pass
def quant_func(self: Self, level: float, yhat: np.ndarray, capacity_vec: Optional[np.ndarray]=None, **kwargs: Any) -> np.ndarray:
'''
Quantile function of the underlying distribution
Parameters
----------
level : float
Level of confidence in (0,1)
yhat : np.ndarray
Predicted values.
capacity_vec : np.ndarray
An array containing the capacity for each timestamp. Only used for
prediction.
Returns
-------
np.ndarray
Quantile at given level
'''
pass
def preprocess(self: Self, stan_data: ModelInputData, capacity: Optional[int]=None, capacity_mode: Optional[str]=None, capacity_value: Optional[float]=None, vectorized: bool=False, **kwargs: Any) -> tuple[ModelInputData, ModelParams]:
'''
Augment the input data for the stan model with model dependent data
and calculate initial guesses for model parameters.
Parameters
----------
stan_data : ModelInputData
Model agnostic input data provided by the forecaster interface
capacity : int, optional
An upper bound used for ``binomial`` and ``beta-binomial`` models.
Specifying ``capacity`` is mutually exclusive with providing a
``capacity_mode`` and ``capacity_value`` pair.
capacity_mode : str, optional
A method used to estimate the capacity. Must be either ``"scale"``
or ``"factor"``.
capacity_value : float, optional
A value associated with the selected ``capacity_mode``.
vectorized : bool
If True, the capacity is already part of stan_data as a numpy
array. If False the capacity must be constructed from capacity
parameters
Returns
-------
ModelInputData
Updated stan_data
ModelParams
Guesses for the model parameters depending on the data
'''
pass
| 4
| 4
| 45
| 5
| 17
| 23
| 2
| 1.4
| 1
| 9
| 3
| 0
| 3
| 2
| 3
| 37
| 149
| 19
| 55
| 31
| 32
| 77
| 19
| 12
| 15
| 2
| 5
| 1
| 6
|
328,366
|
e-dyn/gloria
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/e-dyn_gloria/gloria/models.py
|
gloria.models.BinomialCapacity
|
from pydantic import BaseModel, ConfigDict, Field, ValidationInfo, field_validator
from typing import Any, Callable, Literal, Mapping, Optional, Type, Union, cast
class BinomialCapacity(BaseModel):
"""
Configuration parameters used by the augment_data method of the models
BinomialConstantN and BetaBinomialConstantN to determine the capacity
size.
"""
mode: Literal['constant', 'factor', 'scale']
value: Union[int, float]
@field_validator('value')
@classmethod
def validate_value(cls, value: Union[int, float], info) -> Union[int, float]:
"""
Validates the value passed along with the capacity size estimation
method.
"""
if 'mode' not in info.data:
raise ValueError("Can't validate 'value' field as 'mode' was invalid.")
if info.data['mode'] == 'constant':
if not isinstance(value, int):
raise ValueError(f"In capacity mode 'constant' the capacity value (={value}) must be an integer.")
elif info.data['mode'] == 'factor':
if value < 1:
raise ValueError(f"In capacity mode 'factor' the capacity value (={value}) must be >= 1.")
elif info.data['mode'] == 'scale':
if value >= 1 or value <= 0:
raise ValueError(f"In capacity mode 'scale' the capacity value (={value}) must be 0 < value < 1.")
return value
@classmethod
def from_parameters(cls, capacity, capacity_mode, capacity_value):
cap_is_given = capacity is not None
mode_is_given = capacity_mode is not None and capacity_value is not None
mode_is_incomplete = (capacity_mode is not None) ^ (capacity_value is not None)
if mode_is_incomplete:
raise ValueError("Provide either both 'capacity_mode' and 'capacity_value', or neither.")
if not cap_is_given ^ mode_is_given:
raise ValueError("Provide either 'capacity' or a 'capacity_mode' / 'capacity_value' pair.")
if cap_is_given:
capacity_mode = 'constant'
capacity_value = capacity
return cls(mode=capacity_mode, value=capacity_value)
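# Usage sketch (not from the source) of the mutually exclusive construction
# paths enforced above:
#   BinomialCapacity.from_parameters(100, None, None)
#   # -> mode="constant", value=100
#   BinomialCapacity.from_parameters(None, "factor", 1.5)
#   # -> mode="factor", value=1.5
#   # Providing both, or an incomplete mode/value pair, raises ValueError.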
|
class BinomialCapacity(BaseModel):
'''
Configuration parameters used by the augment_data method of the models
BinomialConstantN and BetaBinomialConstantN to determine the capacity
size.
'''
@field_validator('value')
@classmethod
def validate_value(cls, value: Union[int, float], info) -> Union[int, float]:
'''
Validates the value passed along with the capacity size estimation
method.
'''
pass
@classmethod
def from_parameters(cls, capacity, capacity_mode, capacity_value):
pass
| 6
| 2
| 28
| 1
| 24
| 3
| 6
| 0.19
| 1
| 3
| 0
| 0
| 0
| 0
| 2
| 84
| 69
| 5
| 54
| 10
| 46
| 10
| 26
| 6
| 23
| 8
| 5
| 2
| 12
|
328,367
|
e-dyn/gloria
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/e-dyn_gloria/gloria/models.py
|
gloria.models.Gamma
|
import numpy as np
from scipy.stats import beta, betabinom, binom, gamma, nbinom, norm, poisson
from typing_extensions import Self, TypeAlias
from typing import Any, Callable, Literal, Mapping, Optional, Type, Union, cast
class Gamma(ModelBackendBase):
"""
Implementation of model backend for gamma distribution
"""
stan_file = BASEPATH / 'stan_models/gamma.stan'
kind = 'biuf'
link_pair = LINK_FUNC_MAP['log']
def quant_func(self: Self, level: float, yhat: np.ndarray, scale: float=1, **kwargs: Any) -> np.ndarray:
"""
Quantile function of the underlying distribution
Parameters
----------
level : float
Level of confidence in (0,1)
yhat : np.ndarray
Predicted values.
scale : Union[float, np.ndarray, None]
Scale parameter of the distribution.
Returns
-------
np.ndarray
Quantile at given level
"""
return gamma.ppf(level, yhat * scale, scale=1 / scale)
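# Numeric check (not from the source): with shape yhat*scale and scale
# parameter 1/scale the gamma mean is yhat, so scale only sets dispersion:
#   from scipy.stats import gamma
#   yhat, scale = 4.0, 2.5
#   assert abs(gamma.mean(yhat * scale, scale=1 / scale) - yhat) < 1e-12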
def preprocess(self: Self, stan_data: ModelInputData, **kwargs: Any) -> tuple[ModelInputData, ModelParams]:
"""
Augment the input data for the stan model with model dependent data
and calculate initial guesses for model parameters.
Parameters
----------
stan_data : ModelInputData
Model agnostic input data provided by the forecaster interface
Returns
-------
ModelInputData
Updated stan_data
ModelParams
Guesses for the model parameters depending on the data
"""
y_scaled, self.linked_offset, self.linked_scale = self.normalize_data(y=stan_data.y, lower_bound=True)
stan_data.linked_offset = self.linked_offset
stan_data.linked_scale = self.linked_scale
ini_params = self.initial_trend_parameters(y_scaled, stan_data)
stan_data.variance_max = self.estimate_variance(stan_data, ini_params)
return (stan_data, ini_params)
|
class Gamma(ModelBackendBase):
'''
Implementation of model backend for gamma distribution
'''
def quant_func(self: Self, level: float, yhat: np.ndarray, scale: float=1, **kwargs: Any) -> np.ndarray:
'''
Quantile function of the underlying distribution
Parameters
----------
level : float
Level of confidence in (0,1)
yhat : np.ndarray
Predicted values.
scale : Union[float, np.ndarray, None]
Scale parameter of the distribution.
Returns
-------
np.ndarray
Quantile at given level
'''
pass
def preprocess(self: Self, stan_data: ModelInputData, **kwargs: Any) -> tuple[ModelInputData, ModelParams]:
'''
Augment the input data for the stan model with model dependent data
and calculate initial guesses for model parameters.
Parameters
----------
stan_data : ModelInputData
Model agnostic input data provided by the forecaster interface
Returns
-------
ModelInputData
Updated stan_data
ModelParams
Guesses for the model parameters depending on the data
'''
pass
| 3
| 3
| 33
| 6
| 10
| 18
| 1
| 1.91
| 1
| 5
| 2
| 0
| 2
| 2
| 2
| 36
| 80
| 14
| 23
| 16
| 12
| 44
| 13
| 8
| 10
| 1
| 5
| 0
| 2
|
328,368
|
e-dyn/gloria
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/e-dyn_gloria/gloria/models.py
|
gloria.models.LinkPair
|
from typing import Any, Callable, Literal, Mapping, Optional, Type, Union, cast
import numpy as np
from pydantic import BaseModel, ConfigDict, Field, ValidationInfo, field_validator
class LinkPair(BaseModel):
"""
Link function pairs connecting the expectation value to Stan's GLM
predictors
link = transforming expectation value to predictor
inverse = transforming predictor to expectation value
"""
link: Callable[[np.ndarray], np.ndarray]
inverse: Callable[[np.ndarray], np.ndarray]
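# Hedged sketch of a concrete pair; LINK_FUNC_MAP is not shown in this
# excerpt, but a logit entry would plausibly look like:
#   from scipy.special import expit, logit
#   logit_pair = LinkPair(link=logit, inverse=expit)
#   # logit maps (0, 1) to the real predictor axis; expit inverts it.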
|
class LinkPair(BaseModel):
'''
Link function pairs connecting the expectation value to Stan's GLM
predictors
link = transforming expectation value to predictor
inverse = transforming predictor to expectation value
'''
pass
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 82
| 11
| 2
| 3
| 1
| 2
| 6
| 3
| 1
| 2
| 0
| 5
| 0
| 0
|
328,369
|
e-dyn/gloria
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/e-dyn_gloria/gloria/models.py
|
gloria.models.ModelBackendBase
|
from pathlib import Path
from scipy.optimize import minimize
from typing import Any, Callable, Literal, Mapping, Optional, Type, Union, cast
from gloria.utilities.errors import NotFittedError
from gloria.utilities.logging import get_logger
import logging
from scipy.stats import beta, betabinom, binom, gamma, nbinom, norm, poisson
from cmdstanpy import CmdStanLaplace, CmdStanMLE, CmdStanModel, set_cmdstan_path
import numpy as np
from abc import ABC, abstractmethod
from gloria.utilities.constants import _CMDSTAN_VERSION
from typing_extensions import Self, TypeAlias
import pandas as pd
class ModelBackendBase(ABC):
"""
Abstract base class for the model backend.
The model backend is in charge of passing data and model parameters to the
stan code as well as distribution model dependent prediction
"""
stan_file = Path()
kind = ''
link_pair = LINK_FUNC_MAP['id']
def yhat_func(self: Self, linked_arg: np.ndarray, **kwargs: Any) -> np.ndarray:
"""
Produces the predicted values yhat.
Parameters
----------
linked_arg : np.ndarray
Linked GLM output
scale : Union[float, np.ndarray, None]
Scale parameter of the distribution.
Returns
-------
np.ndarray
Predicted values
"""
return linked_arg
def quant_func(self: Self, level: float, yhat: np.ndarray, **kwargs: Any) -> np.ndarray:
"""
Quantile function of the underlying distribution
"""
return np.array([])
def __init__(self: Self, model_name: str, install=True) -> None:
"""
Initialize the model backend.
Parameters
----------
model_name : str
Name of the model. Must match any of the keys in MODEL_MAP. This
will be validated by the ModelBackend class
"""
models_path = Path(__file__).parent / 'stan_models'
cmdstan_path = models_path / f'cmdstan-{_CMDSTAN_VERSION}'
set_cmdstan_path(str(cmdstan_path))
self.model = CmdStanModel(stan_file=self.stan_file, exe_file=self.stan_file.with_suffix('.exe'))
stan_logger = logging.getLogger('cmdstanpy')
stan_logger.setLevel(logging.CRITICAL)
for handler in stan_logger.handlers:
handler.setLevel(logging.CRITICAL)
self.model_name = model_name
self.stan_data = ModelInputData()
self.stan_inits = ModelParams()
self.linked_offset: float = 0.0
self.linked_scale: float = 1.0
self.stan_fit: Optional[Union[CmdStanMLE, CmdStanLaplace]] = None
self.use_laplace = False
self.fit_params: dict[str, Any] = dict()
@abstractmethod
def preprocess(self: Self, stan_data: ModelInputData, **kwargs: Any) -> tuple[ModelInputData, ModelParams]:
"""
Augment the input data for the stan model with model dependent data
and calculate initial guesses for model parameters.
Parameters
----------
stan_data : ModelInputData
Model agnostic input data provided by the forecaster interface
Raises
------
NotImplementedError
Will be raised if the child-model class did not implement this
method
Returns
-------
ModelInputData
Updated stan_data
ModelParams
Guesses for the model parameters depending on the data
"""
pass
def normalize_data(self: Self, y: np.ndarray, capacity: Optional[Union[int, np.ndarray]]=None, lower_bound: bool=False, upper_bound: bool=False) -> tuple[np.ndarray, float, float]:
"""
Normalize response variable y, apply the model's link function, and
re-scale the result to the open unit interval ``(0, 1)``.
The routine performs three steps:
1. **Optional normalization** - If ``capacity`` is provided, divide ``y``
by ``capacity`` to convert counts to rates.
2. **Boundary adjustment** - Replace exact 0 or 1 with a small offset
when ``lower_bound`` or ``upper_bound`` is set to ``True``.
3. **Link + scaling** - Apply the link function, then rescale the
result to the ``(0, 1)`` interval.
Parameters
----------
y : np.ndarray
Raw response variable. Any shape is allowed, provided it broadcasts
with ``capacity`` if ``capacity`` is an array.
capacity : Optional[Union[int, np.ndarray]]
Capacity size(s) for normalisation. If ``None`` (default) no
division is performed. If an array is given it must have the same
shape as ``y``.
lower_bound : bool, optional
Set to ``True`` when the response is bounded below at zero.
Exact zeros are replaced with ``1e-10`` before the link
transformation to avoid ``-inf``.
upper_bound : bool, optional
Set to ``True`` when the response is bounded above at one.
Exact ones are replaced with ``1 - 1e-10`` before the link
transformation to avoid ``+inf``.
Returns
-------
y_scaled : np.ndarray
The linked response min-max-scaled to lie strictly in ``(0, 1)``.
linked_offset : float
The minimum of the linked, *un*-scaled response; add this to
reverse the min-max scaling.
linked_scale : float
The range (max - min) of the linked, un-scaled response; multiply
by this to reverse the min-max scaling.
"""
y_scaled = y.copy()
if capacity is not None:
p = np.full(y_scaled.shape, 1e-10)
y_scaled = np.divide(y_scaled, capacity, out=p, where=capacity != 0)
if lower_bound:
y_scaled = np.where(y_scaled == 0, 1e-10, y_scaled)
if upper_bound:
y_scaled = np.where(y_scaled == 1, 1 - 1e-10, y_scaled)
y_scaled = self.link_pair.link(y_scaled)
linked_offset = np.min(y_scaled)
linked_scale = np.max(y_scaled) - linked_offset
y_scaled = (y_scaled - linked_offset) / linked_scale
return (y_scaled, linked_offset, linked_scale)
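# Round-trip sketch (assumption): the returned offset and scale invert the
# min-max step, and link_pair.inverse undoes the link, recovering y up to
# the boundary nudges. `backend`, `y` and `N` are hypothetical:
#   y_scaled, off, sc = backend.normalize_data(y, capacity=N,
#                                              lower_bound=True,
#                                              upper_bound=True)
#   y_back = backend.link_pair.inverse(y_scaled * sc + off) * N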
def initial_trend_parameters(self: Self, y_scaled: np.ndarray, stan_data: ModelInputData) -> ModelParams:
"""
Infers an estimation of the fit parameters k, m, delta from
the data.
Parameters
----------
y_scaled : np.ndarray
The input y-data scaled to the GLM depending on the model, e.g.
logit(y / N) for the binomial model
stan_data : ModelInputData
Model agnostic input data provided by the forecaster interface.
Returns
-------
ModelParams
Contains the estimations
"""
t = stan_data.t
y_scaled = y_scaled.copy().astype(float)
T = t[-1] - t[0]
k = (y_scaled[-1] - y_scaled[0]) / T
m = y_scaled[0] - k * t[0]  # intercept of the secant through (t[0], y_scaled[0])
def trend_optimizer(x: np.ndarray) -> float:
"""
An optimizable function that is used to find a set of parameters
minimizing the residual sum of squares for the trend model.
"""
return float(((self.piecewise_linear(t, stan_data.t_change, x[0], x[1], x[2:]) - y_scaled) ** 2).sum())
res = minimize(trend_optimizer, x0=[m, k, *np.zeros(stan_data.S)])
m, delta = (res.x[0], res.x[2:])
k = min(max(res.x[1], -0.5), 0.5)
if m < 0:
k_old = k
k = k + m / stan_data.t_change[0]
m = 0
delta[0] = delta[0] + (k_old - k)
elif m > 1:
k_old = k
k = k + (m - 1) / stan_data.t_change[0]
m = 1
delta[0] = delta[0] + (k_old - k)
return ModelParams(m=m, k=k, delta=delta, beta=np.zeros(stan_data.K))
def estimate_variance(self: Self, stan_data: ModelInputData, trend_params: ModelParams) -> float:
"""
Estimate an upper bound for the variance based on the residuals between
the observed data and the predicted trend.
Parameters
----------
stan_data : ModelInputData
Model agnostic input data provided by the forecaster interface.
trend_params : ModelParams
Contains parameters from initial trend parameter guess.
Returns
-------
float
The upper bound of variance estimated from the residuals between
data and trend.
"""
trend_arg, _ = self.predict_regression(stan_data.t, stan_data.X, trend_params.dict())
trend_linked = self.link_pair.inverse(trend_arg)
trend = self.yhat_func(trend_linked)
variance_max = 1.5 * (stan_data.y - trend).std() ** 2
return variance_max
def fit(self: Self, stan_data: ModelInputData, optimize_mode: Literal['MAP', 'MLE'], use_laplace: bool, capacity: Optional[int]=None, capacity_mode: Optional[str]=None, capacity_value: Optional[float]=None, vectorized: bool=False) -> Union[CmdStanMLE, CmdStanLaplace]:
"""
Calculates initial parameters and fits the model to the input data.
Parameters
----------
stan_data : ModelInputData
An object that holds the input data required by the data-block of
the stan model.
optimize_mode : Literal['MAP', 'MLE'], optional
If 'MAP' (default), the optimization step yields the Maximum A
Posteriori estimate; if 'MLE', the Maximum Likelihood Estimate.
use_laplace : bool, optional
If True (default), the optimization is followed by a sampling over
the Laplace approximation around the posterior mode.
capacity : int, optional
An upper bound used for ``binomial`` and ``beta-binomial`` models.
Specifying ``capacity`` is mutually exclusive with providing a
``capacity_mode`` and ``capacity_value`` pair.
capacity_mode : str, optional
A method used to estimate the capacity. Must be eitherr ``"scale"``
or ``"factor"``.
capacity_value : float, optional
A value associated with the selected ``capacity_mode``.
vectorized : bool
If True, the capacity is already part of stan_data as a numpy
array. If False the capacity must be constructed from capacity
parameters
Returns
-------
Union[CmdStanMLE, CmdStanLaplace]
The fitted CmdStanModel object that holds the fitted parameters
"""
jacobian = optimize_mode == 'MAP'
self.stan_data = stan_data
self.stan_data, self.stan_inits = self.preprocess(stan_data, capacity=capacity, capacity_mode=capacity_mode, capacity_value=capacity_value, vectorized=vectorized)
if stan_data.X.size:
reg_strength = np.sqrt((stan_data.X ** 2).sum(axis=0))
q = reg_strength / np.median(reg_strength)
stan_data.X = stan_data.X / q
get_logger().debug(f'Optimizing model parameters using {optimize_mode}.')
stan_data_opt = self.stan_data.copy()
if hasattr(stan_data_opt, 'capacity') and (not vectorized):
stan_data_opt.capacity = np.ones(stan_data_opt.T, dtype=int) * stan_data_opt.capacity
optimize_args = dict(data=stan_data_opt.dict(), inits=self.stan_inits.dict(), algorithm='BFGS', iter=10000, jacobian=jacobian)
run_newton = True
get_logger().info('Starting optimization.')
for init_alpha in [10 ** (-4 - i / 2) for i in range(0, 2 * 4)]:
optimize_args['init_alpha'] = init_alpha
try:
optimized_model = self.model.optimize(**cast(Mapping[str, Any], optimize_args))
except RuntimeError:
get_logger().debug(f'Optimization with init_alpha={init_alpha} failed. Moving to next.')
continue
else:
run_newton = False
break
if run_newton:
del optimize_args['init_alpha']
get_logger().warning('Optimization terminated abnormally. Falling back to Newton.')
optimize_args['algorithm'] = 'Newton'
optimized_model = self.model.optimize(**cast(Mapping[str, Any], optimize_args))
if use_laplace:
get_logger().info('Starting Laplace sampling.')
self.stan_fit = self.model.laplace_sample(data=stan_data.dict(), mode=optimized_model, jacobian=jacobian)
self.use_laplace = True
else:
self.stan_fit = optimized_model
self.use_laplace = False
self.fit_params = {k: v for k, v in self.stan_fit.stan_variables().items() if k != 'trend'}
if stan_data.X.size:
self.stan_data.X *= q
self.fit_params['beta'] /= q
return self.stan_fit
def predict(self: Self, t: np.ndarray, X: np.ndarray, interval_width: float, trend_samples: int, capacity_vec: Optional[np.ndarray]=None) -> pd.DataFrame:
"""
Based on the fitted model parameters predicts values and uncertainties
for given timestamps.
Parameters
----------
t : np.ndarray
Timestamps as integer values
X : np.ndarray
Overall feature matrix
interval_width : float
Confidence interval width: Must fall in [0, 1]
trend_samples : int
Number of samples to draw from
capacity_vec : Optional[np.ndarray], optional
Vectorized capacity - only relevant for models 'binomial' and
'beta-binomial'. Default is None.
Raises
------
NotFittedError
Raised if the model was not fitted prior to prediction.
Returns
-------
result : pd.DataFrame
Dataframe containing all predicted metrics, including
uncertainties. The columns include:
* yhat/trend: mean predicted value for overall model or trend
* yhat/trend_upper/lower: uncertainty intervals for mean
predicted values with respect to specified interval_width
* observed_upper/lower: uncertainty intervals for observed
values
* '_linked' versions of all quantities except for 'observed'.
"""
if self.fit_params == dict():
raise NotFittedError("Can't predict prior to fit.")
params_dict = self.fit_params
lower_level = (1 - interval_width) / 2
upper_level = lower_level + interval_width
trend_uncertainty = self.trend_uncertainty(t, interval_width, trend_samples)
if self.use_laplace:
get_logger().info('Evaluate model at all samples for yhat upper and lower bounds.')
params = [dict(zip(params_dict.keys(), values)) for values in zip(*params_dict.values())]
scale = params_dict['scale'].mean() if 'scale' in params_dict else None
yhat_arg_lst = []
trend_arg_lst = []
for pars in params:
trend_arg, yhat_arg = self.predict_regression(t, X, pars)
yhat_arg_lst.append(yhat_arg)
trend_arg_lst.append(trend_arg)
yhat_args = np.array(yhat_arg_lst)
trend_args = np.array(trend_arg_lst)
yhat_arg = yhat_args.mean(axis=0)
yhat_lower_arg = self.percentile(yhat_args, 100 * lower_level, axis=0)
yhat_upper_arg = self.percentile(yhat_args, 100 * upper_level, axis=0)
trend_arg = trend_args.mean(axis=0)
yhat_linked = self.link_pair.inverse(yhat_arg)
yhat_linked_lower = self.link_pair.inverse(yhat_lower_arg + trend_uncertainty.lower)
yhat_linked_upper = self.link_pair.inverse(yhat_upper_arg + trend_uncertainty.upper)
yhat = self.yhat_func(yhat_linked, scale=scale, capacity_vec=capacity_vec)
yhat_lower = self.yhat_func(yhat_linked_lower, scale=scale, capacity_vec=capacity_vec)
yhat_upper = self.yhat_func(yhat_linked_upper, scale=scale, capacity_vec=capacity_vec)
else:
trend_arg, yhat_arg = self.predict_regression(t, X, params_dict)
scale = params_dict['scale'] if 'scale' in params_dict else None
yhat_linked = self.link_pair.inverse(yhat_arg)
yhat_linked_lower = yhat_linked
yhat_linked_upper = yhat_linked
yhat = self.yhat_func(yhat_linked, scale=scale, capacity_vec=capacity_vec)
yhat_lower = yhat
yhat_upper = yhat
trend_linked = self.link_pair.inverse(trend_arg)
trend_linked_lower = self.link_pair.inverse(trend_arg + trend_uncertainty.lower)
trend_linked_upper = self.link_pair.inverse(trend_arg + trend_uncertainty.upper)
trend = self.yhat_func(trend_linked, scale=scale, capacity_vec=capacity_vec)
trend_lower = self.yhat_func(trend_linked_lower, scale=scale, capacity_vec=capacity_vec)
trend_upper = self.yhat_func(trend_linked_upper, scale=scale, capacity_vec=capacity_vec)
observed_lower = self.quant_func(lower_level, yhat - trend + trend_lower, scale=scale, capacity_vec=capacity_vec)
observed_upper = self.quant_func(upper_level, yhat - trend + trend_upper, scale=scale, capacity_vec=capacity_vec)
result = pd.DataFrame({'yhat': yhat, 'yhat_lower': yhat_lower, 'yhat_upper': yhat_upper, 'yhat_linked': yhat_linked, 'yhat_linked_lower': yhat_linked_lower, 'yhat_linked_upper': yhat_linked_upper, 'observed_lower': observed_lower, 'observed_upper': observed_upper, 'trend': trend, 'trend_lower': trend_lower, 'trend_upper': trend_upper, 'trend_linked': trend_linked, 'trend_linked_lower': trend_linked_lower, 'trend_linked_upper': trend_linked_upper})
return result
def predict_regression(self: Self, t: np.ndarray, X: np.ndarray, pars: dict[str, Union[float, np.ndarray]]) -> tuple[np.ndarray, np.ndarray]:
"""
Calculate both trend and GLM argument from fitted model parameters
Parameters
----------
t : np.ndarray
Timestamps as integer values
X : np.ndarray
Overall feature matrix
pars : dict[str, Union[float, np.ndarray]]
Dictionary containing initial rate k and offset m as well as rate
changes delta
Returns
-------
trend : np.ndarray
The trend function
np.ndarray
Argument of the GLM
"""
trend = self.predict_trend(t, pars)
if self.stan_data.K == 0:
return (trend, trend)
beta = pars['beta']
Xb = np.matmul(X, beta)
return (self.linked_offset + self.linked_scale * trend, self.linked_offset + self.linked_scale * (trend + Xb))
def predict_trend(self: Self, t: np.ndarray, pars: dict[str, Union[float, np.ndarray]]) -> np.ndarray:
"""
Predict the trend based on model parameters
Parameters
----------
t : np.ndarray
Timestamps as integer values
pars : dict[str, Union[float, np.ndarray]]
Dictionary containing initial rate k and offset m as well as rate
changes delta
Returns
-------
trend : np.ndarray
Predicted trend
"""
changepoints_int = self.stan_data.t_change
m = cast(float, pars['m'])
k = cast(float, pars['k'])
deltas = cast(np.ndarray, pars['delta'])
trend = self.piecewise_linear(t, changepoints_int, m, k, deltas)
return trend
def piecewise_linear(self: Self, t: np.ndarray, changepoints_int: np.ndarray, m: float, k: float, deltas: np.ndarray) -> np.ndarray:
"""
Calculate the piecewise linear trend function
Parameters
----------
t : np.ndarray
Timestamps as integer values
changepoints_int : np.ndarray
Timestamps of changepoints as integer values
m : float
Trend offset
k : float
Base trend growth rate
deltas : np.ndarray
Trend rate adjustments, length S
Returns
-------
np.ndarray
The calculated trend
"""
deltas_t = (changepoints_int[None, :] <= t[..., None]) * deltas
k_t = deltas_t.sum(axis=1) + k
m_t = (deltas_t * -changepoints_int).sum(axis=1) + m
return k_t * t + m_t
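# Worked example (editor's addition): for t = [0, 1, 2, 3, 4], a single
# changepoint at t = 2, m = 0, k = 1 and deltas = [0.5], the slope is 1
# before the changepoint and 1.5 after it; the offset correction
# -changepoint * delta = -1 keeps the trend continuous, giving
# trend = [0.0, 1.0, 2.0, 3.5, 5.0].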
def trend_uncertainty(self: Self, t: np.ndarray, interval_width: float, trend_samples: int) -> Uncertainty:
"""
Generates upper and lower bound estimations for the trend prediction.
Parameters
----------
t : np.ndarray
Timestamps as integers
interval_width : float
Confidence interval width: Must fall in [0, 1]
trend_samples : int
Number of samples to draw from
Returns
-------
upper : np.ndarray
Upper bound of trend uncertainty
lower : np.ndarray
Lower bound of trend uncertainty
"""
if trend_samples == 0:
upper = np.zeros(t.shape)
lower = np.zeros(t.shape)
return Uncertainty(upper=upper, lower=lower)
mean_delta = np.abs(self.fit_params['delta']).mean()
t_history = t[t <= self.stan_data.t.max()]
t_future = t[t > self.stan_data.t.max()]
T_future = len(t_future)
likelihood = len(self.stan_data.t_change) / self.stan_data.T
bool_slope_change = np.random.uniform(size=(trend_samples, T_future)) < likelihood
shift_values = np.random.laplace(scale=mean_delta, size=bool_slope_change.shape)
shift_matrix = bool_slope_change * shift_values
uncertainties = shift_matrix.cumsum(axis=1).cumsum(axis=1)
lower_level = (1 - interval_width) / 2
upper_level = lower_level + interval_width
upper = np.percentile(uncertainties, 100 * upper_level, axis=0)
lower = np.percentile(uncertainties, 100 * lower_level, axis=0)
past_uncertainty = np.zeros(t_history.shape)
upper = np.concatenate([past_uncertainty, upper])
lower = np.concatenate([past_uncertainty, lower])
return Uncertainty(upper=upper, lower=lower)
def percentile(self: Self, a: np.ndarray, *args: tuple[Any, ...], **kwargs: dict[str, Any]) -> np.ndarray:
"""
We rely on np.nanpercentile in the rare instances where there
are a small number of bad samples with MCMC that contain NaNs.
However, since np.nanpercentile is far slower than np.percentile,
we only fall back to it if the array contains NaNs.
"""
fn = np.nanpercentile if np.isnan(a).any() else np.percentile
return fn(a, *args, **kwargs)
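# Editor's standalone sketch of the idea behind trend_uncertainty (all names
# hypothetical): future changepoints occur with the historical frequency,
# each one draws a Laplace-distributed slope shift, and the double cumulative
# sum turns slope shifts into trend offsets.
rng = np.random.default_rng(0)
n_samples, n_future, likelihood, mean_delta = (500, 10, 0.1, 0.05)
has_change = rng.uniform(size=(n_samples, n_future)) < likelihood
shifts = rng.laplace(scale=mean_delta, size=has_change.shape) * has_change
offsets = shifts.cumsum(axis=1).cumsum(axis=1)
lower, upper = np.percentile(offsets, [10, 90], axis=0)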
|
class ModelBackendBase(ABC):
'''
Abstract base class for the model backend.
The model backend is in charge of passing data and model parameters to the
stan code, as well as of distribution-model-dependent prediction.
'''
def yhat_func(self: Self, linked_arg: np.ndarray, **kwargs: Any) -> np.ndarray:
'''
Produces the predicted values yhat.
Parameters
----------
linked_arg : np.ndarray
Linked GLM output
scale : Union[float, np.ndarray, None]
Scale parameter of the distribution.
Returns
-------
np.ndarray
Predicted values
'''
pass
def quant_func(self: Self, level: float, yhat: np.ndarray, **kwargs: Any) -> np.ndarray:
'''
Quantile function of the underlying distribution
'''
pass
def __init__(self: Self, model_name: str, install=True) -> None:
'''
Initialize the model backend.
Parameters
----------
model_name : str
Name of the model. Must match any of the keys in MODEL_MAP. This
will be validated by the ModelBackend class
'''
pass
@abstractmethod
def preprocess(self: Self, stan_data: ModelInputData, **kwargs: Any) -> tuple[ModelInputData, ModelParams]:
'''
Augment the input data for the stan model with model dependent data
and calculate initial guesses for model parameters.
Parameters
----------
stan_data : ModelInputData
Model agnostic input data provided by the forecaster interface
Raises
------
NotImplementedError
Will be raised if the child-model class did not implement this
method
Returns
-------
ModelInputData
Updated stan_data
ModelParams
Guesses for the model parameters depending on the data
'''
pass
def normalize_data(self: Self, y: np.ndarray, capacity: Optional[Union[int, np.ndarray]]=None, lower_bound: bool=False, upper_bound: bool=False) -> tuple[np.ndarray, float, float]:
'''
Normalize the response variable y, apply the model's link function, and
min-max-scale the result to the unit interval ``[0, 1]``.
The routine performs three steps:
1. **Optional normalization** - If ``capacity`` is provided, divide ``y``
by ``capacity`` to convert counts to rates.
2. **Boundary adjustment** - Replace exact 0 or 1 with a small offset
when ``lower_bound`` or ``upper_bound`` is set to ``True``.
3. **Link + scaling** - Apply the link function, then min-max-scale the
result to the ``[0, 1]`` interval.
Parameters
----------
y : np.ndarray
Raw response variable. Shape can be any, provided it broadcasts
with ``capacity`` if ``capacity`` is an array.
capacity : Optional[Union[int, np.ndarray]]
Capacity size(s) for normalisation. If ``None`` (default) no
division is performed. If an array is given it must have the same
shape as ``y``.
lower_bound : bool, optional
Set to ``True`` when the response is bounded below at zero.
Exact zeros are replaced with ``1e-10`` before the link
transformation to avoid ``-inf``.
upper_bound : bool, optional
Set to ``True`` when the response is bounded above at one.
Exact ones are replaced with ``1 - 1e-10`` before the link
transformation to avoid ``+inf``.
Returns
-------
y_scaled : np.ndarray
The linked response min-max-scaled to the unit interval ``[0, 1]``.
linked_offset : float
The minimum of the linked, *un*-scaled response; add this to
reverse the min-max scaling.
linked_scale : float
The range (max - min) of the linked, un-scaled response; multiply
by this to reverse the min-max scaling.
'''
pass
def initial_trend_parameters(self: Self, y_scaled: np.ndarray, stan_data: ModelInputData) -> ModelParams:
'''
Infers an estimation of the fit parameters k, m, delta from
the data.
Parameters
----------
y_scaled : np.ndarray
The input y-data scaled to the GLM depending on the model, e.g.
logit(y / N) for the binomial model
stan_data : ModelInputData
Model agnostic input data provided by the forecaster interface.
Returns
-------
ModelParams
Contains the estimations
'''
pass
def trend_optimizer(x: np.ndarray) -> float:
'''
An optimizable function that is used to find a set of parameters
minimizing the residual sum of squares for the trend model.
'''
pass
def estimate_variance(self: Self, stan_data: ModelInputData, trend_params: ModelParams) -> float:
'''
Estimate an upper bound for the variance based on the residuals between
the observed data and the predicted trend.
Parameters
----------
stan_data : ModelInputData
Model agnostic input data provided by the forecaster interface.
trend_params : ModelParams
Contains parameters from initial trend parameter guess.
Returns
-------
float
The upper bound of variance estimated from the residuals between
data and trend.
'''
pass
def fit(self: Self, stan_data: ModelInputData, optimize_mode: Literal['MAP', 'MLE'], use_laplace: bool, capacity: Optional[int]=None, capacity_mode: Optional[str]=None, capacity_value: Optional[float]=None, vectorized: bool=False) -> Union[CmdStanMLE, CmdStanLaplace]:
'''
Calculates initial parameters and fits the model to the input data.
Parameters
----------
stan_data : ModelInputData
An object that holds the input data required by the data-block of
the stan model.
optimize_mode : Literal['MAP', 'MLE'], optional
If 'MAP' (default), the optimization step yields the Maximum A
Posteriori estimate; if 'MLE', the Maximum Likelihood Estimate.
use_laplace : bool, optional
If True (default), the optimization is followed by a sampling over
the Laplace approximation around the posterior mode.
capacity : int, optional
An upper bound used for ``binomial`` and ``beta-binomial`` models.
Specifying ``capacity`` is mutually exclusive with providing a
``capacity_mode`` and ``capacity_value`` pair.
capacity_mode : str, optional
A method used to estimate the capacity. Must be eitherr ``"scale"``
or ``"factor"``.
capacity_value : float, optional
A value associated with the selected ``capacity_mode``.
vectorized : bool
If True, the capacity is already part of stan_data as a numpy
array. If False the capacity must be constructed from capacity
parameters
Returns
-------
Union[CmdStanMLE, CmdStanLaplace]
The fitted CmdStanModel object that holds the fitted parameters
'''
pass
def predict(self: Self, t: np.ndarray, X: np.ndarray, interval_width: float, trend_samples: int, capacity_vec: Optional[np.ndarray]=None) -> pd.DataFrame:
'''
Based on the fitted model parameters predicts values and uncertainties
for given timestamps.
Parameters
----------
t : np.ndarray
Timestamps as integer values
X : np.ndarray
Overall feature matrix
interval_width : float
Confidence interval width: Must fall in [0, 1]
trend_samples : int
Number of samples to draw from
capacity_vec : Optional[np.ndarray], optional
Vectorized capacity - only relevant for models 'binomial' and
'beta-binomial'. Default is None.
Raises
------
NotFittedError
Raised if the model was not fitted prior to prediction.
Returns
-------
result : pd.DataFrame
Dataframe containing all predicted metrics, including
uncertainties. The columns include:
* yhat/trend: mean predicted value for overall model or trend
* yhat/trend_upper/lower: uncertainty intervals for mean
predicted values with respect to specified interval_width
* observed_upper/lower: uncertainty intervals for observed
values
* '_linked' versions of all quantities except for 'observed'.
'''
pass
def predict_regression(self: Self, t: np.ndarray, X: np.ndarray, pars: dict[str, Union[float, np.ndarray]]) -> tuple[np.ndarray, np.ndarray]:
'''
Calculate both trend and GLM argument from fitted model parameters
Parameters
----------
t : np.ndarray
Timestamps as integer values
X : np.ndarray
Overall feature matrix
pars : dict[str, Union[float, np.ndarray]]
Dictionary containing initial rate k and offset m as well as rate
changes delta
Returns
-------
trend : np.ndarray
The trend function
np.ndarray
Argument of the GLM
'''
pass
def predict_trend(self: Self, t: np.ndarray, pars: dict[str, Union[float, np.ndarray]]) -> np.ndarray:
'''
Predict the trend based on model parameters
Parameters
----------
t : np.ndarray
Timestamps as integer values
pars : dict[str, Union[float, np.ndarray]]
Dictionary containing initial rate k and offset m as well as rate
changes delta
Returns
-------
trend : np.ndarray
Predicted trend
'''
pass
def piecewise_linear(self: Self, t: np.ndarray, changepoints_int: np.ndarray, m: float, k: float, deltas: np.ndarray) -> np.ndarray:
'''
Calculate the piecewise linear trend function
Parameters
----------
t : np.ndarray
Timestamps as integer values
changepoints_int : np.ndarray
Timestamps of changepoints as integer values
m : float
Trend offset
k : float
Base trend growth rate
deltas : np.ndarray
Trend rate adjustments, length S
Returns
-------
np.ndarray
The calculated trend
'''
pass
def trend_uncertainty(self: Self, t: np.ndarray, interval_width: float, trend_samples: int) -> Uncertainty:
'''
Generates upper and lower bound estimations for the trend prediction.
Parameters
----------
t : np.ndarray
Timestamps as integers
interval_width : float
Confidence interval width: Must fall in [0, 1]
trend_samples : int
Number of samples to draw from
Returns
-------
upper : np.ndarray
Upper bound of trend uncertainty
lower : np.ndarray
Lower bound of trend uncertainty
'''
pass
def percentile(self: Self, a: np.ndarray, *args: tuple[Any, ...], **kwargs: dict[str, Any]) -> np.ndarray:
'''
We rely on np.nanpercentile in the rare instances where there
are a small number of bad samples with MCMC that contain NaNs.
However, since np.nanpercentile is far slower than np.percentile,
we only fall back to it if the array contains NaNs.
'''
pass
| 17
| 16
| 57
| 6
| 26
| 26
| 2
| 1.02
| 1
| 15
| 4
| 7
| 14
| 9
| 14
| 34
| 866
| 111
| 379
| 164
| 309
| 388
| 200
| 110
| 184
| 9
| 4
| 2
| 37
|
328,370
|
e-dyn/gloria
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/e-dyn_gloria/gloria/models.py
|
gloria.models.ModelInputData
|
import numpy as np
from scipy.stats import beta, betabinom, binom, gamma, nbinom, norm, poisson
from typing import Any, Callable, Literal, Mapping, Optional, Type, Union, cast
from pydantic import BaseModel, ConfigDict, Field, ValidationInfo, field_validator
class ModelInputData(BaseModel):
"""
A container for the input data of each model. Additional model-dependent
parameters have to be added within the model
"""
model_config = ConfigDict(extra='allow', arbitrary_types_allowed=True)
T: int = Field(ge=0, default=0)
S: int = Field(ge=0, default=0)
K: int = Field(ge=0, default=0)
tau: float = Field(gt=0, default=3)
gamma: float = Field(gt=0, default=3)
y: np.ndarray = np.array([])
t: np.ndarray = np.array([])
t_change: np.ndarray = np.array([])
X: np.ndarray = np.array([[]])
sigmas: np.ndarray = np.array([])
linked_offset: Optional[float] = None
linked_scale: Optional[float] = None
@field_validator('S')
@classmethod
def validate_S(cls, S: int, info: ValidationInfo) -> int:
if S > info.data['T']:
raise ValueError('Number of changepoints must be less than or equal to the number of data points.')
return S
@field_validator('y')
@classmethod
def validate_y_shape(cls, y: np.ndarray, info: ValidationInfo) -> np.ndarray:
if len(y.shape) != 1:
raise ValueError('Data array must be 1d-ndarray.')
if info.data['T'] != len(y):
raise ValueError('Length of y does not equal specified T')
return y
@field_validator('t')
@classmethod
def validate_t_shape(cls, t: np.ndarray, info: ValidationInfo) -> np.ndarray:
if len(t.shape) != 1:
raise ValueError('Timestamp array must be 1d-ndarray.')
if info.data['T'] != len(t):
raise ValueError('Length of t does not equal specified T')
return t
@field_validator('t_change')
@classmethod
def validate_t_change_shape(cls, t_change: np.ndarray, info: ValidationInfo) -> np.ndarray:
if len(t_change.shape) != 1:
raise ValueError('Changepoint array must be 1d-ndarray.')
if info.data['S'] != len(t_change):
raise ValueError('Length of t_change does not equal specified S')
return t_change
@field_validator('X')
@classmethod
def validate_X_shape(cls, X: np.ndarray, info: ValidationInfo) -> np.ndarray:
if len(X.shape) != 2:
raise ValueError('Regressor matrix X must be 2d-ndarray.')
if X.shape[1] == 0:
return X
if info.data['T'] != X.shape[0]:
raise ValueError('Regressor matrix X must have same number of rows as timestamp.')
if info.data['K'] != X.shape[1]:
raise ValueError('Regressor matrix X must have same number of columns as specified K.')
return X
@field_validator('sigmas')
@classmethod
def validate_sigmas(cls, sigmas: np.ndarray, info: ValidationInfo) -> np.ndarray:
if len(sigmas.shape) != 1:
raise ValueError('Sigmas array must be 1d-ndarray.')
if info.data['K'] != len(sigmas):
raise ValueError('Length of sigmas does not equal specified K.')
if not np.all(sigmas > 0):
raise ValueError('All elements in sigmas must be greater than 0.')
return sigmas
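# Editor's construction sketch: the validators above require mutually
# consistent shapes; inconsistent inputs raise a ValueError.
data = ModelInputData(T=4, S=1, K=2, y=np.arange(4.0), t=np.arange(4.0), t_change=np.array([2.0]), X=np.ones((4, 2)), sigmas=np.array([1.0, 1.0]))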
|
class ModelInputData(BaseModel):
'''
A container for the input data of each model. Additional model-dependent
parameters have to be added within the model
'''
@field_validator('S')
@classmethod
def validate_S(cls, S: int, info: ValidationInfo) -> int:
pass
@field_validator('y')
@classmethod
def validate_y_shape(cls, y: np.ndarray, info: ValidationInfo) -> np.ndarray:
pass
@field_validator('t')
@classmethod
def validate_t_shape(cls, t: np.ndarray, info: ValidationInfo) -> np.ndarray:
pass
@field_validator('t_change')
@classmethod
def validate_t_change_shape(cls, t_change: np.ndarray, info: ValidationInfo) -> np.ndarray:
pass
@field_validator('X')
@classmethod
def validate_X_shape(cls, X: np.ndarray, info: ValidationInfo) -> np.ndarray:
pass
@field_validator('sigmas')
@classmethod
def validate_sigmas(cls, sigmas: np.ndarray, info: ValidationInfo) -> np.ndarray:
pass
| 19
| 1
| 10
| 0
| 10
| 0
| 3
| 0.22
| 1
| 3
| 0
| 0
| 0
| 0
| 6
| 88
| 104
| 8
| 88
| 36
| 59
| 19
| 54
| 20
| 47
| 5
| 5
| 1
| 20
|
328,371
|
e-dyn/gloria
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/e-dyn_gloria/gloria/models.py
|
gloria.models.ModelParams
|
from pydantic import BaseModel, ConfigDict, Field, ValidationInfo, field_validator
import numpy as np
from scipy.stats import beta, betabinom, binom, gamma, nbinom, norm, poisson
class ModelParams(BaseModel):
"""
A container for the fitting parameters of each model. Additional model-
dependent parameters have to be added within the model
"""
model_config = ConfigDict(extra='allow', arbitrary_types_allowed=True)
k: float = 0
m: float = 0
delta: np.ndarray = np.array([])
beta: np.ndarray = np.array([])
|
class ModelParams(BaseModel):
'''
A container for the fitting parameters of each model. Additional model-
dependent parameters have to be added within the model
'''
pass
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1.11
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 82
| 17
| 2
| 9
| 6
| 8
| 10
| 6
| 6
| 5
| 0
| 5
| 0
| 0
|
328,372
|
e-dyn/gloria
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/e-dyn_gloria/gloria/models.py
|
gloria.models.NegativeBinomial
|
import numpy as np
from typing import Any, Callable, Literal, Mapping, Optional, Type, Union, cast
from typing_extensions import Self, TypeAlias
from scipy.stats import beta, betabinom, binom, gamma, nbinom, norm, poisson
class NegativeBinomial(ModelBackendBase):
"""
Implementation of model backend for negative binomial distribution
"""
stan_file = BASEPATH / 'stan_models/negative_binomial.stan'
kind = 'bu'
link_pair = LINK_FUNC_MAP['log']
def quant_func(self: Self, level: float, yhat: np.ndarray, scale: float=1, **kwargs: Any) -> np.ndarray:
"""
Quantile function of the underlying distribution
Parameters
----------
level : float
Level of confidence in (0,1)
yhat : np.ndarray
Predicted values.
scale : Union[float, np.ndarray, None]
Scale parameter of the distribution.
Returns
-------
np.ndarray
Quantile at given level
"""
p = scale / (scale + yhat)
return nbinom.ppf(level, n=scale, p=p)
def preprocess(self: Self, stan_data: ModelInputData, **kwargs: Any) -> tuple[ModelInputData, ModelParams]:
"""
Augment the input data for the stan model with model dependent data
and calculate initial guesses for model parameters.
Parameters
----------
stan_data : ModelInputData
Model agnostic input data provided by the forecaster interface
Returns
-------
ModelInputData
Updated stan_data
ModelParams
Guesses for the model parameters depending on the data
"""
y_scaled, self.linked_offset, self.linked_scale = self.normalize_data(y=stan_data.y, lower_bound=True)
stan_data.linked_offset = self.linked_offset
stan_data.linked_scale = self.linked_scale
ini_params = self.initial_trend_parameters(y_scaled, stan_data)
stan_data.variance_max = self.estimate_variance(stan_data, ini_params)
return (stan_data, ini_params)
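# Editor's sketch of the parameterization used by quant_func above: with
# n = scale and p = scale / (scale + yhat), scipy's nbinom has mean yhat and
# variance yhat + yhat**2 / scale, i.e. smaller scale means more
# overdispersion.
yhat, scale = (10.0, 5.0)
p = scale / (scale + yhat)
print(nbinom.mean(n=scale, p=p))  # 10.0
print(nbinom.var(n=scale, p=p))  # 30.0 = 10 + 10**2 / 5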
|
class NegativeBinomial(ModelBackendBase):
'''
Implementation of model backend for negative binomial distribution
'''
def quant_func(self: Self, level: float, yhat: np.ndarray, scale: float=1, **kwargs: Any) -> np.ndarray:
'''
Quantile function of the underlying distribution
Parameters
----------
level : float
Level of confidence in (0,1)
yhat : np.ndarray
Predicted values.
scale : Union[float, np.ndarray, None]
Scale parameter of the distribution.
Returns
-------
np.ndarray
Quantile at given level
'''
pass
def preprocess(self: Self, stan_data: ModelInputData, **kwargs: Any) -> tuple[ModelInputData, ModelParams]:
'''
Augment the input data for the stan model with model dependent data
and calculate initial guesses for model parameters.
Parameters
----------
stan_data : ModelInputData
Model agnostic input data provided by the forecaster interface
Returns
-------
ModelInputData
Updated stan_data
ModelParams
Guesses for the model parameters depending on the data
'''
pass
| 3
| 3
| 35
| 6
| 10
| 20
| 1
| 1.96
| 1
| 5
| 2
| 0
| 2
| 2
| 2
| 36
| 84
| 14
| 24
| 17
| 13
| 47
| 14
| 9
| 11
| 1
| 5
| 0
| 2
|
328,373
|
e-dyn/gloria
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/e-dyn_gloria/gloria/models.py
|
gloria.models.Normal
|
import numpy as np
from typing_extensions import Self, TypeAlias
from scipy.stats import beta, betabinom, binom, gamma, nbinom, norm, poisson
from typing import Any, Callable, Literal, Mapping, Optional, Type, Union, cast
class Normal(ModelBackendBase):
"""
Implementation of model backend for normal distribution
"""
stan_file = BASEPATH / 'stan_models/normal.stan'
kind = 'biuf'
link_pair = LINK_FUNC_MAP['id']
def quant_func(self: Self, level: float, yhat: np.ndarray, scale: float=1, **kwargs: Any) -> np.ndarray:
"""
Quantile function of the underlying distribution
Parameters
----------
level : float
Level of confidence in (0,1)
yhat : np.ndarray
Predicted values.
scale : Union[float, np.ndarray, None]
Scale parameter of the distribution. Equals observation noise for
normal distribution.
Returns
-------
np.ndarray
Quantile at given level
"""
return norm.ppf(level, loc=yhat, scale=scale)
def preprocess(self: Self, stan_data: ModelInputData, **kwargs: Any) -> tuple[ModelInputData, ModelParams]:
"""
Augment the input data for the stan model with model dependent data
and calculate initial guesses for model parameters.
Parameters
----------
stan_data : ModelInputData
Model agnostic input data provided by the forecaster interface
Returns
-------
ModelInputData
Updated stan_data
ModelParams
Guesses for the model parameters depending on the data
"""
y_scaled, self.linked_offset, self.linked_scale = self.normalize_data(y=stan_data.y)
stan_data.linked_offset = self.linked_offset
stan_data.linked_scale = self.linked_scale
ini_params = self.initial_trend_parameters(y_scaled, stan_data)
variance_max = self.estimate_variance(stan_data, ini_params)
stan_data.variance_max = variance_max
return (stan_data, ini_params)
|
class Normal(ModelBackendBase):
'''
Implementation of model backend for normal distribution
'''
def quant_func(self: Self, level: float, yhat: np.ndarray, scale: float=1, **kwargs: Any) -> np.ndarray:
'''
Quantile function of the underlying distribution
Parameters
----------
level : float
Level of confidence in (0,1)
yhat : np.ndarray
Predicted values.
scale : Union[float, np.ndarray, None]
Scale parameter of the distribution. Equals observation noise for
normal distribution.
Returns
-------
np.ndarray
Quantile at given level
'''
pass
def preprocess(self: Self, stan_data: ModelInputData, **kwargs: Any) -> tuple[ModelInputData, ModelParams]:
'''
Augment the input data for the stan model with model dependent data
and calculate initial guesses for model parameters.
Parameters
----------
stan_data : ModelInputData
Model agnostic input data provided by the forecaster interface
Returns
-------
ModelInputData
Updated stan_data
ModelParams
Guesses for the model parameters depending on the data
'''
pass
| 3
| 3
| 34
| 6
| 10
| 18
| 1
| 1.83
| 1
| 5
| 2
| 0
| 2
| 2
| 2
| 36
| 82
| 15
| 24
| 17
| 13
| 44
| 14
| 9
| 11
| 1
| 5
| 0
| 2
|
328,374
|
e-dyn/gloria
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/e-dyn_gloria/gloria/models.py
|
gloria.models.Poisson
|
from scipy.stats import beta, betabinom, binom, gamma, nbinom, norm, poisson
import numpy as np
from typing import Any, Callable, Literal, Mapping, Optional, Type, Union, cast
from typing_extensions import Self, TypeAlias
class Poisson(ModelBackendBase):
"""
Implementation of model backend for Poisson distribution
"""
stan_file = BASEPATH / 'stan_models/poisson.stan'
kind = 'bu'
link_pair = LINK_FUNC_MAP['log']
def quant_func(self: Self, level: float, yhat: np.ndarray, **kwargs: Any) -> np.ndarray:
"""
Quantile function of the underlying distribution
Parameters
----------
level : float
Level of confidence in (0,1)
yhat : np.ndarray
Predicted values.
scale : Union[float, np.ndarray, None]
Scale parameter of the distribution. None for Poisson distribution.
Returns
-------
np.ndarray
Quantile at given level
"""
return poisson.ppf(level, yhat)
def preprocess(self: Self, stan_data: ModelInputData, **kwargs: Any) -> tuple[ModelInputData, ModelParams]:
"""
Augment the input data for the stan model with model dependent data
and calculate initial guesses for model parameters.
Parameters
----------
stan_data : ModelInputData
Model agnostic input data provided by the forecaster interface
Returns
-------
ModelInputData
Updated stan_data
ModelParams
Guesses for the model parameters depending on the data
"""
y_scaled, self.linked_offset, self.linked_scale = self.normalize_data(y=stan_data.y, lower_bound=True)
stan_data.linked_offset = self.linked_offset
stan_data.linked_scale = self.linked_scale
ini_params = self.initial_trend_parameters(y_scaled, stan_data)
stan_data.variance_max = self.estimate_variance(stan_data, ini_params)
return (stan_data, ini_params)
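# Editor's sketch: for the Poisson model the observation interval comes
# straight from the count distribution around yhat, e.g. an 80 % interval
# for yhat = 4:
print(poisson.ppf(0.1, 4))  # 2.0
print(poisson.ppf(0.9, 4))  # 7.0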
|
class Poisson(ModelBackendBase):
'''
Implementation of model backend for Poisson distribution
'''
def quant_func(self: Self, level: float, yhat: np.ndarray, **kwargs: Any) -> np.ndarray:
'''
Quantile function of the underlying distribution
Parameters
----------
level : float
Level of confidence in (0,1)
yhat : np.ndarray
Predicted values.
scale : Union[float, np.ndarray, None]
Scale parameter of the distribution. None for Poisson distribution.
Returns
-------
np.ndarray
Quantile at given level
'''
pass
def preprocess(self: Self, stan_data: ModelInputData, **kwargs: Any) -> tuple[ModelInputData, ModelParams]:
'''
Augment the input data for the stan model with model dependent data
and calculate initial guesses for model parameters.
Parameters
----------
stan_data : ModelInputData
Model agnostic input data provided by the forecaster interface
Returns
-------
ModelInputData
Updated stan_data
ModelParams
Guesses for the model parameters depending on the data
'''
pass
| 3
| 3
| 31
| 6
| 8
| 18
| 1
| 2.32
| 1
| 5
| 2
| 0
| 2
| 2
| 2
| 36
| 76
| 14
| 19
| 12
| 12
| 44
| 13
| 8
| 10
| 1
| 5
| 0
| 2
|
328,375
|
e-dyn/gloria
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/e-dyn_gloria/gloria/models.py
|
gloria.models.Uncertainty
|
import numpy as np
from pydantic import BaseModel, ConfigDict, Field, ValidationInfo, field_validator
class Uncertainty(BaseModel):
"""
Small container class for holding trend uncertainties
"""
model_config = ConfigDict(arbitrary_types_allowed=True)
lower: np.ndarray
upper: np.ndarray
|
class Uncertainty(BaseModel):
'''
Small container class for holding trend uncertainties
'''
pass
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0.67
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 82
| 12
| 2
| 6
| 2
| 5
| 4
| 4
| 2
| 3
| 0
| 5
| 0
| 0
|
328,376
|
e-dyn/gloria
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/e-dyn_gloria/gloria/profiles.py
|
gloria.profiles.BoxCar
|
from typing_extensions import Self
from typing import Any, Type
import pandas as pd
from gloria.utilities.types import Timedelta
class BoxCar(Profile):
"""
A BoxCar shaped profile.
For a given time :math:`t` the profile can be described by
.. math::
f(t) = \\left\\{
\\begin{array}{ll}
1 & t_0 \\le t < t_0 + w \\\\
0 & \\text{otherwise}
\\end{array}
\\right.
with ``width=w`` being a constructor parameter and ``t_anchor=t_0`` the
input of :meth:`~gloria.BoxCar.generate`. The following plot illustrates
the boxcar function.
.. image:: ../pics/example_boxcar.png
:align: center
:width: 500
:alt: Example plot of a boxcar function.
.. note::
Setting the boxcar profile's ``width`` equal to the :class:`Gloria`
model's ``sampling_period`` yields a :math:`\\delta`-shaped regressor -
identical to the holiday regressors used by
`Prophet <https://facebook.github.io/prophet/>`_.
Parameters
----------
width : :class:`pandas.Timedelta` | str
Temporal width of the boxcar function given as
:class:`pandas.Timedelta` or string representing such.
"""
width: Timedelta
def generate(self: Self, timestamps: pd.Series, t_anchor: pd.Timestamp) -> pd.Series:
"""
Generate a time series with a single boxcar profile.
Parameters
----------
timestamps : :class:`pandas.Series`
The input timestamps at which the boxcar profile is to be
evaluated.
t_anchor : :class:`pandas.Timestamp`
Location of the boxcar's rising edge
Returns
-------
:class:`pandas.Series`
The output time series including the boxcar profile with amplitude
1.
"""
mask = (timestamps >= t_anchor) & (timestamps < t_anchor + self.width)
return mask * 1
def to_dict(self: Self) -> dict[str, Any]:
"""
Converts the BoxCar profile to a JSON-serializable dictionary.
Returns
-------
dict[str, Any]
Dictionary containing all profile fields including an extra
``profile_type = "BoxCar"`` item.
"""
profile_dict = super().to_dict()
profile_dict['width'] = str(self.width)
return profile_dict
@classmethod
def from_dict(cls: Type[Self], profile_dict: dict[str, Any]) -> Self:
"""
Creates a BoxCar object from a dictionary.
The key-value pairs of the dictionary must correspond to the
constructor arguments of the profile.
Parameters
----------
profile_dict : dict[str, Any]
Dictionary containing all profile fields
Returns
-------
BoxCar
BoxCar object with fields from ``profile_dict``
"""
profile_dict['width'] = pd.Timedelta(profile_dict['width'])
return cls(**profile_dict)
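# Editor's usage sketch (values hypothetical; the string width assumes the
# Timedelta coercion described in the docstring): a 2-day boxcar anchored at
# Jan 2 is 1 on Jan 2 and Jan 3 and 0 elsewhere.
ts = pd.Series(pd.date_range('2024-01-01', periods=5, freq='D'))
box = BoxCar(width='2D')
print(box.generate(ts, pd.Timestamp('2024-01-02')).tolist())  # [0, 1, 1, 0, 0]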
|
class BoxCar(Profile):
'''
A BoxCar shaped profile.
For a given time :math:`t` the profile can be described by
.. math::
f(t) = \left\{
\begin{array}{ll}
1 & t_0 \le t < t_0 + w \\
0 & \text{otherwise}
\end{array}
\right.
with ``width=w`` being a constructor parameter and ``t_anchor=t_0`` the
input of :meth:`~gloria.BoxCar.generate`. The following plot illustrates
the boxcar function.
.. image:: ../pics/example_boxcar.png
:align: center
:width: 500
:alt: Example plot of a boxcar function.
.. note::
Setting the boxcar profile's ``width`` equal to the :class:`Gloria`
model's ``sampling_period`` yields a :math:`\delta`-shaped regressor -
identical to the holiday regressors used by
`Prophet <https://facebook.github.io/prophet/>`_.
Parameters
----------
width : :class:`pandas.Timedelta` | str
Temporal width of the boxcar function given as
:class:`pandas.Timedelta` or string representing such.
'''
def generate(self: Self, timestamps: pd.Series, t_anchor: pd.Timestamp) -> pd.Series:
'''
Generate a time series with a single boxcar profile.
Parameters
----------
timestamps : :class:`pandas.Series`
The input timestamps at which the boxcar profile is to be
evaluated.
t_anchor : :class:`pandas.Timestamp`
Location of the boxcar's rising edge
Returns
-------
:class:`pandas.Series`
The output time series including the boxcar profile with amplitude
1.
'''
pass
def to_dict(self: Self) -> dict[str, Any]:
'''
Converts the BoxCar profile to a JSON-serializable dictionary.
Returns
-------
dict[str, Any]
Dictionary containing all profile fields including an extra
``profile_type = "BoxCar"`` item.
'''
pass
@classmethod
def from_dict(cls: Type[Self], profile_dict: dict[str, Any]) -> Self:
'''
Creates a BoxCar object from a dictionary.
The key-value pairs of the dictionary must correspond to the
constructor arguments of the profile.
Parameters
----------
profile_dict : dict[str, Any]
Dictionary containing all profile fields
Returns
-------
BoxCar
BoxCar object with fields from ``profile_dict``
'''
pass
| 5
| 4
| 19
| 2
| 4
| 13
| 1
| 4.47
| 1
| 5
| 0
| 0
| 2
| 0
| 3
| 90
| 98
| 16
| 15
| 9
| 8
| 67
| 12
| 6
| 8
| 1
| 6
| 0
| 3
|
328,377
|
e-dyn/gloria
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/e-dyn_gloria/gloria/profiles.py
|
gloria.profiles.Cauchy
|
from gloria.utilities.types import Timedelta
import pandas as pd
from typing_extensions import Self
from typing import Any, Type
class Cauchy(Profile):
"""
A Cauchy shaped profile.
For a given time :math:`t` the profile can be described by
.. math::
f(t) = \\frac{1}{4\\cdot \\left(t-t_0 \\right)^2 / w^2 + 1}
with ``width=w`` being a constructor parameter as well as ``t_anchor=t_0``
the input of :meth:`~gloria.Cauchy.generate`. The following plot
illustrates the Cauchy function.
.. image:: ../pics/example_cauchy.png
:align: center
:width: 500
:alt: Example plot of a Cauchy function.
Parameters
----------
width : :class:`pandas.Timedelta` | str
Temporal width of the Cauchy function given as
:class:`pandas.Timedelta` or string representing such.
"""
width: Timedelta
def generate(self: Self, timestamps: pd.Series, t_anchor: pd.Timestamp) -> pd.Series:
"""
Generate a time series with a single Cauchy profile.
Parameters
----------
timestamps : :class:`pandas.Series`
The input timestamps at which the Cauchy profile is to be
evaluated.
t_anchor : :class:`pandas.Timestamp`
Location of the Cauchy profile's mode.
Returns
-------
:class:`pandas.Series`
The output time series including the Cauchy profile with amplitude
1.
"""
t = (timestamps - t_anchor) / self.width
return 1 / (4 * t ** 2 + 1)
def to_dict(self: Self) -> dict[str, Any]:
"""
Converts the Cauchy profile to a JSON-serializable dictionary.
Returns
-------
dict[str, Any]
Dictionary containing all profile fields including an extra
``profile_type = "Cauchy"`` item.
"""
profile_dict = super().to_dict()
profile_dict['width'] = str(self.width)
return profile_dict
@classmethod
def from_dict(cls: Type[Self], profile_dict: dict[str, Any]) -> Self:
"""
Creates a Cauchy object from a dictionary.
The key-value pairs of the dictionary must correspond to the
constructor arguments of the profile.
Parameters
----------
profile_dict : dict[str, Any]
Dictionary containing all profile fields
Returns
-------
Cauchy
Cauchy object with fields from ``profile_dict``
"""
profile_dict['width'] = pd.Timedelta(profile_dict['width'])
return cls(**profile_dict)
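# Editor's sketch (string width assumes the documented Timedelta coercion):
# with this normalization ``width`` acts as the full width at half maximum,
# i.e. f(t_0 +/- width/2) = 0.5.
ts = pd.Series(pd.to_datetime(['2024-01-01 00:00', '2024-01-01 12:00', '2024-01-02 00:00']))
cauchy_profile = Cauchy(width='1D')
print(cauchy_profile.generate(ts, pd.Timestamp('2024-01-01')).tolist())  # [1.0, 0.5, 0.2]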
|
class Cauchy(Profile):
'''
A Cauchy shaped profile.
For a given time :math:`t` the profile can be described by
.. math::
f(t) = \frac{1}{4\cdot \left(t-t_0 \right)^2 / w^2 + 1}
with ``width=w`` being a constructor parameter as well as ``t_anchor=t_0``
the input of :meth:`~gloria.Cauchy.generate`. The following plot
illustrates the Cauchy function.
.. image:: ../pics/example_cauchy.png
:align: center
:width: 500
:alt: Example plot of a Cauchy function.
Parameters
----------
width : :class:`pandas.Timedelta` | str
Temporal width of the Cauchy function given as
:class:`pandas.Timedelta` or string representing such.
'''
def generate(self: Self, timestamps: pd.Series, t_anchor: pd.Timestamp) -> pd.Series:
'''
Generate a time series with a single Cauchy profile.
Parameters
----------
timestamps : :class:`pandas.Series`
The input timestamps at which the Cauchy profile is to be
evaluated.
t_anchor : :class:`pandas.Timestamp`
Location of the Cauchy profile's mode.
Returns
-------
:class:`pandas.Series`
The output time series including the Cauchy profile with amplitude
1.
'''
pass
def to_dict(self: Self) -> dict[str, Any]:
'''
Converts the Cauchy profile to a JSON-serializable dictionary.
Returns
-------
dict[str, Any]
Dictionary containing all profile fields including an extra
``profile_type = "Cauchy"`` item.
'''
pass
@classmethod
def from_dict(cls: Type[Self], profile_dict: dict[str, Any]) -> Self:
'''
Creates a Cauchy object from a dictionary.
The key-value pairs of the dictionary must correspond to the
constructor arguments of the profile.
Parameters
----------
profile_dict : dict[str, Any]
Dictionary containing all profile fields
Returns
-------
Cauchy
Cauchy object with fields from ``profile_dict``
'''
pass
| 5
| 4
| 20
| 2
| 4
| 14
| 1
| 3.93
| 1
| 5
| 0
| 0
| 2
| 0
| 3
| 90
| 92
| 18
| 15
| 9
| 8
| 59
| 12
| 6
| 8
| 1
| 6
| 0
| 3
|
328,378
|
e-dyn/gloria
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/e-dyn_gloria/gloria/profiles.py
|
gloria.profiles.Exponential
|
from pydantic import BaseModel, ConfigDict, Field, ValidationInfo, field_validator
from gloria.utilities.types import Timedelta
from typing import Any, Type
from typing_extensions import Self
import pandas as pd
import numpy as np
class Exponential(Profile):
"""
A two-sided exponential decay shaped profile.
For a given time :math:`t` the profile can be described by
.. math::
f(t) = \\exp\\left(
-\\log 2 \\left|\\frac{t-t_0}{w\\left(t\\right)}\\right|
\\right).
Here, :math:`w\\left(t\\right) = w_\\text{lead}` is the left-sided
lead-width for :math:`t<t_0` and :math:`w\\left(t\\right) = w_\\text{lag}`
is the right-sided lag-width for :math:`t\\ge t_0`, set by ``lead_width``
and ``lag_width`` in the constructor, respectively. The parameter
``t_anchor=t_0`` is an input of :meth:`~gloria.Exponential.generate`. The
following plot illustrates the two-sided exponential decay function.
.. image:: ../pics/example_exponential.png
:align: center
:width: 500
:alt: Example plot of a two-sided exponential decay function.
Parameters
----------
lead_width : :class:`pandas.Timedelta` | str
Temporal left-sided lead-width of the exponential function given as
:class:`pandas.Timedelta` or string representing such.
lag_width : :class:`pandas.Timedelta` | str
Temporal right-sided lag-width of the exponential function given as
:class:`pandas.Timedelta` or string representing such.
"""
lead_width: Timedelta
lag_width: Timedelta
@field_validator('lead_width')
@classmethod
def validate_lead_width(cls: Type[Self], lead_width: Timedelta) -> Timedelta:
"""
If the lead width is below zero, set it to zero and warn the user.
"""
if lead_width < Timedelta(0):
from gloria.utilities.logging import get_logger
get_logger().warning('Lead width of exponential decay < 0 interpreted as lag decay. Setting lead_width = 0.')
lead_width = Timedelta(0)
return lead_width
@field_validator('lag_width')
@classmethod
def validate_lag_width(cls: Type[Self], lag_width: Timedelta, other_fields: ValidationInfo) -> Timedelta:
"""
If the lag width is below zero, set it to zero and warn the user. Also
check whether lead_width = lag_width = 0 and issue a warning.
:meta private:
"""
if lag_width < Timedelta(0):
from gloria.utilities.logging import get_logger
get_logger().warning('Lag width of exponential decay profile < 0 interpreted as lead decay. Setting lag_width = 0.')
lag_width = Timedelta(0)
if (lag_width == Timedelta(0)) & (other_fields.data['lead_width'] == Timedelta(0)):
from gloria.utilities.logging import get_logger
get_logger().warning('Lead and lag width of exponential decay profile = 0 - likely numerical issues during fitting.')
return lag_width
def generate(self: Self, timestamps: pd.Series, t_anchor: pd.Timestamp) -> pd.Series:
"""
Generate a time series with a single Exponential profile.
Parameters
----------
timestamps : :class:`pandas.Series`
The input timestamps at which the Exponential profile is to be
evaluated.
t_anchor : :class:`pandas.Timestamp`
Location of the Exponential profile's mode.
Returns
-------
:class:`pandas.Series`
The output time series including the Exponential profile with
amplitude 1.
"""
t = timestamps - t_anchor
mask_lead = timestamps < t_anchor
mask_lag = timestamps >= t_anchor
y = np.zeros_like(timestamps, dtype=float)
if self.lead_width > pd.Timedelta(0):
arg = np.log(2) * np.asarray(t[mask_lead] / self.lead_width)
y[mask_lead] += np.exp(arg)
if self.lag_width > pd.Timedelta(0):
arg = np.log(2) * np.asarray(t[mask_lag] / self.lag_width)
y[mask_lag] += np.exp(-arg)
return y
def to_dict(self: Self) -> dict[str, Any]:
"""
Converts the Exponential profile to a JSON-serializable dictionary.
Returns
-------
dict[str, Any]
Dictionary containing all profile fields including an extra
``profile_type = "Exponential"`` item.
"""
profile_dict = super().to_dict()
profile_dict['lead_width'] = str(self.lead_width)
profile_dict['lag_width'] = str(self.lag_width)
return profile_dict
@classmethod
def from_dict(cls: Type[Self], profile_dict: dict[str, Any]) -> Self:
"""
Creates an Exponential object from a dictionary.
The key-value pairs of the dictionary must correspond to the
constructor arguments of the profile.
Parameters
----------
profile_dict : dict[str, Any]
Dictionary containing all profile fields
Returns
-------
Exponential
Exponential object with fields from ``profile_dict``
"""
profile_dict['lead_width'] = pd.Timedelta(profile_dict['lead_width'])
profile_dict['lag_width'] = pd.Timedelta(profile_dict['lag_width'])
return cls(**profile_dict)
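# Editor's sketch (string widths assume the documented Timedelta coercion):
# the log(2) factor makes both widths half-lives, so the profile equals 0.5
# exactly one lag_width after (or one lead_width before) the anchor.
ts = pd.Series(pd.to_datetime(['2023-12-31', '2024-01-01', '2024-01-02', '2024-01-03']))
exp_profile = Exponential(lead_width='1D', lag_width='1D')
print(exp_profile.generate(ts, pd.Timestamp('2024-01-01')).round(2).tolist())  # [0.5, 1.0, 0.5, 0.25]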
|
class Exponential(Profile):
'''
A two-sided exponential decay shaped profile.
For a given time :math:`t` the profile can be described by
.. math::
f(t) = \exp\left(
-\log 2 \left|\frac{t-t_0}{w\left(t\right)}\right|
\right).
Here, :math:`w\left(t\right) = w_\text{lead}` is the left-sided
lead-width for :math:`t<t_0` and :math:`w\left(t\right) = w_\text{lag}`
is the right-sided lag-width for :math:`t\ge t_0`, set by ``lead_width``
and ``lag_width`` in the constructor, respectively. The parameter
``t_anchor=t_0`` is an input of :meth:`~gloria.Exponential.generate`. The
following plot illustrates the two-sided exponential decay function.
.. image:: ../pics/example_exponential.png
:align: center
:width: 500
:alt: Example plot of a two-sided exponential decay function.
Parameters
----------
lead_width : :class:`pandas.Timedelta` | str
Temporal left-sided lead-width of the exponential function given as
:class:`pandas.Timedelta` or string representing such.
lag_width : :class:`pandas.Timedelta` | str
Temporal right-sided lag-width of the exponential function given as
:class:`pandas.Timedelta` or string representing such.
'''
@field_validator('lead_width')
@classmethod
def validate_lead_width(cls: Type[Self], lead_width: Timedelta) -> Timedelta:
'''
If the lead width is below zero, set it to zero and warn the user.
'''
pass
@field_validator('lag_width')
@classmethod
def validate_lag_width(cls: Type[Self], lag_width: Timedelta, other_fields: ValidationInfo) -> Timedelta:
'''
If the lag width is below zero, set it to zero and warn the user. Also
check whether lead_width = lag_width = 0 and issue a warning.
:meta private:
'''
pass
def generate(self: Self, timestamps: pd.Series, t_anchor: pd.Timestamp) -> pd.Series:
'''
Generate a time series with a single Exponential profile.
Parameters
----------
timestamps : :class:`pandas.Series`
The input timestamps at which the Exponential profile is to be
evaluated.
t_anchor : :class:`pandas.Timestamp`
Location of the Exponential profile's mode.
Returns
-------
:class:`pandas.Series`
The output time series including the Exponential profile with
amplitude 1.
'''
pass
def to_dict(self: Self) -> dict[str, Any]:
'''
Converts the Exponential profile to a JSON-serializable dictionary.
Returns
-------
dict[str, Any]
Dictionary containing all profile fields including an extra
``profile_type = "Exponential"`` item.
'''
pass
@classmethod
def from_dict(cls: Type[Self], profile_dict: dict[str, Any]) -> Self:
'''
Creates an Exponential object from a dictionary.
The key-value pairs of the dictionary must correspond to the
constructor arguments of the profile.
Parameters
----------
profile_dict : dict[str, Any]
Dictionary containing all profile fields
Returns
-------
Exponential
Exponential object with fields from ``profile_dict``
'''
pass
| 11
| 6
| 25
| 3
| 11
| 11
| 2
| 1.3
| 1
| 7
| 0
| 0
| 2
| 0
| 5
| 92
| 172
| 27
| 63
| 26
| 41
| 82
| 39
| 15
| 30
| 3
| 6
| 1
| 10
|
328,379
|
e-dyn/gloria
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/e-dyn_gloria/gloria/profiles.py
|
gloria.profiles.Gaussian
|
from pydantic import BaseModel, ConfigDict, Field, ValidationInfo, field_validator
from typing import Any, Type
import numpy as np
from typing_extensions import Self
import pandas as pd
from gloria.utilities.types import Timedelta
class Gaussian(Profile):
"""
A Gaussian shaped profile with ``order`` parameter for generating flat-top
Gaussians.
For a given time :math:`t` the profile can be described by
.. math::
f(t) = \\exp\\left(-\\left(
\\frac{\\left(t-t_0\\right)^2}{2\\sigma^2}
\\right)^n\\right)
with ``width=sigma`` and ``order=n`` being constructor parameters as well
as ``t_anchor=t_0`` the input of :meth:`~gloria.Gaussian.generate`. For
:math:`n=1` the function is a simple Gaussian and for increasing :math:`n`
its maximum region increasingly flattens. The following plot illustrates
the Gaussian function for different :math:`n`.
.. image:: ../pics/example_gaussian.png
:align: center
:width: 500
:alt: Example plot of a Gaussian function.
Parameters
----------
width : :class:`pandas.Timedelta` | str
Temporal width of the Gaussian function given as
:class:`pandas.Timedelta` or string representing such.
order : float
Controls the flatness of the Gaussian function with ``order=1`` being a
usual Gaussian and a flat-top function for increasing ``order``. Must
be greater than 0.
"""
width: Timedelta
order: float = Field(gt=0, default=1.0)
def generate(self: Self, timestamps: pd.Series, t_anchor: pd.Timestamp) -> pd.Series:
"""
Generate a time series with a single Gaussian profile.
Parameters
----------
timestamps : :class:`pandas.Series`
The input timestamps at which the Gaussian profile is to be
evaluated.
t_anchor : :class:`pandas.Timestamp`
Location of the Gaussian profile's mode.
Returns
-------
:class:`pandas.Series`
The output time series including the Gaussian profile with
amplitude 1.
"""
t = (timestamps - t_anchor) / self.width
return np.exp(-(0.5 * t ** 2) ** self.order)
def to_dict(self: Self) -> dict[str, Any]:
"""
Converts the Gaussian profile to a JSON-serializable dictionary.
Returns
-------
dict[str, Any]
Dictionary containing all profile fields including an extra
``profile_type = "Gaussian"`` item.
"""
profile_dict = super().to_dict()
profile_dict['width'] = str(self.width)
profile_dict['order'] = self.order
return profile_dict
@classmethod
def from_dict(cls: Type[Self], profile_dict: dict[str, Any]) -> Self:
"""
Creates a Gaussian object from a dictionary.
The key-value pairs of the dictionary must correspond to the
constructor arguments of the profile.
Parameters
----------
profile_dict : dict[str, Any]
Dictionary containing all profile fields
Returns
-------
Gaussian
Gaussian object with fields from ``profile_dict``
"""
profile_dict['width'] = pd.Timedelta(profile_dict['width'])
return cls(**profile_dict)
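To make the flat-top behaviour concrete, this standalone snippet evaluates the same expression as Gaussian.generate for several orders; only numpy and pandas are required, and the dates and width are arbitrary example values.
import numpy as np
import pandas as pd

# Evaluate f(t) = exp(-(0.5 * t'^2)^n) with t' = (t - t_anchor) / width,
# the exact expression used in Gaussian.generate above.
timestamps = pd.Series(pd.date_range("2024-01-01", periods=9, freq="D"))
t_anchor = pd.Timestamp("2024-01-05")
width = pd.Timedelta("2d")

t = (timestamps - t_anchor) / width
for order in (1, 2, 4):  # larger order -> flatter maximum region
    profile = np.exp(-(0.5 * t**2) ** order)
    print(order, profile.round(3).tolist())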
|
class Gaussian(Profile):
'''
A Gaussian-shaped profile with an ``order`` parameter for generating flat-top
Gaussians.
For a given time :math:`t` the profile can be described by
.. math::
f(t) = \exp\left(-\left(
\frac{\left(t-t_0\right)^2}{2\sigma^2}
\right)^n\right)
with ``width=sigma`` and ``order=n`` being constructor parameters as well
as ``t_anchor=t_0`` the input of :meth:`~gloria.Gaussian.generate`. For
:math:`n=1` the function is a simple Gaussian and for increasing :math:`n`
its maximum region increasingly flattens. The following plot illustrates
the Gaussian function for different :math:`n`.
.. image:: ../pics/example_gaussian.png
:align: center
:width: 500
:alt: Example plot of a Gaussian function.
Parameters
----------
width : :class:`pandas.Timedelta` | str
Temporal width of the Gaussian function given as
:class:`pandas.Timedelta` or string representing such.
order : float
Controls the flatness of the Gaussian function: ``order=1`` gives an
ordinary Gaussian, while larger ``order`` yields an increasingly flat-top
function. Must be greater than 0.
'''
def generate(self: Self, timestamps: pd.Series, t_anchor: pd.Timestamp) -> pd.Series:
'''
Generate a time series with a single Gaussian profile.
Parameters
----------
timestamps : :class:`pandas.Series`
The input timestamps at which the Gaussian profile is to be
evaluated.
t_anchor : :class:`pandas.Timestamp`
Location of the Gaussian profile's mode.
Returns
-------
:class:`pandas.Series`
The output time series including the Gaussian profile with
amplitude 1.
'''
pass
def to_dict(self: Self) -> dict[str, Any]:
'''
Converts the Gaussian profile to a JSON-serializable dictionary.
Returns
-------
dict[str, Any]
Dictionary containing all profile fields including an extra
``profile_type = "Gaussian"`` item.
'''
pass
@classmethod
def from_dict(cls: Type[Self], profile_dict: dict[str, Any]) -> Self:
'''
Creates a Gaussian object from a dictionary.
The key-value pairs of the dictionary must correspond to the
constructor arguments of the profile.
Parameters
----------
profile_dict : dict[str, Any]
Dictionary containing all profile fields
Returns
-------
Gaussian
Gaussian object with fields from ``profile_dict``
'''
pass
| 5
| 4
| 21
| 3
| 4
| 14
| 1
| 4
| 1
| 5
| 0
| 0
| 2
| 0
| 3
| 90
| 104
| 19
| 17
| 10
| 10
| 68
| 14
| 7
| 10
| 1
| 6
| 0
| 3
|
328,380
|
e-dyn/gloria
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/e-dyn_gloria/gloria/profiles.py
|
gloria.profiles.Profile
|
from typing import Any, Type
import pandas as pd
from pydantic import BaseModel, ConfigDict, Field, ValidationInfo, field_validator
from typing_extensions import Self
from abc import ABC, abstractmethod
class Profile(BaseModel, ABC):
"""
Abstract base class for all profiles
"""
model_config = ConfigDict(arbitrary_types_allowed=True)
@property
def _profile_type(self: Self):
"""
Returns name of the profile class.
"""
return type(self).__name__
@abstractmethod
def generate(self: Self, timestamps: pd.Series, t_anchor: pd.Timestamp) -> pd.Series:
"""
Generate a time series with a single instance of the profile.
Parameters
----------
timestamps : :class:`pandas.Series`
The input timestamps as independent variable
t_anchor : :class:`pandas.Timestamp`
Location of the profile
Raises
------
NotImplementedError
In case the inheriting Profile class did not implement the generate
method
Returns
-------
:class:`pandas.Series`
The output time series including the profile.
"""
pass
def to_dict(self: Self) -> dict[str, Any]:
"""
Converts the profile to a serializable dictionary.
Returns
-------
dict[str, Any]
Dictionary containing the profile type. All other profile fields
will be added by profile child classes.
"""
profile_dict = {'profile_type': self._profile_type}
return profile_dict
@classmethod
@abstractmethod
def from_dict(cls: Type[Self], profile_dict: dict[str, Any]) -> Self:
"""
Forward declaration of class method for static type checking.
See details in profile_from_dict().
"""
pass
@classmethod
def check_for_missing_keys(cls: Type[Self], profile_dict: dict[str, Any]) -> None:
"""
Confirms that all required fields for the requested profile type are
found in the profile dictionary.
Parameters
----------
profile_dict : dict[str, Any]
Dictionary containing all profile fields
Raises
------
KeyError
Raised if any keys are missing
Returns
-------
None
"""
required_fields = {name for name, info in cls.model_fields.items() if info.is_required()}
missing_keys = required_fields - set(profile_dict.keys())
if missing_keys:
missing_keys_str = ', '.join([f"'{key}'" for key in missing_keys])
raise KeyError(f"Key(s) {missing_keys_str} required for profile of type '{cls.__name__}' but not found in profile dictionary.")
|
class Profile(BaseModel, ABC):
'''
Abstract base class for all profiles
'''
@property
def _profile_type(self: Self):
'''
Returns name of the profile class.
'''
pass
@abstractmethod
def generate(self: Self, timestamps: pd.Series, t_anchor: pd.Timestamp) -> pd.Series:
'''
Generate a time series with a single instance of the profile.
Parameters
----------
timestamps : :class:`pandas.Series`
The input timestamps as independent variable
t_anchor : :class:`pandas.Timestamp`
Location of the profile
Raises
------
NotImplementedError
In case the inheriting Profile class did not implement the generate
method
Returns
-------
:class:`pandas.Series`
The output time series including the profile.
'''
pass
def to_dict(self: Self) -> dict[str, Any]:
'''
Converts the profile to a serializable dictionary.
Returns
-------
dict[str, Any]
Dictionary containing the profile type. All other profile fields
will be added by profile child classes.
'''
pass
@classmethod
@abstractmethod
def from_dict(cls: Type[Self], profile_dict: dict[str, Any]) -> Self:
'''
Forward declaration of class method for static type checking.
See details in profile_from_dict().
'''
pass
@classmethod
def check_for_missing_keys(cls: Type[Self], profile_dict: dict[str, Any]) -> None:
'''
Confirms that all required fields for the requested profile type are
found in the profile dictionary.
Parameters
----------
profile_dict : dict[str, Any]
Dictionary containing all profile fields
Raises
------
KeyError
Raised if any keys are missing
Returns
-------
None
'''
pass
| 11
| 6
| 17
| 1
| 5
| 10
| 1
| 1.57
| 2
| 6
| 0
| 4
| 3
| 0
| 5
| 87
| 103
| 13
| 35
| 20
| 20
| 55
| 17
| 11
| 11
| 2
| 5
| 1
| 6
|
328,381
|
e-dyn/gloria
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/e-dyn_gloria/gloria/protocols/calendric.py
|
gloria.protocols.calendric.CalendricData
|
from gloria.utilities.misc import infer_sampling_period
import numpy as np
from gloria.profiles import BoxCar, Profile
from gloria.utilities.constants import _HOLIDAY
from gloria.utilities.logging import get_logger
from typing_extensions import Self
from gloria.protocols.protocol_base import Protocol
from typing import TYPE_CHECKING, Any, Optional, Type, Union, cast
from pydantic import Field, field_validator
import pandas as pd
class CalendricData(Protocol):
"""
Manage calendar-driven seasonal cycles and public-holiday effects for a
:class:`Gloria` forecaster.
The protocol contributes:
* **Seasonalities** - yearly, quarterly, monthly, weekly, and daily terms.
* **Holidays** - :class:`Holiday` event regressors for every public
holiday in ``country`` and (optionally) ``subdiv``.
Parameters
----------
country : str | None
Two-letter `ISO 3166-1 alpha-2 code <https://tinyurl.com/msw8fajk>`_ of the
country (e.g. ``"US"``, ``"DE"``). If ``None`` (default), no holiday
regressors are created.
subdiv : str | None
An optional `ISO 3166-2 subdivision code <https://tinyurl.com/2b432nrx>`_
(e.g. state, province, etc.). If ``None``, only nationwide holidays are
considered.
holiday_prior_scale : float | None
Parameter modulating the strength of all holiday regressors. Larger
values allow the model to fit larger holiday impact, smaller values
dampen the impact. Must be larger than 0. If ``None`` (default),
the forecaster's ``event_prior_scale`` is used.
holiday_profile : :ref:`Profile <ref-profiles>`
Profile object that defines the temporal shape of each holiday
regressor. The default is a one-day :class:`BoxCar` profile replicating
Prophet-style holiday regressors.
seasonality_prior_scale : float | None
Global strength parameter for every seasonality added by the protocol.
Larger values permit stronger seasonal variation, smaller values dampen
it. Must be larger than 0. If ``None`` (default), the forecaster's
``seasonality_prior_scale`` is used.
yearly_seasonality, quarterly_seasonality, monthly_seasonality, weekly_seasonality, daily_seasonality : bool | int | "auto"
Configures how to add the respective seasonality to the model. Details
are given below.
.. rubric:: Seasonality Options
The behaviour of the seasonal components is controlled by the
``yearly_seasonality``, ``quarterly_seasonality``, ``monthly_seasonality``,
``weekly_seasonality``, and ``daily_seasonality`` parameters. Valid values
are:
* ``True``: add the seasonality with the default maximum Fourier order
(see table below).
* ``False``: do **not** add the seasonality.
* ``"auto"``: add the seasonality if the data span at least two full
cycles. Choose the smaller of the default order and the highest order
allowed by the `Nyquist theorem <https://tinyurl.com/425tj4wb>`_ as
maximum order.
* ``integer >= 1``: add the seasonality with that integer as the maximum
order.
.. rubric:: Default Maximum Orders
+-----------+------------+-------------------+
| **Name** | **Period** | **Default Order** |
+===========+============+===================+
| yearly | 365.25 d | 10 |
+-----------+------------+-------------------+
| quarterly | 91.31 d | 2 |
+-----------+------------+-------------------+
| monthly | 30.44 d | 3 |
+-----------+------------+-------------------+
| weekly | 7 d | 3 |
+-----------+------------+-------------------+
| daily | 1 d | 4 |
+-----------+------------+-------------------+
.. admonition:: Note on Quarterly Seasonality
:class: caution
The quarterly component is a strict subset of the yearly component.
It is therefore automatically disabled if the yearly seasonality is
enabled, overriding the setting of ``quarterly_seasonality``.
"""
country: Optional[str] = None
subdiv: Optional[str] = None
holiday_prior_scale: Optional[float] = Field(gt=0, default=None)
holiday_profile: Profile = BoxCar(width=pd.Timedelta('1d'))
seasonality_prior_scale: Optional[float] = Field(gt=0, default=None)
yearly_seasonality: Union[bool, str, int] = 'auto'
quarterly_seasonality: Union[bool, str, int] = False
monthly_seasonality: Union[bool, str, int] = False
weekly_seasonality: Union[bool, str, int] = 'auto'
daily_seasonality: Union[bool, str, int] = 'auto'
@field_validator('holiday_profile', mode='before')
@classmethod
def validate_holiday_profile(cls: Type[Self], holiday_profile: Union[Profile, dict[str, Any]]) -> Profile:
"""
If the input profile was given as a dictionary, this before-validator
attempts to convert it to a Profile.
"""
try:
if isinstance(holiday_profile, dict):
return Profile.from_dict(holiday_profile)
except Exception as e:
raise ValueError(f'Creating profile from dictionary failed: {e}') from e
return holiday_profile
@field_validator(*(s + '_seasonality' for s in DEFAULT_SEASONALITIES.keys()))
@classmethod
def validate_seasonality_arg(cls: Type[Self], arg: Union[bool, str, int]) -> Union[bool, str, int]:
"""
Validates the xy_seasonality arguments, which must be 'auto', boolean,
or an integer >=1.
"""
if isinstance(arg, str) and arg == 'auto':
return arg
if isinstance(arg, bool):
return arg
if isinstance(arg, int) and arg >= 1:
return arg
raise ValueError("Must be 'auto', a boolean, or an integer >= 1.")
def set_events(self: Self, model: 'Gloria', timestamps: pd.Series) -> 'Gloria':
"""
Adds all holidays for specified country and subdivision to the Gloria
object.
Only holidays whose dates fall within the span covered by
``timestamps`` are added; all others are ignored.
.. note::
You may call :meth:`set_events` directly to add the holidays.
When the protocol is registered via :meth:`Gloria.add_protocol`,
however, it is invoked automatically during
:meth:`Gloria.fit`, so an explicit call is rarely required.
Parameters
----------
model : :class:`Gloria`
The Gloria model to be updated
timestamps : :class:`pandas.Series`
A Series of :class:`pandas.Timestamp`. Only holidays within the
range set by ``timestamps`` will be added to the model.
Returns
-------
:class:`Gloria`
The updated Gloria model.
"""
ps = self.holiday_prior_scale
ps = model.event_prior_scale if ps is None else ps
if self.country is not None:
holiday_df = make_holiday_dataframe(timestamps=timestamps, country=self.country, subdiv=self.subdiv)
holiday_names = set(holiday_df[_HOLIDAY].unique())
for holiday in holiday_names:
if holiday in model.events:
get_logger().info(f"Skipping calendric protocol holiday '{holiday}' as as it was added to the model before.")
continue
model.add_event(name=holiday, prior_scale=ps, regressor_type='Holiday', profile=self.holiday_profile, country=self.country, subdiv=self.subdiv)
return model
def set_seasonalities(self: Self, model: 'Gloria', timestamps: pd.Series) -> 'Gloria':
"""
Adds yearly, quarterly, monthly, weekly, daily seasonalities to the
Gloria object.
The ruleset governing whether and how each seasonality is added is
described in detail in the :class:`CalendricData` constructor.
.. note::
You may call :meth:`set_seasonalities` directly to add the features.
When the protocol is registered via :meth:`Gloria.add_protocol`,
however, it is invoked automatically during
:meth:`Gloria.fit`, so an explicit call is rarely required.
Parameters
----------
model : :class:`Gloria`
The Gloria model to be updated
timestamps : :class:`pandas.Series`
A Series of :class:`pandas.Timestamp`.
Returns
-------
:class:`Gloria`
The updated Gloria model.
"""
ps = self.seasonality_prior_scale
ps = model.seasonality_prior_scale if ps is None else ps
inferred_sampling_period = infer_sampling_period(timestamps, q=0.3)
timespan = timestamps.max() - timestamps.min() + inferred_sampling_period
skip_quarterly = self.yearly_seasonality is True or (self.yearly_seasonality == 'auto' and timespan / DEFAULT_SEASONALITIES['yearly']['period'] >= 2) or (isinstance(self.yearly_seasonality, int) and self.yearly_seasonality > 3)
for season, prop in DEFAULT_SEASONALITIES.items():
if season in model.seasonalities:
get_logger().info(f"Skipping calendric protocol seasonality '{season}' as it was added to the model before.")
continue
period_loc = cast(pd.Timedelta, prop['period'])
default_order_loc = cast(int, prop['default_order'])
if season == 'quarterly' and skip_quarterly:
get_logger().info('Quarterly seasonality will not be added to Gloria model due to interference with yearly seasonality.')
continue
add_mode = self.__dict__[season + '_seasonality']
if add_mode is True:
fourier_order = default_order_loc
elif add_mode is False:
continue
elif add_mode == 'auto':
if timespan / period_loc < 2:
get_logger().info(f'Disabling {season} season. Configure protocol with {season}_seasonality = True to overwrite this.')
continue
max_order = int(np.floor(period_loc / (2 * inferred_sampling_period)))
fourier_order = min(default_order_loc, max_order)
if fourier_order == 0:
get_logger().info(f'Disabling {season} season. Configure protocol with {season}_seasonality = True to overwrite this.')
continue
else:
fourier_order = add_mode
model.add_seasonality(name=season, period=str(period_loc), fourier_order=fourier_order, prior_scale=ps)
return model
def to_dict(self: Self) -> dict[str, Any]:
"""
Converts the calendric data protocol to a JSON-serializable dictionary.
Returns
-------
dict[str, Any]
Dictionary containing all protocol fields.
"""
protocol_dict = {**super().to_dict(), **self.model_dump(), 'holiday_profile': self.holiday_profile.to_dict()}
return protocol_dict
@classmethod
def from_dict(cls: Type[Self], protocol_dict: dict[str, Any]) -> Self:
"""
Creates a CalendricData protocol from a dictionary.
The key-value pairs of the dictionary must correspond to the
constructor arguments of the protocol.
Parameters
----------
protocol_dict : dict[str, Any]
Dictionary containing all protocol fields
Returns
-------
:class:`CalendricData`
CalendricData protocol object with fields from ``protocol_dict``
"""
cls.check_for_missing_keys(protocol_dict)
return cls(**protocol_dict)
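The "auto" branch of set_seasonalities combines two rules: require at least two full cycles in the data, and cap the Fourier order at the Nyquist limit period / (2 * sampling_period). A standalone sketch of that arithmetic with assumed example values (a weekly season sampled daily over 30 days):
import numpy as np
import pandas as pd

period = pd.Timedelta("7d")           # weekly seasonality
sampling_period = pd.Timedelta("1d")  # would normally be inferred from the data
timespan = pd.Timedelta("30d")        # data range incl. one sampling period
default_order = 3                     # default maximum order for 'weekly'

if timespan / period >= 2:            # at least two full cycles present
    max_order = int(np.floor(period / (2 * sampling_period)))  # Nyquist cap
    fourier_order = min(default_order, max_order)
    print(fourier_order)              # 3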
|
class CalendricData(Protocol):
'''
Manage calendar-driven seasonal cycles and public-holiday effects for a
:class:`Gloria` forecaster.
The protocol contributes:
* **Seasonalities** - yearly, quarterly, monthly, weekly, and daily terms.
* **Holidays** - :class:`Holiday` event regressors for every public
holiday in ``country`` and (optionally) ``subdiv``.
Parameters
----------
country : str | None
Two-letter `ISO 3166-1 alpha-2 code <https://tinyurl.com/msw8fajk>`_ of the
country (e.g. ``"US"``, ``"DE"``). If ``None`` (default), no holiday
regressors are created.
subdiv : str | None
An optional `ISO 3166-2 subdivision code <https://tinyurl.com/2b432nrx>`_
(e.g. state, province, etc.). If ``None``, only nationwide holidays are
considered.
holiday_prior_scale : float | None
Parameter modulating the strength of all holiday regressors. Larger
values allow the model to fit larger holiday impact, smaller values
dampen the impact. Must be larger than 0. If ``None`` (default),
the forecaster's ``event_prior_scale`` is used.
holiday_profile : :ref:`Profile <ref-profiles>`
Profile object that defines the temporal shape of each holiday
regressor. The default is a one-day :class:`BoxCar` profile replicating
Prophet-style holiday regressors.
seasonality_prior_scale : float | None
Global strength parameter for every seasonality added by the protocol.
Larger values permit stronger seasonal variation, smaller values dampen
it. Must be larger than 0. If ``None`` (default), the forecaster's
``seasonality_prior_scale`` is used.
yearly_seasonality, quarterly_seasonality, monthly_seasonality, weekly_seasonality, daily_seasonality : bool | int | "auto"
Configures how to add the respective seasonality to the model. Details
are given below.
.. rubric:: Seasonality Options
The behaviour of the seasonal components is controlled by the
``yearly_seasonality``, ``quarterly_seasonality``, ``monthly_seasonality``,
``weekly_seasonality``, and ``daily_seasonality`` parameters. Valid values
are:
* ``True``: add the seasonality with the default maximum Fourier order
(see table below).
* ``False``: do **not** add the seasonality.
* ``"auto"``: add the seasonality if the data span at least two full
cycles. Choose the smaller of the default order and the highest order
allowed by the `Nyquist theorem <https://tinyurl.com/425tj4wb>`_ as
maximum order.
* ``integer >= 1``: add the seasonality with that integer as the maximum
order.
.. rubric:: Default Maximum Orders
+-----------+------------+-------------------+
| **Name** | **Period** | **Default Order** |
+===========+============+===================+
| yearly | 365.25 d | 10 |
+-----------+------------+-------------------+
| quarterly | 91.31 d | 2 |
+-----------+------------+-------------------+
| monthly | 30.44 d | 3 |
+-----------+------------+-------------------+
| weekly | 7 d | 3 |
+-----------+------------+-------------------+
| daily | 1 d | 4 |
+-----------+------------+-------------------+
.. admonition:: Note on Quarterly Seasonality
:class: caution
The quarterly component is a strict subset of the yearly component.
It is therefore automatically disabled if the yearly seasonality is
enabled, overriding the setting of ``quarterly_seasonality``.
'''
@field_validator('holiday_profile', mode='before')
@classmethod
def validate_holiday_profile(cls: Type[Self], holiday_profile: Union[Profile, dict[str, Any]]) -> Profile:
'''
If the input profile was given as a dictionary, this before-validator
attempts to convert it to a Profile.
'''
pass
@field_validator(*(s + '_seasonality' for s in DEFAULT_SEASONALITIES.keys()))
@classmethod
def validate_seasonality_arg(cls: Type[Self], arg: Union[bool, str, int]) -> Union[bool, str, int]:
'''
Validates the xy_seasonality arguments, which must be 'auto', boolean,
or an integer >=1.
'''
pass
def set_events(self: Self, model: 'Gloria', timestamps: pd.Series) -> 'Gloria':
'''
Adds all holidays for specified country and subdivision to the Gloria
object.
Only holidays whose dates fall within the span covered by
``timestamps`` are added; all others are ignored.
.. note::
You may call :meth:`set_events` directly to add the holidays.
When the protocol is registered via :meth:`Gloria.add_protocol`,
however, it is invoked automatically during
:meth:`Gloria.fit`, so an explicit call is rarely required.
Parameters
----------
model : :class:`Gloria`
The Gloria model to be updated
timestamps : :class:`pandas.Series`
A Series of :class:`pandas.Timestamp`. Only holidays within the
range set by ``timestamps`` will be added to the model.
Returns
-------
:class:`Gloria`
The updated Gloria model.
'''
pass
def set_seasonalities(self: Self, model: 'Gloria', timestamps: pd.Series) -> 'Gloria':
'''
Adds yearly, quarterly, monthly, weekly, daily seasonalities to the
Gloria object.
The ruleset governing whether and how each seasonality is added is
described in detail in the :class:`CalendricData` constructor.
.. note::
You may call :meth:`set_seasonalities` directly to add the features.
When the protocol is registered via :meth:`Gloria.add_protocol`,
however, it is invoked automatically during
:meth:`Gloria.fit`, so an explicit call is rarely required.
Parameters
----------
model : :class:`Gloria`
The Gloria model to be updated
timestamps : :class:`pandas.Series`
A Series of :class:`pandas.Timestamp`.
Returns
-------
:class:`Gloria`
The updated Gloria model.
'''
pass
def to_dict(self: Self) -> dict[str, Any]:
'''
Converts the calendric data protocol to a JSON-serializable dictionary.
Returns
-------
dict[str, Any]
Dictionary containing all protocol fields.
'''
pass
@classmethod
def from_dict(cls: Type[Self], protocol_dict: dict[str, Any]) -> Self:
'''
Creates a CalendricData protocol from a dictionary.
The key-value pairs of the dictionary must correspond to the
constructor arguments of the protocol.
Parameters
----------
protocol_dict : dict[str, Any]
Dictionary containing all protocol fields
Returns
-------
:class:`CalendricData`
CalendricData protocol object with fields from ``protocol_dict``
'''
pass
| 12
| 7
| 43
| 4
| 21
| 18
| 4
| 1.22
| 1
| 11
| 1
| 0
| 3
| 0
| 6
| 94
| 365
| 45
| 144
| 46
| 122
| 176
| 74
| 32
| 67
| 10
| 6
| 3
| 24
|
328,382
|
e-dyn/gloria
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/e-dyn_gloria/gloria/protocols/calendric.py
|
gloria.protocols.calendric.Holiday
|
from gloria.utilities.constants import _HOLIDAY
import pandas as pd
from gloria.profiles import BoxCar, Profile
from typing_extensions import Self
from gloria.regressors import IntermittentEvent
from typing import TYPE_CHECKING, Any, Optional, Type, Union, cast
class Holiday(IntermittentEvent):
"""
A regressor to model events coinciding with public holidays.
The regressor is added to the :class:`Gloria` model either using
:meth:`~Gloria.add_event` or by adding the :class:`CalendricData` protocol
via :meth:`Gloria.add_protocol` and does not need to be handled directly by
the user.
Parameters
----------
name : str
A descriptive, unique name to identify the regressor. Note that the
``name`` must equal the desired public holiday name as registered in
the `holiday <https://holidays.readthedocs.io/en/latest/>`_ package.
The function :func:`get_holidays` may be used to inspect valid
holiday names.
prior_scale : float
Parameter modulating the strength of the regressors. Larger values
allow the model to fit a larger impact of the event, smaller
values dampen the impact. Must be larger than zero.
profile : Profile
The profile that periodically occurs. Allowed profile types are
described in the :ref:`ref-profiles` section.
t_list : list[:class:`pandas.Timestamp`]
A list of timestamps at which ``profile`` occurs. The exact meaning of
each timestamp in the list depends on implementation details of the
underlying ``profile``, but typically refers to its mode.
.. note::
A user-provided ``t_list`` will be ignored and overwritten with an
automatically generated list of holiday occurrences.
country : str
The `ISO 3166-1 alpha-2 code <https://tinyurl.com/msw8fajk>`_ of the
holiday's country.
subdiv : str | None
The `ISO 3166-2 code <https://tinyurl.com/2b432nrx>`_ of the
country's subdivision, if applicable.
"""
country: str
subdiv: Optional[str] = None
def to_dict(self: Self) -> dict[str, Any]:
"""
Converts the holiday regressor to a JSON-serializable
dictionary.
Returns
-------
dict[str, Any]
Dictionary containing all regressor fields including an extra
``regressor_type = "Holiday"`` item.
"""
regressor_dict = super().to_dict()
regressor_dict.pop('t_list')
regressor_dict['country'] = self.country
regressor_dict['subdiv'] = self.subdiv
return regressor_dict
@classmethod
def from_dict(cls: Type[Self], regressor_dict: dict[str, Any]) -> Self:
"""
Creates a Holiday object from a dictionary.
The key-value pairs of the dictionary must correspond to the
constructor arguments of the regressor.
Parameters
----------
regressor_dict : dict[str, Any]
Dictionary containing all regressor fields
Returns
-------
Holiday
Holiday regressor instance with fields from
``regressor_dict``
"""
cls.check_for_missing_keys(regressor_dict)
regressor_dict['profile'] = Profile.from_dict(regressor_dict['profile'])
return cls(**regressor_dict)
def get_t_list(self: Self, t: pd.Series) -> list[pd.Timestamp]:
"""
Yields a list of timestamps of holiday occurrences within the range of
input timestamps.
Parameters
----------
t : :class:`pandas.Series`
A pandas series of :class:`pandas.Timestamp`.
Returns
-------
t_list : list[:class:`pandas.Timestamp`]
A list of timestamps of holiday occurrences.
"""
t_name = 'dummy'
holiday_df = make_holiday_dataframe(timestamps=t, country=self.country, subdiv=self.subdiv, timestamp_name=t_name)
t_list = holiday_df[t_name].loc[holiday_df[_HOLIDAY] == self.name].to_list()
return t_list
def get_impact(self: Self, t: pd.Series) -> float:
"""
Calculate fraction of overall profiles occurring within a timerange.
Parameters
----------
t : :class:`pandas.Series`
A series of :class:`pandas.Timestamp`.
Returns
-------
impact : float
Fraction of overall profiles occurring between minimum and maximum
date of ``t``.
"""
t_list = self.get_t_list(t)
if len(t_list) == 0:
return 0.0
impact = sum((float(t.min() <= t0 <= t.max()) for t0 in t_list))
impact /= len(t_list)
return impact
def make_feature(self: Self, t: pd.Series, regressor: Optional[pd.Series]=None) -> tuple[pd.DataFrame, dict]:
"""
Create the feature matrix for the holiday regressor.
Parameters
----------
t : :class:`pandas.Series`
A series of :class:`pandas.Timestamp` at which the regressor has to
be evaluated
regressor : :class:`pandas.Series`
Contains the values for the regressor that will be added to the
feature matrix unchanged. Only has effect for
:class:`ExternalRegressor`. Any input will be ignored for
:class:`Holiday`.
Returns
-------
X : :class:`pandas.DataFrame`
The feature matrix containing the data of the regressor.
prior_scales : dict
A map for ``feature matrix column name`` → ``prior_scale``.
"""
self.t_list = self.get_t_list(t)
return super().make_feature(t)
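Holiday.get_impact reduces to a simple fraction: how many anchor timestamps fall inside [t.min(), t.max()]. A self-contained illustration with made-up dates (no gloria or holidays package needed):
import pandas as pd

t = pd.Series(pd.date_range("2024-01-01", "2024-12-31", freq="D"))
t_list = [pd.Timestamp("2023-12-25"), pd.Timestamp("2024-07-04"),
          pd.Timestamp("2024-12-25"), pd.Timestamp("2025-01-01")]

# Same computation as get_impact: fraction of anchors inside the time range.
impact = sum(float(t.min() <= t0 <= t.max()) for t0 in t_list) / len(t_list)
print(impact)  # 0.5 -> two of the four anchors lie within 2024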
|
class Holiday(IntermittentEvent):
'''
A regressor to model events coinciding with public holidays.
The regressor is added to the :class:`Gloria` model either using
:meth:`~Gloria.add_event` or by adding the :class:`CalendricData` protocol
via :meth:`Gloria.add_protocol` and does not need to be handled directly by
the user.
Parameters
----------
name : str
A descriptive, unique name to identify the regressor. Note that the
``name`` must equal the desired public holiday name as registered in
the `holiday <https://holidays.readthedocs.io/en/latest/>`_ package.
The function :func:`get_holidays` may be used to inspect valid
holiday names.
prior_scale : float
Parameter modulating the strength of the regressors. Larger values
allow the model to fit a larger impact of the event, smaller
values dampen the impact. Must be larger than zero.
profile : Profile
The profile that periodically occurs. Allowed profile types are
described in the :ref:`ref-profiles` section.
t_list : list[:class:`pandas.Timestamp`]
A list of timestamps at which ``profile`` occurs. The exact meaning of
each timestamp in the list depends on implementation details of the
underlying ``profile``, but typically refers to its mode.
.. note::
A user-provided ``t_list`` will be ignored and overwritten with an
automatically generated list of holiday occurrences.
country : str
The `ISO 3166-1 alpha-2 code <https://tinyurl.com/msw8fajk>`_ of the
holiday's country.
subdiv : str | None
The `ISO 3166-2 code <https://tinyurl.com/2b432nrx>`_ of the
country's subdivision, if applicable.
'''
def to_dict(self: Self) -> dict[str, Any]:
'''
Converts the holiday regressor to a JSON-serializable
dictionary.
Returns
-------
dict[str, Any]
Dictionary containing all regressor fields including an extra
``regressor_type = "Holiday"`` item.
'''
pass
@classmethod
def from_dict(cls: Type[Self], regressor_dict: dict[str, Any]) -> Self:
'''
Creates a Holiday object from a dictionary.
The key-value pairs of the dictionary must correspond to the
constructor arguments of the regressor.
Parameters
----------
regressor_dict : dict[str, Any]
Dictionary containing all regressor fields
Returns
-------
Holiday
Holiday regressor instance with fields from
``regressor_dict``
'''
pass
def get_t_list(self: Self, t: pd.Series) -> list[pd.Timestamp]:
'''
Yields a list of timestamps of holiday occurrences within the range of
input timestamps.
Parameters
----------
t : :class:`pandas.Series`
A pandas series of :class:`pandas.Timestamp`.
Returns
-------
t_list : list[:class:`pandas.Timestamp`]
A list of timestamps of holiday occurrences.
'''
pass
def get_impact(self: Self, t: pd.Series) -> float:
'''
Calculate fraction of overall profiles occurring within a timerange.
Parameters
----------
t : :class:`pandas.Series`
A series of :class:`pandas.Timestamp`.
Returns
-------
impact : float
Fraction of overall profiles occurring between minimum and maximum
date of ``t``.
'''
pass
def make_feature(self: Self, t: pd.Series, regressor: Optional[pd.Series]=None) -> tuple[pd.DataFrame, dict]:
'''
Create the feature matrix for the holiday regressor.
Parameters
----------
t : :class:`pandas.Series`
A series of :class:`pandas.Timestamp` at which the regressor has to
be evaluated
regressor : :class:`pandas.Series`
Contains the values for the regressor that will be added to the
feature matrix unchanged. Only has effect for
:class:`ExternalRegressor`. Any input will be ignored for
:class:`Holiday`.
Returns
-------
X : :class:`pandas.DataFrame`
The feature matrix containing the data of the regressor.
prior_scales : dict
A map for ``feature matrix column name`` → ``prior_scale``.
'''
pass
| 7
| 6
| 27
| 3
| 7
| 16
| 1
| 2.95
| 1
| 9
| 1
| 0
| 4
| 1
| 5
| 98
| 184
| 26
| 40
| 17
| 31
| 118
| 28
| 14
| 22
| 2
| 8
| 1
| 6
|
328,383
|
e-dyn/gloria
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/e-dyn_gloria/gloria/protocols/protocol_base.py
|
gloria.protocols.protocol_base.Protocol
|
from abc import ABC, abstractmethod
import pandas as pd
from typing import TYPE_CHECKING, Any, Type
from pydantic import BaseModel, ConfigDict
from typing_extensions import Self
class Protocol(ABC, BaseModel):
"""
Protocols can be added to Gloria models in order to configure them based
on the type of data that the model is supposed to fit.
This abstract base class defines the Protocol interface and some basic
functionalities
"""
model_config = ConfigDict(arbitrary_types_allowed=True)
@property
def _protocol_type(self: Self) -> str:
"""
Returns name of the protocol class.
"""
return type(self).__name__
def to_dict(self: Self) -> dict[str, Any]:
"""
Converts the Protocol to a serializable dictionary.
Returns
-------
dict[str, Any]
Dictionary containing only the protocol type. Keys corresponding to
other model fields will be added by the subclasses.
"""
protocol_dict = {'protocol_type': self._protocol_type}
return protocol_dict
@classmethod
@abstractmethod
def from_dict(cls: Type[Self], protocol_dict: dict[str, Any]) -> Self:
"""
Forward declaration of class method for static type checking.
See details in protocol_from_dict().
"""
pass
@classmethod
def check_for_missing_keys(cls: Type[Self], protocol_dict: dict[str, Any]) -> None:
"""
Confirms that all required fields for the requested protocol type are
found in the protocol dictionary.
Parameters
----------
protocol_dict : dict[str, Any]
Dictionary containing all protocol fields
Raises
------
KeyError
Raised if any keys are missing
Returns
-------
None
"""
required_fields = {name for name, info in cls.model_fields.items() if info.is_required()}
missing_keys = required_fields - set(protocol_dict.keys())
if missing_keys:
missing_keys_str = ', '.join([f"'{key}'" for key in missing_keys])
raise KeyError(f'Key(s) {missing_keys_str} required for protocols of type {cls.__name__} but not found in protocol dictionary.')
@abstractmethod
def set_seasonalities(self, model: 'Gloria', timestamps: pd.Series) -> 'Gloria':
"""
Determines valid seasonalities according to protocol and input
timestamps and adds them to the model.
Parameters
----------
model : Gloria
The model the protocol should be applied to.
timestamps : pd.Series
A pandas series containing timestamps.
Returns
-------
None
"""
pass
@abstractmethod
def set_events(self, model: 'Gloria', timestamps: pd.Series) -> 'Gloria':
"""
Determines valid events according to protocol and input timestamps and
adds them to the model.
Parameters
----------
model : Gloria
The model the protocol should be applied to.
timestamps : pd.Series
A pandas series containing timestamps.
Returns
-------
None
"""
pass
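Because pydantic's model metaclass builds on ABCMeta, combining ABC with BaseModel as above enforces abstract methods as usual. A minimal standalone sketch (hypothetical Base/Impl classes, not gloria code):
from abc import ABC, abstractmethod
from pydantic import BaseModel

class Base(BaseModel, ABC):
    @abstractmethod
    def run(self) -> str: ...

class Impl(Base):
    def run(self) -> str:
        return type(self).__name__  # mirrors the _protocol_type property

print(Impl().run())  # Impl
# Instantiating Base() raises TypeError, since 'run' is still abstract.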
|
class Protocol(ABC, BaseModel):
'''
Protocols can be added to Gloria models in order to configure them based
on the type of data that the model is supposed to fit.
This abstract base class defines the Protocol interface and some basic
functionalities
'''
@property
def _protocol_type(self: Self) -> str:
'''
Returns name of the protocol class.
'''
pass
def to_dict(self: Self) -> dict[str, Any]:
'''
Converts the Protocol to a serializable dictionary.
Returns
-------
dict[str, Any]
Dictionary containing only the protocol type. Keys corresponding to
other model fields will be added by the subclasses.
'''
pass
@classmethod
@abstractmethod
def from_dict(cls: Type[Self], protocol_dict: dict[str, Any]) -> Self:
'''
Forward declaration of class method for static type checking.
See details in protocol_from_dict().
'''
pass
@classmethod
def check_for_missing_keys(cls: Type[Self], protocol_dict: dict[str, Any]) -> None:
'''
Confirms that all required fields for the requested protocol type are
found in the protocol dictionary.
Parameters
----------
protocol_dict : dict[str, Any]
Dictionary containing all protocol fields
Raises
------
KeyError
Raised if any keys are missing
Returns
-------
None
'''
pass
@abstractmethod
def set_seasonalities(self, model: 'Gloria', timestamps: pd.Series) -> 'Gloria':
'''
Determines valid seasonalities according to protocol and input
timestamps and adds them to the model.
Parameters
----------
model : Gloria
The model the protocol should be applied to.
timestamps : pd.Series
A pandas series containing timestamps.
Returns
-------
None
'''
pass
@abstractmethod
def set_events(self, model: 'Gloria', timestamps: pd.Series) -> 'Gloria':
'''
Determines valid events according to protocol and input timestamps and
adds them to the model.
Parameters
----------
model : Gloria
The model the protocol should be applied to.
timestamps : pd.Series
A pandas series containing timestamps.
Returns
-------
None
'''
pass
| 13
| 7
| 17
| 2
| 5
| 10
| 1
| 1.69
| 2
| 6
| 0
| 1
| 4
| 0
| 6
| 88
| 123
| 18
| 39
| 22
| 22
| 66
| 19
| 12
| 12
| 2
| 5
| 1
| 7
|
328,384
|
e-dyn/gloria
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/e-dyn_gloria/gloria/regressors.py
|
gloria.regressors.EventRegressor
|
import pandas as pd
from typing import Any, Optional, Type
from typing_extensions import Self
from abc import ABC, abstractmethod
from gloria.profiles import Profile
class EventRegressor(Regressor):
"""
A base class used to create a regressor based on an event
"""
profile: Profile
def to_dict(self: Self) -> dict[str, Any]:
"""
Converts the EventRegressor to a serializable dictionary.
Returns
-------
dict[str, Any]
Dictionary containing all regressor fields
"""
regressor_dict = super().to_dict()
regressor_dict['profile'] = self.profile.to_dict()
return regressor_dict
@abstractmethod
def get_impact(self: Self, t: pd.Series) -> float:
"""
Calculates the fraction of overall profiles within the timestamp range
"""
pass
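The to_dict override shows the composition pattern used throughout: each layer serializes its own fields and nests the independently serialized profile. A plain-Python sketch of the resulting shape, using hypothetical stand-in classes rather than the pydantic originals:
from typing import Any

class FakeProfile:
    def to_dict(self) -> dict[str, Any]:
        return {"profile_type": "BoxCar", "width": "1 days"}

class FakeEventRegressor:
    def __init__(self, name: str, prior_scale: float, profile: FakeProfile):
        self.name, self.prior_scale, self.profile = name, prior_scale, profile

    def to_dict(self) -> dict[str, Any]:
        # Base fields first, then the nested, independently serialized profile.
        return {"name": self.name, "prior_scale": self.prior_scale,
                "profile": self.profile.to_dict()}

print(FakeEventRegressor("xmas", 10.0, FakeProfile()).to_dict())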
|
class EventRegressor(Regressor):
'''
A base class used to create a regressor based on an event
'''
def to_dict(self: Self) -> dict[str, Any]:
'''
Converts the EventRegressor to a serializable dictionary.
Returns
-------
dict[str, Any]
Dictionary containing all regressor fields
'''
pass
@abstractmethod
def get_impact(self: Self, t: pd.Series) -> float:
'''
Calculates the fraction of overall profiles within the timestamp range
'''
pass
| 4
| 3
| 10
| 1
| 3
| 6
| 1
| 1.78
| 1
| 6
| 0
| 2
| 2
| 0
| 2
| 89
| 29
| 4
| 9
| 5
| 5
| 16
| 8
| 4
| 5
| 1
| 6
| 0
| 2
|
328,385
|
e-dyn/gloria
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/e-dyn_gloria/gloria/regressors.py
|
gloria.regressors.ExternalRegressor
|
import pandas as pd
from typing import Any, Optional, Type
from gloria.utilities.constants import _DELIM
from typing_extensions import Self
class ExternalRegressor(Regressor):
"""
A regressor based on user-provided data.
The regressor is added to the :class:`Gloria` model using
:meth:`~Gloria.add_external_regressor` and does not need to be handled
directly by the user. Instead of synthesizing the regressor data, they must
be provided to :meth:`~Gloria.fit` as part of the input data frame.
Parameters
----------
name : str
A descriptive, unique name to identify the regressor
prior_scale : float
Parameter modulating the strength of the regressors. Larger values
allow the model to fit larger impact, smaller values dampen the impact.
Must be larger than zero.
"""
@classmethod
def from_dict(cls: Type[Self], regressor_dict: dict[str, Any]) -> Self:
"""
Creates an ExternalRegressor object from a dictionary.
The key-value pairs of the dictionary must correspond to the
constructor arguments of the regressor.
Parameters
----------
regressor_dict : dict[str, Any]
Dictionary containing all regressor fields.
Returns
-------
ExternalRegressor
ExternalRegressor instance with fields from ``regressor_dict``.
"""
return cls(**regressor_dict)
def make_feature(self: Self, t: pd.Series, regressor: Optional[pd.Series]=None) -> tuple[pd.DataFrame, dict]:
"""
Creates the feature matrix for the external regressor.
Parameters
----------
t : pd.Series
A pandas series of timestamps at which the regressor has to be
evaluated. For ``ExternalRegressor`` this is only used to validate
that the input ``regressor`` data and timestamps ``t`` have
identical shapes.
regressor : pd.Series
Contains the values for the regressor that will be added to the
feature matrix unchanged.
Returns
-------
X : pd.DataFrame
The feature matrix containing the data of the regressor.
prior_scales : dict
A map for ``feature matrix column name`` → ``prior_scale``
"""
if not isinstance(regressor, pd.Series):
raise TypeError('External Regressor must be pandas Series.')
if t.shape[0] != regressor.shape[0]:
raise ValueError(f'Provided data for extra Regressor {self.name} do not have the same length as the timestamp column.')
column = f'{self._regressor_type}{_DELIM}{self.name}'
X = pd.DataFrame({column: regressor.values})
prior_scales = {column: self.prior_scale}
return (X, prior_scales)
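make_feature only validates shapes and copies the user series into a single named column. A standalone sketch follows; the "__" delimiter and the "temperature" regressor name are assumptions for illustration (the real delimiter lives in gloria.utilities.constants._DELIM):
import pandas as pd

_DELIM = "__"  # assumed; see gloria.utilities.constants for the real value
t = pd.Series(pd.date_range("2024-01-01", periods=4, freq="D"))
regressor = pd.Series([0.1, 0.3, 0.2, 0.4])
assert t.shape[0] == regressor.shape[0]  # the same length check as above

column = f"ExternalRegressor{_DELIM}temperature"
X = pd.DataFrame({column: regressor.values})
prior_scales = {column: 10.0}
print(X)
print(prior_scales)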
|
class ExternalRegressor(Regressor):
'''
A regressor based on user-provided data.
The regressor is added to the :class:`Gloria` model using
:meth:`~Gloria.add_external_regressor` and does not need to be handled
directly by the user. Instead of synthesizing the regressor data, they must
be provided to :meth:`~Gloria.fit` as part of the input data frame.
Parameters
----------
name : str
A descriptive, unique name to identify the regressor
prior_scale : float
Parameter modulating the strength of the regressors. Larger values
allow the model to fit larger impact, smaller values dampen the impact.
Must be larger than zero.
'''
@classmethod
def from_dict(cls: Type[Self], regressor_dict: dict[str, Any]) -> Self:
'''
Creates an ExternalRegressor object from a dictionary.
The key-value pairs of the dictionary must correspond to the
constructor arguments of the regressor.
Parameters
----------
regressor_dict : dict[str, Any]
Dictionary containing all regressor fields.
Returns
-------
ExternalRegressor
ExternalRegressor instance with fields from ``regressor_dict``.
'''
pass
def make_feature(self: Self, t: pd.Series, regressor: Optional[pd.Series]=None) -> tuple[pd.DataFrame, dict]:
'''
Creates the feature matrix for the external regressor.
Parameters
----------
t : pd.Series
A pandas series of timestamps at which the regressor has to be
evaluated. For ``ExternalRegressor`` this is only used to validate
that the input ``regressor`` data and timestamps ``t`` have
identical shapes.
regressor : pd.Series
Contains the values for the regressor that will be added to the
feature matrix unchanged.
Returns
-------
X : pd.DataFrame
The feature matrix containing the data of the regressor.
prior_scales : dict
A map for ``feature matrix column name`` → ``prior_scale``
'''
pass
| 4
| 3
| 29
| 4
| 8
| 17
| 2
| 2.72
| 1
| 7
| 0
| 0
| 1
| 0
| 2
| 89
| 78
| 11
| 18
| 9
| 12
| 49
| 12
| 6
| 9
| 3
| 6
| 1
| 4
|
328,386
|
e-dyn/gloria
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/e-dyn_gloria/gloria/regressors.py
|
gloria.regressors.IntermittentEvent
|
from gloria.profiles import Profile
from typing_extensions import Self
from typing import Any, Optional, Type
from gloria.utilities.types import Timestamp
from gloria.utilities.constants import _DELIM
import pandas as pd
class IntermittentEvent(EventRegressor):
"""
A regressor to model recurring events at given times.
The regressor is added to the :class:`Gloria` model using
:meth:`~Gloria.add_event` and does not need to be handled
directly by the user.
Parameters
----------
name : str
A descriptive, unique name to identify the regressor.
prior_scale : float
Parameter modulating the strength of the regressors. Larger values
allow the model to fit a larger impact of the event, smaller
values dampen the impact. Must be larger than zero.
profile : Profile
The profile that occurs at ``t_anchor``. Allowed profile types are
described in the :ref:`ref-profiles` section.
t_list : list[:class:`pandas.Timestamp`] | list[str]
A list of timestamps at which ``profile`` occurs. The exact meaning of
each timestamp in the list depends on implementation details of the
underlying ``profile``, but typically refers to its mode.
"""
t_list: list[Timestamp] = []
def to_dict(self: Self) -> dict[str, Any]:
"""
Converts the intermittent event regressor to a JSON-serializable
dictionary.
Returns
-------
dict[str, Any]
Dictionary containing all regressor fields including an extra
``regressor_type = "IntermittentEvent"`` item.
"""
regressor_dict = super().to_dict()
regressor_dict['t_list'] = [str(t) for t in self.t_list]
return regressor_dict
@classmethod
def from_dict(cls: Type[Self], regressor_dict: dict[str, Any]) -> Self:
"""
Creates an IntermittentEvent object from a dictionary.
The key-value pairs of the dictionary must correspond to the
constructor arguments of the regressor.
Parameters
----------
regressor_dict : dict[str, Any]
Dictionary containing all regressor fields
Returns
-------
IntermittentEvent
IntermittentEvent regressor instance with fields from
``regressor_dict``
"""
regressor_dict['profile'] = Profile.from_dict(regressor_dict['profile'])
if 't_list' in regressor_dict:
try:
regressor_dict['t_list'] = [pd.Timestamp(t) for t in regressor_dict['t_list']]
except Exception as e:
raise TypeError("Field 't_list' of IntermittentEvent regressor must be a list of objects that can be cast to a pandas timestamp.") from e
return cls(**regressor_dict)
def get_impact(self: Self, t: pd.Series) -> float:
"""
Calculate fraction of overall profiles occurring within a timerange.
Parameters
----------
t : :class:`pandas.Series`
A series of :class:`pandas.Timestamp`.
Returns
-------
impact : float
Fraction of overall profiles occurring between minimum and maximum
date of ``t``.
"""
if len(self.t_list) == 0:
return 0.0
impact = sum((float(t.min() <= t0 <= t.max()) for t0 in self.t_list))
impact /= len(self.t_list)
return impact
def make_feature(self: Self, t: pd.Series, regressor: Optional[pd.Series]=None) -> tuple[pd.DataFrame, dict]:
"""
Create the feature matrix for the intermittent event regressor.
Parameters
----------
t : :class:`pandas.Series`
A series of :class:`pandas.Timestamp` at which the regressor has to
be evaluated
regressor : :class:`pandas.Series`
Contains the values for the regressor that will be added to the
feature matrix unchanged. Only has effect for
:class:`ExternalRegressor`. Any input will be ignored for
:class:`IntermittentEvent`.
Returns
-------
X : :class:`pandas.DataFrame`
The feature matrix containing the data of the regressor.
prior_scales : dict
A map for ``feature matrix column name`` → ``prior_scale``.
"""
t = t.reset_index(drop=True)
column = f'{self._regressor_type}{_DELIM}{self.profile._profile_type}{_DELIM}{self.name}'
all_profiles = pd.Series(0, index=range(t.shape[0]))
for t_anchor in self.t_list:
all_profiles += self.profile.generate(t, t_anchor)
X = pd.DataFrame({column: all_profiles})
prior_scales = {column: self.prior_scale}
return (X, prior_scales)
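The feature column is simply the sum of one profile curve per anchor timestamp. A self-contained sketch of the same accumulation loop, using a toy Gaussian function in place of a Profile object and arbitrary example dates:
import numpy as np
import pandas as pd

def gaussian_profile(t: pd.Series, t_anchor: pd.Timestamp) -> pd.Series:
    x = (t - t_anchor) / pd.Timedelta("2d")
    return np.exp(-0.5 * x**2)

t = pd.Series(pd.date_range("2024-01-01", periods=30, freq="D"))
t_list = [pd.Timestamp("2024-01-05"), pd.Timestamp("2024-01-20")]

# Same accumulation as make_feature: one curve per anchor, summed elementwise.
all_profiles = pd.Series(0.0, index=range(t.shape[0]))
for t_anchor in t_list:
    all_profiles += gaussian_profile(t, t_anchor)
print(all_profiles.round(2).head(8).tolist())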
|
class IntermittentEvent(EventRegressor):
'''
A regressor to model recurring events at given times.
The regressor is added to the :class:`Gloria` model using
:meth:`~Gloria.add_event` and does not need to be handled
directly by the user.
Parameters
----------
name : str
A descriptive, unique name to identify the regressor.
prior_scale : float
Parameter modulating the strength of the regressors. Larger values
allow the model to fit a larger impact of the event, smaller
values dampen the impact. Must be larger than zero.
profile : Profile
The profile that occurs at ``t_anchor``. Allowed profile types are
described in the :ref:`ref-profiles` section.
t_list : list[:class:`pandas.Timestamp`] | list[str]
A list of timestamps at which ``profile`` occurs. The exact meaning of
each timestamp in the list depends on implementation details of the
underlying ``profile``, but typically refers to its mode.
'''
def to_dict(self: Self) -> dict[str, Any]:
'''
Converts the intermittent event regressor to a JSON-serializable
dictionary.
Returns
-------
dict[str, Any]
Dictionary containing all regressor fields including an extra
``regressor_type = "IntermittentEvent"`` item.
'''
pass
@classmethod
def from_dict(cls: Type[Self], regressor_dict: dict[str, Any]) -> Self:
'''
Creates an IntermittentEvent object from a dictionary.
The key-value pairs of the dictionary must correspond to the
constructor arguments of the regressor.
Parameters
----------
regressor_dict : dict[str, Any]
Dictionary containing all regressor fields
Returns
-------
IntermittentEvent
IntermittentEvent regressor instance with fields from
``regressor_dict``
'''
pass
def get_impact(self: Self, t: pd.Series) -> float:
'''
Calculate fraction of overall profiles occurring within a timerange.
Parameters
----------
t : :class:`pandas.Series`
A series of :class:`pandas.Timestamp`.
Returns
-------
impact : float
Fraction of overall profiles occurring between minimum and maximum
date of ``t``.
'''
pass
def make_feature(self: Self, t: pd.Series, regressor: Optional[pd.Series]=None) -> tuple[pd.DataFrame, dict]:
'''
Create the feature matrix for the intermittent event regressor.
Parameters
----------
t : :class:`pandas.Series`
A series of :class:`pandas.Timestamp` at which the regressor has to
be evaluated
regressor : :class:`pandas.Series`
Contains the values for the regressor that will be added to the
feature matrix unchanged. Only has effect for
:class:`ExternalRegressor`. Any input will be ignored for
:class:`IntermittentEvent`.
Returns
-------
X : :class:`pandas.DataFrame`
The feature matrix containing the data of the regressor.
prior_scales : dict
A map for ``feature matrix column name`` → ``prior_scale``.
'''
pass
| 6
| 5
| 30
| 4
| 10
| 17
| 2
| 2.1
| 1
| 11
| 1
| 1
| 3
| 0
| 4
| 93
| 154
| 24
| 42
| 17
| 34
| 88
| 29
| 13
| 24
| 3
| 7
| 2
| 8
|
328,387
|
e-dyn/gloria
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/e-dyn_gloria/gloria/regressors.py
|
gloria.regressors.PeriodicEvent
|
from typing_extensions import Self
from gloria.utilities.constants import _DELIM
import pandas as pd
from gloria.profiles import Profile
from typing import Any, Optional, Type
class PeriodicEvent(SingleEvent):
"""
A regressor to model periodically recurring events.
The regressor is added to the :class:`Gloria` model using
:meth:`~Gloria.add_event` and does not need to be handled
directly by the user.
Parameters
----------
name : str
A descriptive, unique name to identify the regressor.
prior_scale : float
Parameter modulating the strength of the regressors. Larger values
allow the model to fit a larger impact of the event, smaller
values dampen the impact. Must be larger than zero.
profile : Profile
The profile that periodically occurs. Allowed profile types are
described in the :ref:`ref-profiles` section.
t_anchor : :class:`pandas.Timestamp`
An arbitrary timestamp at which ``profile`` occurs. The profile will be
repeated forwards and backwards in time every ``period``. The exact
meaning of ``t_anchor`` depends on the implementation details of the
underlying ``profile``, but typically refers to its mode.
period : :class:`pandas.Timedelta`
Periodicity of the periodic event regressor.
"""
period: pd.Timedelta
def to_dict(self: Self) -> dict[str, Any]:
"""
Converts the periodic event regressor to a JSON-serializable
dictionary.
Returns
-------
dict[str, Any]
Dictionary containing all regressor fields including an extra
``regressor_type = "PeriodicEvent"`` item.
"""
regressor_dict = super().to_dict()
regressor_dict['period'] = str(self.period)
return regressor_dict
@classmethod
def from_dict(cls: Type[Self], regressor_dict: dict[str, Any]) -> Self:
"""
Creates a PeriodicEvent object from a dictionary.
The key-value pairs of the dictionary must correspond to the
constructor arguments of the regressor.
Parameters
----------
regressor_dict : dict[str, Any]
Dictionary containing all regressor fields
Returns
-------
PeriodicEvent
PeriodicEvent regressor instance with fields from
``regressor_dict``.
"""
regressor_dict['t_anchor'] = pd.Timestamp(regressor_dict['t_anchor'])
regressor_dict['period'] = pd.Timedelta(regressor_dict['period'])
regressor_dict['profile'] = Profile.from_dict(regressor_dict['profile'])
return cls(**regressor_dict)
def get_t_list(self: Self, t: pd.Series) -> list[pd.Timestamp]:
"""
Yields a list of timestamps of period starts within the range of
input timestamps.
Parameters
----------
t : :class:`pandas.Series`
A pandas series of :class:`pandas.Timestamp`.
Returns
-------
t_list : list[:class:`pandas.Timestamp`]
A list of timestamps of period starts.
"""
n_margin = 2
n_min = (t.min() - self.t_anchor) // self.period - n_margin
n_max = (t.max() - self.t_anchor) // self.period + n_margin
t_list = [self.t_anchor + n * self.period for n in range(n_min, n_max + 1)]
return t_list
def get_impact(self: Self, t: pd.Series) -> float:
"""
Calculate fraction of overall profiles occurring within a timerange.
Parameters
----------
t : :class:`pandas.Series`
A series of :class:`pandas.Timestamp`.
Returns
-------
impact : float
Fraction of overall profiles occurring between minimum and maximum
date of ``t``.
"""
t_list = self.get_t_list(t)
if len(t_list) == 0:
return 0.0
impact = sum((float(t.min() <= t0 <= t.max()) for t0 in t_list))
impact /= len(t_list)
return impact
def make_feature(self: Self, t: pd.Series, regressor: Optional[pd.Series]=None) -> tuple[pd.DataFrame, dict]:
"""
Create the feature matrix for the periodic event regressor.
Parameters
----------
t : :class:`pandas.Series`
A series of :class:`pandas.Timestamp` at which the regressor has to
be evaluated
regressor : :class:`pandas.Series`
Contains the values for the regressor that will be added to the
feature matrix unchanged. Only has effect for
:class:`ExternalRegressor`. Any input will be ignored for
:class:`PeriodicEvent`.
Returns
-------
X : :class:`pandas.DataFrame`
The feature matrix containing the data of the regressor.
prior_scales : dict
A map for ``feature matrix column name`` → ``prior_scale``.
"""
t = t.reset_index(drop=True)
column = f'{self._regressor_type}{_DELIM}{self.profile._profile_type}{_DELIM}{self.name}'
t_list = self.get_t_list(t)
all_profiles = pd.Series(0, index=range(t.shape[0]))
for t_anchor in t_list:
all_profiles += self.profile.generate(t, t_anchor)
X = pd.DataFrame({column: all_profiles})
prior_scales = {column: self.prior_scale}
return (X, prior_scales)
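get_t_list is pure Timedelta arithmetic: integer floor-division locates the periods covering the data range, padded by a two-period margin on each side. A standalone check with assumed dates:
import pandas as pd

t = pd.Series(pd.date_range("2024-03-01", "2024-03-31", freq="D"))
t_anchor = pd.Timestamp("2024-01-01")
period = pd.Timedelta("7d")

# Same arithmetic as get_t_list above: Timedelta // Timedelta yields an int.
n_margin = 2
n_min = (t.min() - t_anchor) // period - n_margin
n_max = (t.max() - t_anchor) // period + n_margin
t_list = [t_anchor + n * period for n in range(n_min, n_max + 1)]
print(t_list[0], t_list[-1], len(t_list))  # 2024-02-12 ... 2024-04-08, 9 anchors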
|
class PeriodicEvent(SingleEvent):
'''
A regressor to model periodically recurring events.
The regressor is added to the :class:`Gloria` model using
:meth:`~Gloria.add_event` and does not need to be handled
directly by the user.
Parameters
----------
name : str
A descriptive, unique name to identify the regressor.
prior_scale : float
Parameter modulating the strength of the regressors. Larger values
allow the model to fit a larger impact of the event, smaller
values dampen the impact. Must be larger than zero.
profile : Profile
The profile that periodically occurs. Allowed profile types are
described in the :ref:`ref-profiles` section.
t_anchor : :class:`pandas.Timestamp`
An arbitrary timestamp at which ``profile`` occurs. The profile will be
repeated forwards and backwards in time every ``period``. The exact
meaning of ``t_anchor`` depends on the implementation details of the
underlying ``profile``, but typically refers to its mode.
period : :class:`pandas.Timedelta`
Periodicity of the periodic event regressor.
'''
def to_dict(self: Self) -> dict[str, Any]:
'''
Converts the periodic event regressor to a JSON-serializable
dictionary.
Returns
-------
dict[str, Any]
Dictionary containing all regressor fields including an extra
``regressor_type = "PeriodicEvent"`` item.
'''
pass
@classmethod
def from_dict(cls: Type[Self], regressor_dict: dict[str, Any]) -> Self:
'''
Creates a PeriodicEvent object from a dictionary.
The key-value pairs of the dictionary must correspond to the
constructor arguments of the regressor.
Parameters
----------
regressor_dict : dict[str, Any]
Dictionary containing all regressor fields
Returns
-------
PeriodicEvent
PeriodicEvent regressor instance with fields from
``regressor_dict``.
'''
pass
def get_t_list(self: Self, t: pd.Series) -> list[pd.Timestamp]:
'''
Yields a list of timestamps of period starts within the range of
input timestamps.
Parameters
----------
t : :class:`pandas.Series`
A pandas series of :class:`pandas.Timestamp`.
Returns
-------
t_list : list[:class:`pandas.Timestamp`]
A list of timestamps of period starts.
'''
pass
def get_impact(self: Self, t: pd.Series) -> float:
'''
Calculate fraction of overall profiles occurring within a timerange.
Parameters
----------
t : :class:`pandas.Series`
A series of :class:`pandas.Timestamp`.
Returns
-------
impact : float
Fraction of overall profiles occurring between minimum and maximum
date of ``t``.
'''
pass
def make_feature(self: Self, t: pd.Series, regressor: Optional[pd.Series]=None) -> tuple[pd.DataFrame, dict]:
'''
Create the feature matrix for the periodic event regressor.
Parameters
----------
t : :class:`pandas.Series`
A series of :class:`pandas.Timestamp` at which the regressor has to
be evaluated
regressor : :class:`pandas.Series`
Contains the values for the regressor that will be added to the
feature matrix unchanged. Only has effect for
:class:`ExternalRegressor`. Any input will be ignored for
:class:`PeriodicEvent`.
Returns
-------
X : :class:`pandas.DataFrame`
The feature matrix containing the data of the regressor.
prior_scales : dict
A map for ``feature matrix column name`` → ``prior_scale``.
'''
pass
| 7 | 6 | 29 | 4 | 8 | 16 | 1 | 2.43 | 1 | 10 | 1 | 0 | 4 | 0 | 5 | 98 | 181 | 30 | 44 | 22 | 35 | 107 | 34 | 19 | 28 | 2 | 8 | 1 | 7
|
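The PeriodicEvent.make_feature implementation above sums one profile evaluation per anchor returned by get_t_list. A minimal standalone sketch of that accumulation pattern, with a hypothetical Gaussian stand-in for Profile.generate (all names here are illustrative, not gloria's API):

import numpy as np
import pandas as pd

def generate(t: pd.Series, t_anchor: pd.Timestamp) -> pd.Series:
    # Hypothetical profile: a Gaussian bump (sigma = 12 h) around t_anchor.
    dt_days = (t - t_anchor).dt.total_seconds() / 86400.0
    return pd.Series(np.exp(-0.5 * (dt_days / 0.5) ** 2), index=t.index)

t = pd.Series(pd.date_range("2024-01-01", periods=28, freq="D"))
period = pd.Timedelta("7D")
# Anchors spaced by `period` across the observed range, mimicking get_t_list().
t_list = pd.date_range(t.min(), t.max(), freq=period)

all_profiles = pd.Series(0.0, index=range(t.shape[0]))
for t_anchor in t_list:
    all_profiles += generate(t, t_anchor)

X = pd.DataFrame({"PeriodicEvent_Gauss_demo": all_profiles})
prior_scales = {"PeriodicEvent_Gauss_demo": 1.0}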
328,388
|
e-dyn/gloria
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/e-dyn_gloria/gloria/regressors.py
|
gloria.regressors.Regressor
|
from typing import Any, Optional, Type
from typing_extensions import Self
from abc import ABC, abstractmethod
import pandas as pd
from pydantic import BaseModel, ConfigDict, Field
class Regressor(BaseModel, ABC):
"""
Base class for adding regressors to the Gloria model and creating the
respective feature matrix
Parameters
----------
name : str
A descriptive, unique name to identify the regressor
prior_scale : float
Parameter modulating the strength of the regressors. Larger values
allow the model to fit larger impact, smaller values dampen the impact.
Must be larger than zero.
"""
model_config = ConfigDict(arbitrary_types_allowed=True)
name: str
prior_scale: float = Field(gt=0)
@property
def _regressor_type(self: Self) -> str:
"""
Returns name of the regressor class.
"""
return type(self).__name__
def to_dict(self: Self) -> dict[str, Any]:
"""
Converts the regressor to a JSON-serializable dictionary.
Returns
-------
dict[str, Any]
Dictionary containing all regressor fields including an extra
``regressor_type`` key with the class name as value.
"""
regressor_dict = {k: self.__dict__[k] for k in Regressor.model_fields.keys()}
regressor_dict['regressor_type'] = self._regressor_type
return regressor_dict
@classmethod
@abstractmethod
def from_dict(cls: Type[Self], regressor_dict: dict[str, Any]) -> Self:
"""
Forward declaration of class method for static type checking.
See details in regressor_from_dict().
"""
pass
@classmethod
def check_for_missing_keys(cls: Type[Self], regressor_dict: dict[str, Any]) -> None:
"""
Confirms that all required fields for the requested regressor type are
found in the regressor dictionary.
Parameters
----------
regressor_dict : dict[str, Any]
Dictionary containing all regressor fields
Raises
------
KeyError
Raised if any keys are missing
Returns
-------
None
"""
required_fields = {name for name, info in cls.model_fields.items() if info.is_required()}
missing_keys = required_fields - set(regressor_dict.keys())
if missing_keys:
missing_keys_str = ', '.join([f"'{key}'" for key in missing_keys])
raise KeyError(f'Key(s) {missing_keys_str} required for regressors of type {cls.__name__} but not found in regressor dictionary.')
@abstractmethod
def make_feature(self: Self, t: pd.Series, regressor: Optional[pd.Series]=None) -> tuple[pd.DataFrame, dict]:
"""
Create the feature matrix along with prior scales for a given integer
time vector
Parameters
----------
t : pd.Series
A pandas series of timestamps at which the regressor has to be
evaluated
regressor: pd.Series
Contains the values for the regressor that will be added to the
feature matrix unchanged. Only has effect for ExternalRegressor
Raises
------
NotImplementedError
In case the child regressor did not implement the make_feature()
method yet
Returns
-------
pd.DataFrame
Contains the feature matrix
dict
A map for 'feature matrix column name' -> 'prior_scale'
"""
pass
|
class Regressor(BaseModel, ABC):
'''
Base class for adding regressors to the Gloria model and creating the
respective feature matrix
Parameters
----------
name : str
A descriptive, unique name to identify the regressor
prior_scale : float
Parameter modulating the strength of the regressors. Larger values
allow the model to fit larger impact, smaller values dampen the impact.
Must be larger than zero.
'''
@property
def _regressor_type(self: Self) -> str:
'''
Returns name of the regressor class.
'''
pass
def to_dict(self: Self) -> dict[str, Any]:
'''
Converts the regressor to a JSON-serializable dictionary.
Returns
-------
dict[str, Any]
Dictionary containing all regressor fields including an extra
``regressor_type`` key with the class name as value.
'''
pass
@classmethod
@abstractmethod
def from_dict(cls: Type[Self], regressor_dict: dict[str, Any]) -> Self:
'''
Forward declaration of class method for static type checking.
See details in regressor_from_dict().
'''
pass
@classmethod
def check_for_missing_keys(cls: Type[Self], regressor_dict: dict[str, Any]) -> None:
'''
Confirms that all required fields for the requested regressor type are
found in the regressor dictionary.
Parameters
----------
regressor_dict : dict[str, Any]
Dictionary containing all regressor fields
Raises
------
KeyError
Raised if any keys are missing
Returns
-------
None
'''
pass
@abstractmethod
def make_feature(self: Self, t: pd.Series, regressor: Optional[pd.Series]=None) -> tuple[pd.DataFrame, dict]:
'''
Create the feature matrix along with prior scales for a given integer
time vector
Parameters
----------
t : pd.Series
A pandas series of timestamps at which the regressor has to be
evaluated
regressor: pd.Series
Contains the values for the regressor that will be added to the
feature matrix unchanged. Only has effect for ExternalRegressor
Raises
------
NotImplementedError
In case the child regressor did not implement the make_feature()
method yet
Returns
-------
pd.DataFrame
Contains the feature matrix
dict
A map for 'feature matrix column name' -> 'prior_scale'
'''
pass
| 11 | 6 | 19 | 2 | 6 | 11 | 1 | 1.79 | 2 | 7 | 0 | 3 | 3 | 0 | 5 | 87 | 127 | 18 | 39 | 21 | 24 | 70 | 20 | 12 | 14 | 2 | 5 | 1 | 6
|
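check_for_missing_keys leans on pydantic v2's model_fields to discover which constructor arguments lack defaults. The same check on a toy model (toy names, not gloria classes):

from pydantic import BaseModel, Field

class ToyRegressor(BaseModel):
    name: str
    prior_scale: float = Field(gt=0)
    note: str = "n/a"  # has a default, so it is not required

required = {n for n, info in ToyRegressor.model_fields.items() if info.is_required()}
missing = required - {"name"}  # pretend the incoming dict only carried "name"
print(sorted(missing))  # ['prior_scale']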
328,389
|
e-dyn/gloria
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/e-dyn_gloria/gloria/regressors.py
|
gloria.regressors.Seasonality
|
import pandas as pd
from itertools import product
from pydantic import BaseModel, ConfigDict, Field
from gloria.utilities.constants import _DELIM
import numpy as np
from typing import Any, Optional, Type
from typing_extensions import Self
class Seasonality(Regressor):
"""
A regressor to model seasonality features from Fourier components.
The regressor is added to the :class:`Gloria` model using
:meth:`~Gloria.add_seasonality` and does not need to be handled
directly by the user. The feature matrix produced by
:meth:`~Seasonality.make_feature` contains :math:`2 \\cdot N` columns
corresponding to the even and odd Fourier terms
.. math::
\\sum_{n=1}^{N}{a_n\\sin\\left(\\frac{2\\pi n}{T} t\\right)
+ b_n\\cos\\left(\\frac{2\\pi n}{T} t\\right)}
where :math:`T` is the fundamental Fourier period and :math:`N` is the
maximum Fourier order to be included, controlled by the parameters
``period`` and ``order``, respectively. The parameters :math:`a_n` and
:math:`b_n` are weighting factors that will be optimized during Gloria's
fitting procedure.
Parameters
----------
name : str
A descriptive, unique name to identify the regressor.
prior_scale : float
Parameter modulating the strength of the regressors. Larger values
allow the model to fit larger seasonal oscillations, smaller values
dampen the impact. Must be larger than zero.
period : float
Fundamental period of the seasonality component. Note that the period
is unitless. It can be understood in units of ``sampling_period`` of
the :class:`~gloria.Gloria` owning this seasonality. Must be larger
than zero.
fourier_order : int
Maximum Fourier order of the underlying series. Even and odd Fourier
terms from fundamental up to ``fourier_order`` will be used as
regressors. Must be larger than or equal to 1.
.. warning::
In a future version of Gloria, ``period`` will become a
:class:`pandas.Timestamp` or ``str`` representing such. Where possible
use :meth:`~Gloria.add_seasonality` instead of :class:`Seasonality`
to avoid conflict.
"""
period: float = Field(gt=0)
fourier_order: int = Field(ge=1)
def to_dict(self: Self) -> dict[str, Any]:
"""
Converts the Seasonality regressor to a JSON-serializable dictionary.
Returns
-------
dict[str, Any]
Dictionary containing all regressor fields including an extra
``regressor_type = "Seasonality"`` item.
"""
regressor_dict = super().to_dict()
regressor_dict['period'] = self.period
regressor_dict['fourier_order'] = self.fourier_order
return regressor_dict
@classmethod
def from_dict(cls: Type[Self], regressor_dict: dict[str, Any]) -> Self:
"""
Creates a Seasonality object from a dictionary.
The key-value pairs of the dictionary must correspond to the
constructor arguments of the regressor.
Parameters
----------
regressor_dict : dict[str, Any]
Dictionary containing all regressor fields.
Returns
-------
Seasonality
Seasonality regressor instance with fields from ``regressor_dict``.
"""
return cls(**regressor_dict)
def make_feature(self: Self, t: pd.Series, regressor: Optional[pd.Series]=None) -> tuple[pd.DataFrame, dict]:
"""
Create the feature matrix for the seasonality regressor.
Parameters
----------
t : pd.Series
A pandas series of timestamps at which the regressor has to be
evaluated. The timestamps have to be represented as integers in
units of their sampling frequency.
regressor : pd.Series
Contains the values for the regressor that will be added to the
feature matrix unchanged. Only has effect for
:class:`ExternalRegressor`. Any input will be ignored for
:class:`Seasonality`.
Returns
-------
X : pd.DataFrame
The feature matrix containing the data of the regressor.
prior_scales : dict
A map for ``feature matrix column name`` → ``prior_scale``
.. warning::
In a future version of Gloria, ``period`` will become a
:class:`pandas.Timestamp` or ``str`` representing such and ``t``
will be a :class:`pandas.Series` of timestamps.
"""
orders_str = map(str, range(1, self.fourier_order + 1))
columns = [_DELIM.join(x) for x in product([self._regressor_type], [self.name], ['odd', 'even'], orders_str)]
X = pd.DataFrame(data=self.fourier_series(np.asarray(t), self.period, self.fourier_order), columns=columns)
prior_scales = {col: self.prior_scale for col in columns}
return (X, prior_scales)
@staticmethod
def fourier_series(t: np.ndarray, period: float, fourier_order: int) -> np.ndarray:
"""
Creates an array of even and odd Fourier terms.
The :class:`numpy.ndarray` output array has the following structure:
* **Columns**: alternately odd and even Fourier terms up to the
given maximum ``fourier_order``, resulting in :math:`2\\times`
``fourier_order`` columns.
* **Rows**: Fourier terms evaluated at each timestamp, resulting in
``len(t)`` rows.
Parameters
----------
t : np.ndarray
Integer array at which the Fourier components are evaluated.
period : float
Period duration in units of the integer array.
fourier_order : int
Maximum Fourier order up to which Fourier components will be
created. Must be larger than or equal to 1.
Returns
-------
np.ndarray
The array containing the Fourier components
.. warning::
In a future version of Gloria, ``period`` will become a
:class:`pandas.Timestamp` or ``str`` representing such and ``t``
will be a :class:`pandas.Series` of timestamps.
"""
w0 = 2 * np.pi / period
odd = np.sin(w0 * t.reshape(-1, 1) * np.arange(1, fourier_order + 1))
even = np.cos(w0 * t.reshape(-1, 1) * np.arange(1, fourier_order + 1))
return np.hstack([odd, even])
|
class Seasonality(Regressor):
'''
A regressor to model seasonality features from Fourier components.
The regressor is added to the :class:`Gloria` model using
:meth:`~Gloria.add_seasonality` and does not need to be handled
directly by the user. The feature matrix produced by
:meth:`~Seasonality.make_feature` contains :math:`2 \cdot N` columns
corresponding to the even and odd Fourier terms
.. math::
\sum_{n=1}^{N}{a_n\sin\left(\frac{2\pi n}{T} t\right)
+ b_n\cos\left(\frac{2\pi n}{T} t\right)}
where :math:`T` is the fundamental Fourier period and :math:`N` is the
maximum Fourier order to be included, controlled by the parameters
``period`` and ``order``, respectively. The parameters :math:`a_n` and
:math:`b_n` are weighting factors that will be optimized during Gloria's
fitting procedure.
Parameters
----------
name : str
A descriptive, unique name to identify the regressor.
prior_scale : float
Parameter modulating the strength of the regressors. Larger values
allow the model to fit larger seasonal oscillations, smaller values
dampen the impact. Must be larger than zero.
period : float
Fundamental period of the seasonality component. Note that the period
is unitless. It can be understood in units of ``sampling_period`` of
the :class:`~gloria.Gloria` owning this seasonality. Must be larger
than zero.
fourier_order : int
Maximum Fourier order of the underlying series. Even and odd Fourier
terms from fundamental up to ``fourier_order`` will be used as
regressors. Must be larger than or equal to 1.
.. warning::
In a future version of Gloria, ``period`` will become a
:class:`pandas.Timestamp` or ``str`` representing such. Where possible
use :meth:`~Gloria.add_seasonality` instead of :class:`Seasonality`
to avoid conflict.
'''
def to_dict(self: Self) -> dict[str, Any]:
'''
Converts the Seasonality regressor to a JSON-serializable dictionary.
Returns
-------
dict[str, Any]
Dictionary containing all regressor fields including an extra
``regressor_type = "Seasonality"`` item.
'''
pass
@classmethod
def from_dict(cls: Type[Self], regressor_dict: dict[str, Any]) -> Self:
'''
Creates a Seasonality object from a dictionary.
The key-value pairs of the dictionary must correspond to the
constructor arguments of the regressor.
Parameters
----------
regressor_dict : dict[str, Any]
Dictionary containing all regressor fields.
Returns
-------
Seasonality
Seasonality regressor instance with fields from ``regressor_dict``.
'''
pass
def make_feature(self: Self, t: pd.Series, regressor: Optional[pd.Series]=None) -> tuple[pd.DataFrame, dict]:
'''
Create the feature matrix for the seasonality regressor.
Parameters
----------
t : pd.Series
A pandas series of timestamps at which the regressor has to be
evaluated. The timestamps have to be represented as integers in
units of their sampling frequency.
regressor : pd.Series
Contains the values for the regressor that will be added to the
feature matrix unchanged. Only has effect for
:class:`ExternalRegressor`. Any input will be ignored for
:class:`Seasonality`.
Returns
-------
X : pd.DataFrame
The feature matrix containing the data of the regressor.
prior_scales : dict
A map for ``feature matrix column name`` → ``prior_scale``
.. warning::
In a future version of Gloria, ``period`` will become a
:class:`pandas.Timestamp` or ``str`` representing such and ``t``
will be a :class:`pandas.Series` of timestamps.
'''
pass
@staticmethod
def fourier_series(t: np.ndarray, period: float, fourier_order: int) -> np.ndarray:
'''
Creates an array of even and odd Fourier terms.
The :class:`numpy.ndarray` output array has the following structure:
* **Columns**: alternately odd and even Fourier terms up to the
given maximum ``fourier_order``, resulting in :math:`2\times`
``fourier_order`` columns.
* **Rows**: Fourier terms evaluated at each timestamp, resulting in
``len(t)`` rows.
Parameters
----------
t : np.ndarray
Integer array at which the Fourier components are evaluated.
period : float
Period duration in units of the integer array.
fourier_order : int
Maximum Fourier order up to which Fourier components will be
created. Must be larger than or equal to 1.
Returns
-------
np.ndarray
The array containing the Fourier components
.. warning::
In a future version of Gloria, ``period`` will become a
:class:`pandas.Timestamp` or ``str`` representing such and ``t``
will be a :class:`pandas.Series` of timestamps.
'''
pass
| 7 | 5 | 33 | 4 | 9 | 20 | 1 | 3.02 | 1 | 11 | 0 | 0 | 2 | 0 | 4 | 91 | 190 | 29 | 40 | 21 | 29 | 121 | 21 | 15 | 16 | 1 | 6 | 0 | 4
|
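Seasonality.fourier_series is self-contained NumPy; running the same arithmetic standalone shows the (len(t), 2 * fourier_order) layout with sine columns followed by cosine columns:

import numpy as np

def fourier_series(t: np.ndarray, period: float, fourier_order: int) -> np.ndarray:
    w0 = 2 * np.pi / period
    orders = np.arange(1, fourier_order + 1)
    odd = np.sin(w0 * t.reshape(-1, 1) * orders)   # sin terms, orders 1..N
    even = np.cos(w0 * t.reshape(-1, 1) * orders)  # cos terms, orders 1..N
    return np.hstack([odd, even])

X = fourier_series(np.arange(10), period=7.0, fourier_order=3)
print(X.shape)  # (10, 6)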
328,390
|
e-dyn/gloria
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/e-dyn_gloria/gloria/regressors.py
|
gloria.regressors.SingleEvent
|
from typing_extensions import Self
from gloria.profiles import Profile
from gloria.utilities.types import Timestamp
import pandas as pd
from gloria.utilities.constants import _DELIM
from typing import Any, Optional, Type
class SingleEvent(EventRegressor):
"""
A regressor to model a single occurrence of an event.
The regressor is added to the :class:`Gloria` model using
:meth:`~Gloria.add_event` and does not need to be handled
directly by the user.
Parameters
----------
name : str
A descriptive, unique name to identify the regressor.
prior_scale : float
Parameter modulating the strength of the regressors. Larger values
allow the model to fit a larger impact of the event, smaller
values dampen the impact. Must be larger than zero.
profile : Profile
The profile that occurs at ``t_anchor``. Allowed profile types are
described in the :ref:`ref-profiles` section.
t_anchor : :class:`pandas.Timestamp` | str
The timestamp at which ``profile`` occurs. The exact meaning of
``t_anchor`` depends on the implementation details of the underlying
``profile``, but typically refers to its mode.
"""
t_anchor: Timestamp
def to_dict(self: Self) -> dict[str, Any]:
"""
Converts the single event regressor to a JSON-serializable dictionary.
Returns
-------
dict[str, Any]
Dictionary containing all regressor fields including an extra
``regressor_type = "SingleEvent"`` item.
"""
regressor_dict = super().to_dict()
regressor_dict['t_anchor'] = str(self.t_anchor)
return regressor_dict
@classmethod
def from_dict(cls: Type[Self], regressor_dict: dict[str, Any]) -> Self:
"""
Creates a SingleEvent object from a dictionary.
The key-value pairs of the dictionary must correspond to the
constructor arguments of the regressor.
Parameters
----------
regressor_dict : dict[str, Any]
Dictionary containing all regressor fields
Returns
-------
SingleEvent
SingleEvent regressor instance with fields from ``regressor_dict``
"""
regressor_dict['t_anchor'] = pd.Timestamp(regressor_dict['t_anchor'])
regressor_dict['profile'] = Profile.from_dict(regressor_dict['profile'])
return cls(**regressor_dict)
def get_impact(self: Self, t: pd.Series) -> float:
"""
Calculate the fraction of overall profiles occurring within a time range.
Parameters
----------
t : :class:`pandas.Series`
A series of :class:`pandas.Timestamp`.
Returns
-------
impact : float
Fraction of overall profiles occurring between minimum and maximum
date of ``t``.
"""
impact = float(t.min() <= self.t_anchor <= t.max())
return impact
def make_feature(self: Self, t: pd.Series, regressor: Optional[pd.Series]=None) -> tuple[pd.DataFrame, dict]:
"""
Create the feature matrix for the single event regressor.
Parameters
----------
t : :class:`pandas.Series`
A series of :class:`pandas.Timestamp` at which the regressor has to
be evaluated
regressor : :class:`pandas.Series`
Contains the values for the regressor that will be added to the
feature matrix unchanged. Only has effect for
:class:`ExternalRegressor`. Any input will be ignored for
:class:`SingleEvent`.
Returns
-------
X : :class:`pandas.DataFrame`
The feature matrix containing the data of the regressor.
prior_scales : dict
A map for ``feature matrix column name`` → ``prior_scale``.
"""
column = f'{self._regressor_type}{_DELIM}{self.profile._profile_type}{_DELIM}{self.name}'
X = pd.DataFrame({column: self.profile.generate(t, self.t_anchor)})
prior_scales = {column: self.prior_scale}
return (X, prior_scales)
|
class SingleEvent(EventRegressor):
'''
A regressor to model a single occurrence of an event.
The regressor is added to the :class:`Gloria` model using
:meth:`~Gloria.add_event` and does not need to be handled
directly by the user.
Parameters
----------
name : str
A descriptive, unique name to identify the regressor.
prior_scale : float
Parameter modulating the strength of the regressors. Larger values
allow the model to fit a larger impact of the event, smaller
values dampen the impact. Must be larger than zero.
profile : Profile
The profile that occurs at ``t_anchor``. Allowed profile types are
described in the :ref:`ref-profiles` section.
t_anchor : :class:`pandas.Timestamp` | str
The timestamp at which ``profile`` occurs. The exact meaning of
``t_anchor`` depends on the implementation details of the underlying
``profile``, but typically refers to its mode.
'''
def to_dict(self: Self) -> dict[str, Any]:
'''
Converts the single event regressor to a JSON-serializable dictionary.
Returns
-------
dict[str, Any]
Dictionary containing all regressor fields including an extra
``regressor_type = "SingleEvent"`` item.
'''
pass
@classmethod
def from_dict(cls: Type[Self], regressor_dict: dict[str, Any]) -> Self:
'''
Creates a SingleEvent object from a dictionary.
The key-value pairs of the dictionary must correspond to the
constructor arguments of the regressor.
Parameters
----------
regressor_dict : dict[str, Any]
Dictionary containing all regressor fields
Returns
-------
SingleEvent
SingleEvent regressor instance with fields from ``regressor_dict``
'''
pass
def get_impact(self: Self, t: pd.Series) -> float:
'''
Calculate the fraction of overall profiles occurring within a time range.
Parameters
----------
t : :class:`pandas.Series`
A series of :class:`pandas.Timestamp`.
Returns
-------
impact : float
Fraction of overall profiles occurring between minimum and maximum
date of ``t``.
'''
pass
def make_feature(self: Self, t: pd.Series, regressor: Optional[pd.Series]=None) -> tuple[pd.DataFrame, dict]:
'''
Create the feature matrix for the single event regressor.
Parameters
----------
t : :class:`pandas.Series`
A series of :class:`pandas.Timestamp` at which the regressor has to
be evaluated
regressor : :class:`pandas.Series`
Contains the values for the regressor that will be added to the
feature matrix unchanged. Only has effect for
:class:`ExternalRegressor`. Any input will be ignored for
:class:`SingleEvent`.
Returns
-------
X : :class:`pandas.DataFrame`
The feature matrix containing the data of the regressor.
prior_scales : dict
A map for ``feature matrix column name`` → ``prior_scale``.
'''
pass
| 6 | 5 | 23 | 3 | 6 | 15 | 1 | 3.08 | 1 | 8 | 1 | 1 | 3 | 0 | 4 | 93 | 125 | 19 | 26 | 13 | 18 | 80 | 18 | 10 | 13 | 1 | 7 | 0 | 4
|
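SingleEvent.get_impact reduces to a containment test on the anchor: it returns 1.0 when t_anchor lies between the earliest and latest timestamp of t, and 0.0 otherwise. Sketch:

import pandas as pd

t = pd.Series(pd.date_range("2024-01-01", "2024-01-31", freq="D"))
t_anchor = pd.Timestamp("2024-01-15")
impact = float(t.min() <= t_anchor <= t.max())
print(impact)  # 1.0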
328,391
|
e-dyn/gloria
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/e-dyn_gloria/gloria/utilities/constants.py
|
gloria.utilities.constants.FitDefaults
|
from typing import Literal, Optional, TypedDict
class FitDefaults(TypedDict):
optimize_mode: Literal['MAP', 'MLE']
use_laplace: bool
capacity: Optional[int]
capacity_mode: Optional[str]
capacity_value: Optional[float]
|
class FitDefaults(TypedDict):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 | 0 | 6 | 1 | 5 | 0 | 6 | 1 | 5 | 0 | 1 | 0 | 0
|
328,392
|
e-dyn/gloria
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/e-dyn_gloria/gloria/utilities/constants.py
|
gloria.utilities.constants.LoadDataDefaults
|
from typing import Literal, Optional, TypedDict
from gloria.utilities.types import DTypeKind
class LoadDataDefaults(TypedDict):
source: str
dtype_kind: DTypeKind
|
class LoadDataDefaults(TypedDict):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0 | 3 | 1 | 2 | 0 | 3 | 1 | 2 | 0 | 1 | 0 | 0
|
328,393
|
e-dyn/gloria
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/e-dyn_gloria/gloria/utilities/constants.py
|
gloria.utilities.constants.PredictDefaults
|
from typing import Literal, Optional, TypedDict
class PredictDefaults(TypedDict):
periods: int
include_history: bool
|
class PredictDefaults(TypedDict):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0 | 3 | 1 | 2 | 0 | 3 | 1 | 2 | 0 | 1 | 0 | 0
|
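The *Defaults classes are TypedDicts, i.e. plain dicts with statically checked keys; no runtime validation happens. A quick sketch against PredictDefaults' shape:

from typing import TypedDict

class PredictDefaults(TypedDict):
    periods: int
    include_history: bool

defaults: PredictDefaults = {"periods": 10, "include_history": True}
print(defaults["periods"])  # 10; type checkers flag missing or unknown keys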
328,394
|
e-dyn/gloria
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/e-dyn_gloria/gloria/utilities/errors.py
|
gloria.utilities.errors.FittedError
|
from typing_extensions import Self
from typing import Optional
class FittedError(RuntimeError):
"""
Raised when an operation expects an unfitted Gloria instance but got a
fitted one.
"""
def __init__(self: Self, message: Optional[str]=None) -> None:
if message is None:
message = 'Gloria model has been fit before.'
super().__init__(message)
|
class FittedError(RuntimeError):
'''
Raised when an operation expects an unfitted Gloria instance but got a
fitted one.
'''
def __init__(self: Self, message: Optional[str]=None) -> None:
pass
| 2 | 1 | 4 | 0 | 4 | 0 | 2 | 0.8 | 1 | 2 | 0 | 0 | 1 | 0 | 1 | 12 | 10 | 1 | 5 | 2 | 3 | 4 | 5 | 2 | 3 | 2 | 4 | 1 | 2
|
328,395
|
e-dyn/gloria
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/e-dyn_gloria/gloria/utilities/errors.py
|
gloria.utilities.errors.NotFittedError
|
from typing import Optional
from typing_extensions import Self
class NotFittedError(RuntimeError):
"""
Raised when an operation expects a fitted Gloria instance but got an
unfitted one.
"""
def __init__(self: Self, message: Optional[str]=None) -> None:
if message is None:
message = 'Gloria model has not been fit yet.'
super().__init__(message)
|
class NotFittedError(RuntimeError):
'''
Raised when an operation expects a fitted Gloria instance but got an
unfitted one.
'''
def __init__(self: Self, message: Optional[str]=None) -> None:
pass
| 2 | 1 | 4 | 0 | 4 | 0 | 2 | 0.8 | 1 | 2 | 0 | 0 | 1 | 0 | 1 | 12 | 10 | 1 | 5 | 2 | 3 | 4 | 5 | 2 | 3 | 2 | 4 | 1 | 2
|
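FittedError and NotFittedError are symmetric guards with default messages. A hypothetical usage pattern (the Model class below is illustrative, not gloria's):

from typing import Optional

class NotFittedError(RuntimeError):
    def __init__(self, message: Optional[str] = None) -> None:
        super().__init__(message or "Gloria model has not been fit yet.")

class Model:
    def __init__(self) -> None:
        self.params = None  # set by fit()

    def predict(self) -> None:
        if self.params is None:
            raise NotFittedError()  # falls back to the default message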
328,396
|
e-dyn/gloria
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/e-dyn_gloria/gloria/utilities/logging.py
|
gloria.utilities.logging.LoggingConfig
|
from typing import Any, Callable, Union
from gloria.utilities.constants import _GLORIA_PATH, _RUN_TIMESTAMP
from pathlib import Path
from pydantic import BaseModel, ConfigDict, field_validator
from gloria.utilities.types import LogLevel
class LoggingConfig(BaseModel):
model_config = ConfigDict(validate_assignment=True)
stream_level: LogLevel = 'INFO'
file_level: LogLevel = 'DEBUG'
log_path: Path = _GLORIA_PATH / 'logfiles'
write_logfile: bool = True
@field_validator('log_path', mode='before')
@classmethod
def validate_log_path(cls, log_path: Union[Path, str]) -> Path:
try:
log_path = Path(log_path)
except Exception as e:
raise ValueError(f'Cannot convert log_path input {log_path} to a path.') from e
return log_path
|
class LoggingConfig(BaseModel):
@field_validator('log_path', mode='before')
@classmethod
def validate_log_path(cls, log_path: Union[Path, str]) -> Path:
pass
| 4 | 0 | 8 | 0 | 8 | 0 | 2 | 0.06 | 1 | 4 | 0 | 0 | 0 | 0 | 1 | 83 | 21 | 2 | 18 | 9 | 14 | 1 | 12 | 7 | 10 | 2 | 5 | 1 | 2
|
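The mode="before" validator in LoggingConfig runs ahead of pydantic's own type coercion, so strings (and anything else Path() accepts) are converted first and everything else fails with a uniform message. A toy equivalent, also exercising validate_assignment:

from pathlib import Path
from pydantic import BaseModel, ConfigDict, field_validator

class ToyConfig(BaseModel):
    model_config = ConfigDict(validate_assignment=True)
    log_path: Path = Path("logs")

    @field_validator("log_path", mode="before")
    @classmethod
    def _coerce(cls, value):
        try:
            return Path(value)
        except Exception as e:
            raise ValueError(f"Cannot convert log_path input {value!r} to a path.") from e

cfg = ToyConfig(log_path="run/logs")
cfg.log_path = "elsewhere"  # validate_assignment=True re-runs the validator
print(cfg.log_path)         # elsewhere (as a Path)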
328,397
|
e-dyn/gloria
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/e-dyn_gloria/setup.py
|
setup.BuildModels
|
import os
from setuptools.command.build_ext import build_ext
class BuildModels(build_ext):
"""Custom build command to pre-compile Stan models."""
def run(self) -> None:
if not self.dry_run:
target_dir = os.path.join(self.build_lib, MODEL_DIR)
self.mkpath(target_dir)
build_models(target_dir)
|
class BuildModels(build_ext):
'''Custom build command to pre-compile Stan models.'''
def run(self) -> None:
pass
| 2 | 1 | 5 | 0 | 5 | 0 | 2 | 0.17 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 70 | 8 | 1 | 6 | 3 | 4 | 1 | 6 | 3 | 4 | 2 | 3 | 1 | 2
|
328,398
|
e-dyn/gloria
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/e-dyn_gloria/setup.py
|
setup.CleanModels
|
import os
from distutils.command.clean import clean
class CleanModels(clean):
"""Custom clean command to remove pre-compile Stan models."""
def run(self) -> None:
if not self.dry_run:
target_dir = os.path.join(self.build_lib, MODEL_DIR)
clean_models(target_dir)
clean_models(MODEL_DIR)
super().run()
|
class CleanModels(clean):
'''Custom clean command to remove pre-compiled Stan models.'''
def run(self) -> None:
pass
| 2 | 1 | 6 | 0 | 6 | 0 | 2 | 0.14 | 1 | 1 | 0 | 0 | 1 | 0 | 1 | 34 | 9 | 1 | 7 | 3 | 5 | 1 | 7 | 3 | 5 | 2 | 2 | 1 | 2
|
328,399
|
e-dyn/gloria
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/e-dyn_gloria/setup.py
|
setup.WheelABINone
|
from wheel.bdist_wheel import bdist_wheel
from typing import Tuple
class WheelABINone(bdist_wheel):
def finalize_options(self) -> None:
bdist_wheel.finalize_options(self)
self.root_is_pure = False
def get_tag(self) -> Tuple[str, str, str]:
_, _, plat = bdist_wheel.get_tag(self)
return ('py3', 'none', plat)
|
class WheelABINone(bdist_wheel):
def finalize_options(self) -> None:
pass
def get_tag(self) -> Tuple[str, str, str]:
pass
| 3 | 0 | 3 | 0 | 3 | 0 | 1 | 0 | 1 | 1 | 0 | 0 | 2 | 1 | 2 | 11 | 8 | 1 | 7 | 5 | 4 | 0 | 7 | 5 | 4 | 1 | 2 | 0 | 2
|
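These three command classes only take effect once registered through setup()'s cmdclass mapping. A minimal, self-contained sketch of that wiring (MODEL_DIR and the build_models helper are assumptions standing in for the real ones in setup.py):

import os
from setuptools import setup
from setuptools.command.build_ext import build_ext

MODEL_DIR = "stan_models"  # assumed value; the real constant lives in setup.py

class BuildModels(build_ext):
    """Pre-compile Stan models into the build tree (as shown above)."""
    def run(self) -> None:
        if not self.dry_run:
            target_dir = os.path.join(self.build_lib, MODEL_DIR)
            self.mkpath(target_dir)
            # build_models(target_dir)  # assumed helper from setup.py

setup(cmdclass={"build_ext": BuildModels})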