From eafb74ba615f4df288f11c980d8f46736c2e2667 Mon Sep 17 00:00:00 2001 From: CodeLionX Date: Mon, 27 May 2024 15:34:35 +0200 Subject: [PATCH 1/2] feat: add integration code for restricted algorithm implementations --- arima/.gitignore | 151 +++++++++++++++++++++++++++++++++ arima/Dockerfile | 11 +++ arima/README.md | 3 +- arima/algorithm.py | 128 ++++++++++++++++++++++++++++ arima/requirements.txt | 15 ++++ multi_norma/.gitignore | 151 +++++++++++++++++++++++++++++++++ multi_norma/Dockerfile | 15 ++++ multi_norma/README.md | 11 +++ multi_norma/algorithm.py | 101 ++++++++++++++++++++++ multi_norma/requirements.txt | 5 ++ norma/.gitignore | 152 ++++++++++++++++++++++++++++++++++ norma/Dockerfile | 42 ++++++++++ norma/README.md | 2 + norma/algorithm.py | 77 +++++++++++++++++ norma/requirements.txt | 3 + sand/.gitignore | 151 +++++++++++++++++++++++++++++++++ sand/Dockerfile | 45 ++++++++++ sand/README.md | 2 + sand/algorithm.py | 126 ++++++++++++++++++++++++++++ sand/requirements.txt | 5 ++ series2graph/.dockerignore | 5 ++ series2graph/.gitignore | 151 +++++++++++++++++++++++++++++++++ series2graph/Dockerfile | 19 +++++ series2graph/README.md | 2 + series2graph/algorithm.py | 93 +++++++++++++++++++++ series2graph/requirements.txt | 6 ++ ssa/.gitignore | 151 +++++++++++++++++++++++++++++++++ ssa/Dockerfile | 11 +++ ssa/README.md | 3 +- ssa/algorithm.py | 82 ++++++++++++++++++ ssa/requirements.txt | 15 ++++ 31 files changed, 1732 insertions(+), 2 deletions(-) create mode 100644 arima/.gitignore create mode 100644 arima/Dockerfile create mode 100644 arima/algorithm.py create mode 100644 arima/requirements.txt create mode 100644 multi_norma/.gitignore create mode 100644 multi_norma/Dockerfile create mode 100644 multi_norma/algorithm.py create mode 100644 multi_norma/requirements.txt create mode 100644 norma/.gitignore create mode 100644 norma/Dockerfile create mode 100644 norma/algorithm.py create mode 100644 norma/requirements.txt create mode 100644 sand/.gitignore create mode 100644 sand/Dockerfile create mode 100644 sand/algorithm.py create mode 100644 sand/requirements.txt create mode 100644 series2graph/.dockerignore create mode 100644 series2graph/.gitignore create mode 100644 series2graph/Dockerfile create mode 100644 series2graph/algorithm.py create mode 100755 series2graph/requirements.txt create mode 100644 ssa/.gitignore create mode 100644 ssa/Dockerfile create mode 100644 ssa/algorithm.py create mode 100644 ssa/requirements.txt diff --git a/arima/.gitignore b/arima/.gitignore new file mode 100644 index 0000000..c654270 --- /dev/null +++ b/arima/.gitignore @@ -0,0 +1,151 @@ +# ingore protected source code +ptsa/ +setup.py + +######################################## +# Python.gitignore from github/gitignore +######################################## + +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. 
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+cover/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+.pybuilder/
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+# For a library or package, you might want to ignore these files since the code is
+# intended to run in multiple environments; otherwise, check them in:
+# .python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
+
+########################################
+
+
+**.png
diff --git a/arima/Dockerfile b/arima/Dockerfile
new file mode 100644
index 0000000..4e25a68
--- /dev/null
+++ b/arima/Dockerfile
@@ -0,0 +1,11 @@
+FROM registry.gitlab.hpi.de/akita/i/python3-base:0.2.5
+
+LABEL maintainer="thorsten.papenbrock@hpi.de"
+
+ENV ALGORITHM_MAIN="/app/algorithm.py"
+
+COPY requirements.txt /app/
+RUN pip install -r /app/requirements.txt
+
+COPY ptsa /app/ptsa
+COPY algorithm.py /app/
diff --git a/arima/README.md b/arima/README.md
index 1d164e9..35829f6 100644
--- a/arima/README.md
+++ b/arima/README.md
@@ -8,9 +8,10 @@
 | Source Code | https://github.com/johnpaparrizos/AnomalyDetection/tree/master/code/ptsa |
 | Learning type | unsupervised |
 | Input dimensionality | univariate |
-|||
+After receiving the original source code from the authors, place the directory `ptsa` into this folder.
+
 ## Notes
 
 The ptsa algorithms require scikit-learn versions 0.19 to 0.23; this is checked in utility.py.
 Our Python image, however, uses a newer scikit-learn version (0.24.1 or higher).
Hence we removed the check: diff --git a/arima/algorithm.py b/arima/algorithm.py new file mode 100644 index 0000000..01284f2 --- /dev/null +++ b/arima/algorithm.py @@ -0,0 +1,128 @@ +#!/usr/bin/env python3 + +import json +import sys +import argparse +import numpy as np + +from dataclasses import dataclass + +from ptsa.models.arima import ARIMA +from ptsa.models.distance import Euclidean +from ptsa.models.distance import Mahalanobis +from ptsa.models.distance import Garch +from ptsa.models.distance import SSA +from ptsa.models.distance import Fourier +from ptsa.models.distance import DTW +from ptsa.models.distance import EDRS +from ptsa.models.distance import TWED + + +@dataclass +class CustomParameters: + window_size: int = 20 + max_lag: int = 30000 + p_start: int = 1 + q_start: int = 1 + max_p: int = 5 + max_q: int = 5 + differencing_degree: int = 0 + distance_metric: str = "Euclidean" + random_state: int = 42 # seed for randomness + + +class AlgorithmArgs(argparse.Namespace): + @staticmethod + def from_sys_args() -> 'AlgorithmArgs': + if len(sys.argv) != 2: + raise ValueError("Wrong number of arguments specified! Single JSON-string pos. argument expected.") + args: dict = json.loads(sys.argv[1]) + custom_parameter_keys = dir(CustomParameters()) + filtered_parameters = dict(filter(lambda x: x[0] in custom_parameter_keys, args.get("customParameters", {}).items())) + args["customParameters"] = CustomParameters(**filtered_parameters) + return AlgorithmArgs(**args) + + +def set_random_state(config: AlgorithmArgs) -> None: + seed = config.customParameters.random_state + import random + random.seed(seed) + np.random.seed(seed) + + +def distance_to_measure(distance_metric): + switcher = { + "euclidean": Euclidean(), + "mahalanobis": Mahalanobis(), + "garch": Garch(), + "ssa": SSA(), + "fourier": Fourier(), + "dtw": DTW(), + "edrs": EDRS(), + "twed": TWED() + } + return switcher.get(distance_metric.lower(), "missing") + + +def main(): + config = AlgorithmArgs.from_sys_args() + ts_filename = config.dataInput # "/data/dataset.csv" + score_filename = config.dataOutput # "/results/anomaly_window_scores.ts" + + print(f"Configuration: {config}") + + if config.executionType == "train": + print("No training required!") + exit(0) + + if config.executionType != "execute": + raise ValueError("Unknown executionType specified!") + + set_random_state(config) + + # read only single "value" column from dataset + print(f"Reading data from {ts_filename}") + da = np.genfromtxt(ts_filename, skip_header=1, delimiter=",") + data = da[:, 1] + labels = da[:, -1] + length = len(data) + contamination = labels.sum() / length + # Use smallest positive float as contamination if there are no anomalies in dataset + contamination = np.nextafter(0, 1) if contamination == 0. 
else contamination + + # run ARIMA + print("Executing ARIMA ...") + model = ARIMA( + window=config.customParameters.window_size, + max_lag=config.customParameters.max_lag, + p_start=config.customParameters.p_start, + q_start=config.customParameters.q_start, + max_p=config.customParameters.max_p, + max_q=config.customParameters.max_q, + d=config.customParameters.differencing_degree, + contamination=contamination, + neighborhood="all") + model.fit(data) + + # get outlier scores + measure = distance_to_measure(config.customParameters.distance_metric) + if measure == "missing": + raise ValueError(f"Distance measure '{config.customParameters.distance_metric}' not supported!") + measure.detector = model + measure.set_param() + model.decision_function(measure=measure) + scores = model.decision_scores_ + + #from ptsa.utils.metrics import metricor + #grader = metricor() + #preds = grader.scale(scores, 0.1) + + print(f"Input size: {len(data)}\nOutput size: {len(scores)}") + print("ARIMA result:", scores) + + print(f"Writing results to {score_filename}") + np.savetxt(score_filename, scores, delimiter=",") + + +if __name__ == "__main__": + main() diff --git a/arima/requirements.txt b/arima/requirements.txt new file mode 100644 index 0000000..cb84d74 --- /dev/null +++ b/arima/requirements.txt @@ -0,0 +1,15 @@ +combo +joblib +matplotlib +numpy>=1.13 +numba>=0.35 +pandas>=0.25 +scipy>=0.19.1 +scikit_learn>=0.19.1 +six +statsmodels +suod +pmdarima +arch +tsfresh +hurst diff --git a/multi_norma/.gitignore b/multi_norma/.gitignore new file mode 100644 index 0000000..0306453 --- /dev/null +++ b/multi_norma/.gitignore @@ -0,0 +1,151 @@ +# Exclude protected source code +multinormats/ +setup.py + +######################################## +# Python.gitignore from github/gitignore +######################################## + +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# PEP 582; used by e.g. 
github.com/David-OConnor/pyflow
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
+
+########################################
+
+
+**.png
diff --git a/multi_norma/Dockerfile b/multi_norma/Dockerfile
new file mode 100644
index 0000000..a319238
--- /dev/null
+++ b/multi_norma/Dockerfile
@@ -0,0 +1,15 @@
+FROM ghcr.io/timeeval/python3-base:0.3.0
+
+LABEL maintainer="ben-noah.engelhaupt@student.hpi.de,leo.wendt@student.hpi.de"
+
+ENV ALGORITHM_MAIN="/app/algorithm.py"
+
+COPY ./requirements.txt .
+RUN pip install -r requirements.txt
+
+COPY setup.py .
+COPY multinormats ./multinormats
+RUN python setup.py install
+
+COPY algorithm.py .
+COPY manifest.json .
diff --git a/multi_norma/README.md b/multi_norma/README.md
index 2c7efba..9101703 100644
--- a/multi_norma/README.md
+++ b/multi_norma/README.md
@@ -10,6 +10,17 @@
 | Input dimensionality | multivariate |
 |||
 
+Getting access to this algorithm involves two steps:
+
+1. Obtain the original NormA implementation from its original authors (see [NormA](../norma/AVAILABILITY.md)).
+2. Obtain the adaptation of the algorithm to multivariate datasets from us after having received the original NormA implementation.
+   We need the original authors' consent to share this code because we re-use significant parts of it.
+
+After receiving the code from us, place the `multinormats` folder within this folder.
+From the original NormA algorithm, we require only the library files:
+place the original `normats.lib` folder within the new `multinormats` folder.
+The C code and the matrix profile implementation are no longer required because we switched to stumpy for MultiNormA.
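+
+A minimal sketch of the assumed placement (the source paths are hypothetical, and the
+exact target location of the library folder follows from the original code's imports):
+
+```bash
+# adapted MultiNormA code (received from us)
+cp -r /path/to/received/multinormats ./multinormats
+# library files of the original NormA (the `normats.lib` folder)
+cp -r /path/to/original/normats/lib ./multinormats/lib
+```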
+
 ## Original Dependencies
 
 - python==3.6
diff --git a/multi_norma/algorithm.py b/multi_norma/algorithm.py
new file mode 100644
index 0000000..3950519
--- /dev/null
+++ b/multi_norma/algorithm.py
@@ -0,0 +1,101 @@
+#!/usr/bin/env python3
+import json
+import sys
+from argparse import Namespace
+from dataclasses import dataclass
+
+import numpy as np
+
+from multinormats import MultiNormA
+
+
+@dataclass
+class CustomParameters:
+    anomaly_window_size: int = 20
+    normal_model_percentage: float = 0.5
+    max_motifs: int = 4096
+    random_state: int = 42
+    motif_detection: str = "mixed"
+    sum_dims: bool = False
+    normalize_join: bool = True
+    join_combine_method: int = 1
+
+
+class AlgorithmArgs(Namespace):
+    @staticmethod
+    def from_sys_args() -> 'AlgorithmArgs':
+        args: dict = json.loads(sys.argv[1])
+        custom_parameter_keys = dir(CustomParameters())
+        filtered_parameters = dict(
+            filter(lambda x: x[0] in custom_parameter_keys, args.get("customParameters", {}).items()))
+        args["customParameters"] = CustomParameters(**filtered_parameters)
+        return AlgorithmArgs(**args)
+
+
+def set_random_state(config: AlgorithmArgs) -> None:
+    seed = config.customParameters.random_state
+    import random
+    random.seed(seed)
+    np.random.seed(seed)
+
+
+def main():
+    config = AlgorithmArgs.from_sys_args()
+    ts_filename = config.dataInput  # "/data/dataset.csv"
+    score_filename = config.dataOutput  # "/results/anomaly_window_scores.ts"
+    execution_type = config.executionType
+    # we ignore model paths, because they are not required
+    window_size = config.customParameters.anomaly_window_size
+    normal_model_percentage = config.customParameters.normal_model_percentage
+    max_motifs = config.customParameters.max_motifs
+    motif_detection = config.customParameters.motif_detection
+    if motif_detection not in ["stomp", "random", "mixed"]:
+        raise ValueError(f"motif_detection (={motif_detection}) must be one of [stomp,random,mixed]!")
+    sum_dims = config.customParameters.sum_dims
+    normalize_join = config.customParameters.normalize_join
+    join_combine_method = config.customParameters.join_combine_method
+
+    # postprocessing window_size = 2 * (window_size - 1) + 1
+
+    set_random_state(config)
+
+    print(f"Configuration: {config}")
+
+    if execution_type == "train":
+        print("No training required!")
+        exit(0)
+    elif execution_type != "execute":
+        raise ValueError(f"Unknown execution type '{execution_type}'; expected either 'train' or 'execute'!")
+
+    # read all value columns from the dataset (every channel except the timestamp and label columns)
+    print(f"Reading data from {ts_filename}")
+    with open(ts_filename, 'r') as f:
+        num_cols = len(f.readline().split(","))
+
+    data = np.genfromtxt(ts_filename, skip_header=1, delimiter=",", usecols=range(1, num_cols - 1))
+    length = len(data)
+
+    # save as a new file to pass to NormA
+    ts_transformed_name = "transformed.csv"
+    np.savetxt(ts_transformed_name, data, delimiter=",")
+
+    # window_size = window_size + np.random.randint(0, 3 * window_size)
+    window_size = max(10, window_size)
+
+    # Run MultiNormA
+    print("Executing MultiNormA ...")
+    norma = MultiNormA(pattern_length=window_size, nm_size=3 * window_size, motif_detection=motif_detection,
+                       sum_dims=sum_dims, apply_normalize_join=normalize_join,
+                       combine_method=join_combine_method)
+    scores = norma.run_motif(ts_transformed_name, tot_length=length, percentage_sel=normal_model_percentage,
+                             max_motifs=max_motifs)
+    print(f"Input size: {len(data)}\nOutput size: {len(scores)}")
+    print("MultiNormA (random NM) result:", scores)
+
+    print(f"Writing results to {score_filename}")
+    
np.savetxt(score_filename, scores, delimiter=",") + + +if __name__ == "__main__": + main() diff --git a/multi_norma/requirements.txt b/multi_norma/requirements.txt new file mode 100644 index 0000000..e7afd6c --- /dev/null +++ b/multi_norma/requirements.txt @@ -0,0 +1,5 @@ +numpy +pandas +scipy +psutil +stumpy diff --git a/norma/.gitignore b/norma/.gitignore new file mode 100644 index 0000000..dfc80c8 --- /dev/null +++ b/norma/.gitignore @@ -0,0 +1,152 @@ +# Exclude protected source code +C/ +normats/ +setup.py + +######################################## +# Python.gitignore from github/gitignore +######################################## + +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +######################################## + + +**.png diff --git a/norma/Dockerfile b/norma/Dockerfile new file mode 100644 index 0000000..bb66221 --- /dev/null +++ b/norma/Dockerfile @@ -0,0 +1,42 @@ +FROM registry.gitlab.hpi.de/akita/i/python3-base:0.2.5 as base + +LABEL maintainer="sebastian.schmidl@hpi.de" + +ENV ALGORITHM_MAIN="/app/algorithm.py" + +# install C dependencies +RUN set -eux; \ + apt-get update; \ + apt-get install -y --no-install-recommends libfftw3-3; \ + apt-get clean; \ + rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* + + +# Separate build image! 
+#----------------------------
+FROM base as build
+
+# install build dependencies
+RUN set -eux; \
+    apt-get update; \
+    apt-get install -y --no-install-recommends libfftw3-dev build-essential; \
+    apt-get clean; \
+    rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
+
+RUN python -m venv --system-site-packages /opt/venv
+ENV PATH="/opt/venv/bin:$PATH"
+
+# build NormA and its dependencies
+COPY C ./C
+COPY setup.py .
+COPY normats ./normats
+RUN python setup.py install
+#----------------------------
+
+
+FROM base
+
+COPY --from=build /opt/venv /opt/venv
+COPY algorithm.py .
+
+ENV PATH="/opt/venv/bin:$PATH"
diff --git a/norma/README.md b/norma/README.md
index 24fc7dd..38cbe51 100644
--- a/norma/README.md
+++ b/norma/README.md
@@ -10,6 +10,8 @@
 | Input dimensionality | univariate |
 |||
 
+After receiving the original source code from the authors, place the directories `C` and `normats` into this folder.
+
 ## Original Dependencies
 
 - python==3.6
diff --git a/norma/algorithm.py b/norma/algorithm.py
new file mode 100644
index 0000000..5e1fa35
--- /dev/null
+++ b/norma/algorithm.py
@@ -0,0 +1,77 @@
+#!/usr/bin/env python3
+import json
+import sys
+from argparse import Namespace
+
+import numpy as np
+
+from dataclasses import dataclass
+from normats import NormA
+
+
+@dataclass
+class CustomParameters:
+    anomaly_window_size: int = 20
+    normal_model_percentage: float = 0.5
+    random_state: int = 42
+
+
+class AlgorithmArgs(Namespace):
+    @staticmethod
+    def from_sys_args() -> 'AlgorithmArgs':
+        args: dict = json.loads(sys.argv[1])
+        custom_parameter_keys = dir(CustomParameters())
+        filtered_parameters = dict(filter(lambda x: x[0] in custom_parameter_keys, args.get("customParameters", {}).items()))
+        args["customParameters"] = CustomParameters(**filtered_parameters)
+        return AlgorithmArgs(**args)
+
+
+def set_random_state(config: AlgorithmArgs) -> None:
+    seed = config.customParameters.random_state
+    import random
+    random.seed(seed)
+    np.random.seed(seed)
+
+
+def main():
+    config = AlgorithmArgs.from_sys_args()
+    ts_filename = config.dataInput  # "/data/dataset.csv"
+    score_filename = config.dataOutput  # "/results/anomaly_window_scores.ts"
+    execution_type = config.executionType
+    # we ignore model paths, because they are not required
+    window_size = config.customParameters.anomaly_window_size
+    normal_model_percentage = config.customParameters.normal_model_percentage
+    # postprocessing window_size = 2 * (window_size - 1) + 1
+
+    set_random_state(config)
+
+    print(f"Configuration: {config}")
+
+    if execution_type == "train":
+        print("No training required!")
+        exit(0)
+    elif execution_type != "execute":
+        raise ValueError(f"Unknown execution type '{execution_type}'; expected either 'train' or 'execute'!")
+
+    # read only single "value" column from dataset
+    print(f"Reading data from {ts_filename}")
+    data = np.genfromtxt(ts_filename, skip_header=1, delimiter=",", usecols=(1,))
+    length = len(data)
+
+    # save as a new file to pass to NormA
+    ts_transformed_name = "transformed.csv"
+    np.savetxt(ts_transformed_name, data, delimiter=",")
+
+    # Run NormA
+    print("Executing NormA ...")
+    norma = NormA(pattern_length=window_size, nm_size=3 * window_size)
+    scores = norma.run_motif(ts_transformed_name, tot_length=length, percentage_sel=normal_model_percentage)
+    print(f"Input size: {len(data)}\nOutput size: {len(scores)}")
+    print("NormA (random NM) result:", scores)
+
+    print(f"Writing results to {score_filename}")
+    np.savetxt(score_filename, scores, delimiter=",")
+
+
+if __name__ == "__main__":
+    main()
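+
+# Hypothetical local invocation for illustration only (inside TimeEval, the JSON
+# string below is constructed and passed in by the framework; the paths and
+# parameter values are examples, not framework defaults):
+#
+#   python algorithm.py '{"executionType": "execute",
+#                         "dataInput": "/data/dataset.csv",
+#                         "dataOutput": "/results/anomaly_window_scores.ts",
+#                         "customParameters": {"anomaly_window_size": 20,
+#                                              "normal_model_percentage": 0.5}}'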
diff --git a/norma/requirements.txt b/norma/requirements.txt new file mode 100644 index 0000000..1c813b6 --- /dev/null +++ b/norma/requirements.txt @@ -0,0 +1,3 @@ +numpy +pandas +scipy diff --git a/sand/.gitignore b/sand/.gitignore new file mode 100644 index 0000000..dc56aa9 --- /dev/null +++ b/sand/.gitignore @@ -0,0 +1,151 @@ +# ingore protected source code +matrix_profile/ +SAND.py + +######################################## +# Python.gitignore from github/gitignore +######################################## + +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +######################################## + + +**.png diff --git a/sand/Dockerfile b/sand/Dockerfile new file mode 100644 index 0000000..a2953c9 --- /dev/null +++ b/sand/Dockerfile @@ -0,0 +1,45 @@ +FROM registry.gitlab.hpi.de/akita/i/python3-base:0.2.5 as base + +LABEL maintainer="sebastian.schmidl@hpi.de" + +ENV ALGORITHM_MAIN="/app/algorithm.py" + +# install C dependencies +RUN set -eux; \ + apt-get update; \ + apt-get install -y --no-install-recommends libfftw3-3; \ + apt-get clean; \ + rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* + + +# Separate build image! 
+#---------------------------- +FROM base as build + +# install build dependencies +RUN set -eux; \ + apt-get update; \ + apt-get install -y --no-install-recommends libfftw3-dev build-essential; \ + apt-get clean; \ + rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* + +RUN python -m venv --system-site-packages /opt/venv +ENV PATH="/opt/venv/bin:$PATH" + +# build and install matrix profile lib +COPY matrix_profile /matrix_profile +RUN cd /matrix_profile && python setup.py install +#---------------------------- + + +FROM base + +ENV PATH="/opt/venv/bin:$PATH" + +COPY --from=build /opt/venv /opt/venv + +COPY requirements.txt . +RUN pip install -r requirements.txt + +COPY SAND.py . +COPY algorithm.py . diff --git a/sand/README.md b/sand/README.md index fba553d..1ed379c 100644 --- a/sand/README.md +++ b/sand/README.md @@ -10,6 +10,8 @@ | Input dimensionality | univariate | ||| +After receiving the original source code from the authors, place the directory `matrix_profile` and the file `SAND.py` into this folder. + ## Original Dependencies - numpy==1.18.5 diff --git a/sand/algorithm.py b/sand/algorithm.py new file mode 100644 index 0000000..3cd140d --- /dev/null +++ b/sand/algorithm.py @@ -0,0 +1,126 @@ +#!/usr/bin/env python3 +import json +import sys +from argparse import Namespace + +import pandas as pd +import numpy as np + +from dataclasses import dataclass +from SAND import SAND + + +@dataclass +class CustomParameters: + anomaly_window_size: int = 75 + n_clusters: int = 6 + n_init_train: int = 2000 + iter_batch_size: int = 500 + alpha: float = 0.5 + random_state: int = 42 + use_column_index: int = 0 + + +class AlgorithmArgs(Namespace): + @staticmethod + def from_sys_args() -> 'AlgorithmArgs': + args: dict = json.loads(sys.argv[1]) + custom_parameter_keys = dir(CustomParameters()) + filtered_parameters = dict(filter(lambda x: x[0] in custom_parameter_keys, args.get("customParameters", {}).items())) + args["customParameters"] = CustomParameters(**filtered_parameters) + return AlgorithmArgs(**args) + + +def set_random_state(config: AlgorithmArgs) -> None: + seed = config.customParameters.random_state + import random + random.seed(seed) + np.random.seed(seed) + + +def load_data(config: AlgorithmArgs) -> np.ndarray: + df = pd.read_csv(config.dataInput) + column_index = 0 + if config.customParameters.use_column_index is not None: + column_index = config.customParameters.use_column_index + max_column_index = df.shape[1] - 3 + if column_index > max_column_index: + print(f"Selected column index {column_index} is out of bounds (columns = {df.columns.values}; " + f"max index = {max_column_index} [column '{df.columns[max_column_index + 1]}'])! 
"
+              "Using last channel!", file=sys.stderr)
+        column_index = max_column_index
+    # jump over index column (timestamp)
+    column_index += 1
+
+    return df.iloc[:, column_index].values
+
+
+def main(config: AlgorithmArgs):
+    set_random_state(config)
+    data = load_data(config)
+    scores = np.full_like(data, fill_value=np.nan)
+    print(f"Data shape: {data.shape}")
+
+    # empirically shown best value
+    subsequence_length = 3 * config.customParameters.anomaly_window_size
+    # Take subsequence every 'overlaping_rate' points
+    # Change it to 1 for completely overlapping subsequences
+    # Change it to 'subsequence_length' for non-overlapping subsequences
+    # Change it to 'subsequence_length//4' for non-trivial matching subsequences
+    # --> use non-trivial matching, but guard against a step size of 0 for very short subsequence lengths
+    overlaping_rate = max(subsequence_length//4, 1)
+    init_size = config.customParameters.n_init_train
+    batch_size = config.customParameters.iter_batch_size
+
+    print(f"Initializing on first {init_size} points")
+    sand = SAND(data,
+                k=config.customParameters.n_clusters,
+                init_length=init_size,
+                batch_size=batch_size,
+                pattern_length=config.customParameters.anomaly_window_size,
+                subsequence_length=subsequence_length,
+                alpha=config.customParameters.alpha,
+                overlaping_rate=overlaping_rate,
+                )
+    sand.initialize()
+
+    # patch sand to compute scores for all initial points (not only the last batch_size)
+    sand.batch_size = init_size
+    scores[:init_size] = sand.compute_score()
+
+    # remove sand patch: reset batch size
+    sand.batch_size = batch_size
+
+    i = 0
+    while sand.current_time < len(data):
+        start = init_size + i*batch_size
+        end = min(init_size + (i+1)*batch_size, len(data))
+
+        if start+subsequence_length >= len(data):
+            print(f"Last batch {i} is too small ({end-start} < {subsequence_length}), skipping")
+            break
+
+        print(f"Computing batch {i} ({start}-{end})")
+        sand.run_next_batch()
+        scores[start:end] = sand.compute_score()
+        i += 1
+
+    print("Storing scores")
+    print(f"Scores shape: {scores.shape}")
+    np.savetxt(config.dataOutput, scores, delimiter=",")
+
+
+if __name__ == "__main__":
+    if len(sys.argv) != 2:
+        print("Wrong number of arguments specified; expected a single json-string!")
+        exit(1)
+
+    config = AlgorithmArgs.from_sys_args()
+    print(f"Config: {config}")
+
+    if config.executionType == "train":
+        print("Nothing to train, finished!")
+    elif config.executionType == "execute":
+        main(config)
+    else:
+        raise ValueError(f"Unknown execution type '{config.executionType}'; expected either 'train' or 'execute'!")
diff --git a/sand/requirements.txt b/sand/requirements.txt
new file mode 100644
index 0000000..5cfd5c1
--- /dev/null
+++ b/sand/requirements.txt
@@ -0,0 +1,5 @@
+numpy
+pandas
+matplotlib
+tslearn
+plotly
diff --git a/series2graph/.dockerignore b/series2graph/.dockerignore
new file mode 100644
index 0000000..c8e87d5
--- /dev/null
+++ b/series2graph/.dockerignore
@@ -0,0 +1,5 @@
+__pycache__/
+**.png
+README.md
+setup.py
+.gitignore
diff --git a/series2graph/.gitignore b/series2graph/.gitignore
new file mode 100644
index 0000000..765d057
--- /dev/null
+++ b/series2graph/.gitignore
@@ -0,0 +1,151 @@
+# ignore protected source code
+series2graph/
+setup.py
+
+########################################
+# Python.gitignore from github/gitignore
+########################################
+
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +######################################## + + +**.png diff --git a/series2graph/Dockerfile b/series2graph/Dockerfile new file mode 100644 index 0000000..5000d02 --- /dev/null +++ b/series2graph/Dockerfile @@ -0,0 +1,19 @@ +FROM registry.gitlab.hpi.de/akita/i/python3-base:0.2.5 + +LABEL maintainer="sebastian.schmidl@hpi.de" + +ENV ALGORITHM_MAIN="/app/algorithm.py" + +# install requirements and cleanup afterwards (also removes tests and cached cython files of the dependencies) +COPY requirements.txt /tmp/ +RUN set -eux; \ + pip install --no-cache-dir -r /tmp/requirements.txt; \ + find /usr/local -depth \ + \( \ + \( -type d -a \( -name test -o -name tests -o -name idle_test \) \) \ + -o \ + \( -type f -a \( -name '*.pyc' -o -name '*.pyo' \) \) \ + \) -exec rm -rf '{}' +; \ + rm -rf /tmp/* /var/tmp/* ~/.cache/pip + +COPY . /app/ diff --git a/series2graph/README.md b/series2graph/README.md index 33ed643..76dece2 100644 --- a/series2graph/README.md +++ b/series2graph/README.md @@ -10,6 +10,8 @@ | Input dimensionality | univariate | ||| +After receiving the original source code from the authors, place the directory `series2graph` into this folder. 
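+
+A minimal sketch of the assumed placement (the source path is hypothetical):
+
+```bash
+# the authors' Series2Graph package, so that `from series2graph import Series2Graph` resolves
+cp -r /path/to/received/series2graph ./series2graph
+```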
+ ## Original Dependencies - python=3 diff --git a/series2graph/algorithm.py b/series2graph/algorithm.py new file mode 100644 index 0000000..6937d97 --- /dev/null +++ b/series2graph/algorithm.py @@ -0,0 +1,93 @@ +#!/usr/bin/env python +import sys, json +import numpy as np +from series2graph import Series2Graph +from pathlib import Path + + +class Config: + dataInput: Path + dataOutput: Path + executionType: str + l: int + ql: int + latent: int + rate: int + random_state: int + + def __init__(self, params): + self.dataInput = Path(params.get("dataInput", "/data/dataset.csv")) + self.dataOutput = Path(params.get("dataOutput", "/results/anomaly_window_scores.ts")) + self.executionType = params.get("executionType", "execute") + # ignore modelInput and modelOutput, because it is not needed + try: + customParameters = params["customParameters"] + except KeyError: + customParameters = {} + self.l = customParameters.get("window_size", 50) + self.ql = customParameters.get("query_window_size", 75) + self.latent = self.l // 3 + self.rate = customParameters.get("rate", 30) + self.random_state = customParameters.get("random_state", 42) + + def __str__(self): + return f"Config("\ + f"dataInput={self.dataInput}, dataOutput={self.dataOutput}, executionType={self.executionType}," \ + f"l={self.l}, ql={self.ql}, latent={self.latent}, rate={self.rate})" + + +class TS(): + def __init__(self, vs): + self.values = vs + + def __repr__(self): + return f"TS({self.values})" + + +def load_ts(filename): + values = np.genfromtxt(filename, skip_header=1, delimiter=",", usecols=(1,)) + return [TS(values)] + + +def main(config): + ts = load_ts(config.dataInput) + print(f"Read input time series from {config.dataInput}:", ts) + + s2g = Series2Graph(pattern_length=config.l, latent=config.latent, rate=config.rate) + s2g.fit(ts) + s2g.score(query_length = config.ql) + + print("len(ts):", len(ts[0].values)) + print("len(scores):", len(s2g.all_score)) + print(f"Anomaly window scores written to {config.dataOutput}:", s2g.all_score) + np.savetxt(config.dataOutput, s2g.all_score, delimiter=",") + + +def parse_args(): + if len(sys.argv) < 2: + print("No arguments supplied, using default arguments!", file=sys.stderr) + params = {} + elif len(sys.argv) > 2: + print("Wrong number of arguments supplied! 
Single JSON-String expected!", file=sys.stderr) + exit(1) + else: + params = json.loads(sys.argv[1]) + return Config(params) + + +def set_random_state(config) -> None: + seed = config.random_state + import random + random.seed(seed) + np.random.seed(seed) + + +if __name__ == "__main__": + config = parse_args() + print(config) + set_random_state(config) + if config.executionType == "train": + print("No training required!") + exit(0) + else: + main(config) diff --git a/series2graph/requirements.txt b/series2graph/requirements.txt new file mode 100755 index 0000000..7107ee6 --- /dev/null +++ b/series2graph/requirements.txt @@ -0,0 +1,6 @@ +numpy +pandas +matplotlib +scikit-learn +scipy +networkx diff --git a/ssa/.gitignore b/ssa/.gitignore new file mode 100644 index 0000000..c654270 --- /dev/null +++ b/ssa/.gitignore @@ -0,0 +1,151 @@ +# ingore protected source code +ptsa/ +setup.py + +######################################## +# Python.gitignore from github/gitignore +######################################## + +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# PEP 582; used by e.g. 
github.com/David-OConnor/pyflow +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +######################################## + + +**.png diff --git a/ssa/Dockerfile b/ssa/Dockerfile new file mode 100644 index 0000000..c4935e2 --- /dev/null +++ b/ssa/Dockerfile @@ -0,0 +1,11 @@ +FROM registry.gitlab.hpi.de/akita/i/python3-base:0.2.5 + +LABEL maintainer="rohan.sawahn@student.hpi.de" + +ENV ALGORITHM_MAIN="/app/algorithm.py" + +COPY requirements.txt /app/ +RUN pip install -r /app/requirements.txt; + +COPY ptsa /app/ptsa +COPY algorithm.py /app/ diff --git a/ssa/README.md b/ssa/README.md index b76c6f4..d2dd238 100644 --- a/ssa/README.md +++ b/ssa/README.md @@ -8,9 +8,10 @@ | Source Code | https://github.com/johnpaparrizos/AnomalyDetection/tree/master/code/ptsa | | Learning type | unsupervised | | Input dimensionality | univariate | - ||| +After receiving the original source code from the authors, place the directory `ptsa` into this folder. + ## Notes SSA works by comparing a reference timeseries to the timeseries that the experiment is being conducted on. diff --git a/ssa/algorithm.py b/ssa/algorithm.py new file mode 100644 index 0000000..ccea745 --- /dev/null +++ b/ssa/algorithm.py @@ -0,0 +1,82 @@ +#!/usr/bin/env python3 + +import json +import sys +import argparse +import numpy as np + +from dataclasses import dataclass +from typing import Union + +from ptsa.models.SSA import SSA + + +@dataclass +class CustomParameters: + ep: int = 3 + window_size: int = 720 + rf_method: str = 'alpha' + alpha: Union[float, int, np.ndarray] = 0.2 + random_state: int = 42 + + +class AlgorithmArgs(argparse.Namespace): + @staticmethod + def from_sys_args() -> 'AlgorithmArgs': + if len(sys.argv) != 2: + raise ValueError("Wrong number of arguments specified! Single JSON-string pos. 
argument expected.") + args: dict = json.loads(sys.argv[1]) + custom_parameter_keys = dir(CustomParameters()) + filtered_parameters = dict(filter(lambda x: x[0] in custom_parameter_keys, args.get("customParameters", {}).items())) + args["customParameters"] = CustomParameters(**filtered_parameters) + return AlgorithmArgs(**args) + + +def set_random_state(config: AlgorithmArgs) -> None: + seed = config.customParameters.random_state + import random + random.seed(seed) + np.random.seed(seed) + + +def main(): + config = AlgorithmArgs.from_sys_args() + set_random_state(config) + ts_filename = config.dataInput # "/data/dataset.csv" + score_filename = config.dataOutput # "/results/anomaly_window_scores.ts" + + print(f"Configuration: {config}") + + if config.executionType == "train": + print("No training required!") + exit(0) + + if config.executionType != "execute": + raise ValueError("Unknown executionType specified!") + + # read only single column from dataset + print(f"Reading data from {ts_filename}") + da = np.genfromtxt(ts_filename, skip_header=1, delimiter=",") + data = da[:, 1] + + # run SSA + print("Executing SSA ...") + model = SSA(a=config.customParameters.alpha, + ep=config.customParameters.ep, + n=config.customParameters.window_size, + rf_method=config.customParameters.rf_method) + model.fit(data) + + # get outlier scores + scores = model.decision_scores_ + scores = np.roll(scores, -config.customParameters.window_size) + + print(f"Input size: {len(data)}\nOutput size: {len(scores)}") + print("SSA result:", scores) + + print(f"Writing results to {score_filename}") + np.savetxt(score_filename, scores, delimiter=",", fmt='%f') + + +if __name__ == "__main__": + main() diff --git a/ssa/requirements.txt b/ssa/requirements.txt new file mode 100644 index 0000000..cb84d74 --- /dev/null +++ b/ssa/requirements.txt @@ -0,0 +1,15 @@ +combo +joblib +matplotlib +numpy>=1.13 +numba>=0.35 +pandas>=0.25 +scipy>=0.19.1 +scikit_learn>=0.19.1 +six +statsmodels +suod +pmdarima +arch +tsfresh +hurst From d86405c526bda6ce7185ce7189755a89ff0170b3 Mon Sep 17 00:00:00 2001 From: CodeLionX Date: Mon, 27 May 2024 15:39:17 +0200 Subject: [PATCH 2/2] fix: base Dockerimage refs --- arima/Dockerfile | 2 +- norma/Dockerfile | 2 +- sand/Dockerfile | 2 +- series2graph/Dockerfile | 2 +- ssa/Dockerfile | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/arima/Dockerfile b/arima/Dockerfile index 4e25a68..7983115 100644 --- a/arima/Dockerfile +++ b/arima/Dockerfile @@ -1,4 +1,4 @@ -FROM registry.gitlab.hpi.de/akita/i/python3-base:0.2.5 +FROM ghcr.io/timeeval/python3-base:0.3.0 LABEL maintainer="thorsten.papenbrock@hpi.de" diff --git a/norma/Dockerfile b/norma/Dockerfile index bb66221..ba28805 100644 --- a/norma/Dockerfile +++ b/norma/Dockerfile @@ -1,4 +1,4 @@ -FROM registry.gitlab.hpi.de/akita/i/python3-base:0.2.5 as base +FROM ghcr.io/timeeval/python3-base:0.3.0 as base LABEL maintainer="sebastian.schmidl@hpi.de" diff --git a/sand/Dockerfile b/sand/Dockerfile index a2953c9..79c5e48 100644 --- a/sand/Dockerfile +++ b/sand/Dockerfile @@ -1,4 +1,4 @@ -FROM registry.gitlab.hpi.de/akita/i/python3-base:0.2.5 as base +FROM ghcr.io/timeeval/python3-base:0.3.0 as base LABEL maintainer="sebastian.schmidl@hpi.de" diff --git a/series2graph/Dockerfile b/series2graph/Dockerfile index 5000d02..c72a215 100644 --- a/series2graph/Dockerfile +++ b/series2graph/Dockerfile @@ -1,4 +1,4 @@ -FROM registry.gitlab.hpi.de/akita/i/python3-base:0.2.5 +FROM ghcr.io/timeeval/python3-base:0.3.0 LABEL 
maintainer="sebastian.schmidl@hpi.de" diff --git a/ssa/Dockerfile b/ssa/Dockerfile index c4935e2..ada774c 100644 --- a/ssa/Dockerfile +++ b/ssa/Dockerfile @@ -1,4 +1,4 @@ -FROM registry.gitlab.hpi.de/akita/i/python3-base:0.2.5 +FROM ghcr.io/timeeval/python3-base:0.3.0 LABEL maintainer="rohan.sawahn@student.hpi.de"
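
For reference, a minimal sketch of building one of these integration images once the restricted sources are in place (the image tag is only an example, not an official one):

```bash
# e.g. NormA: assumes the authors' `C/` and `normats/` directories were placed in norma/
docker build -t norma-restricted ./norma
```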