Merge pull request #28 from databricks-industry-solutions/neuralforecast-update

update neuralforecast
ryuta-yoshimatsu authored May 17, 2024
2 parents 54fee6d + 1bf90a2 commit 1da747e
Showing 15 changed files with 1,166 additions and 649 deletions.
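Across the demo notebooks below, the update renames the run_forecast argument metrics_output to evaluation_output, adds a model_output location, enables NeuralForecastRNN in the active model lists, and (in the daily demo) passes accelerator="gpu". A minimal sketch of the updated daily call, limited to the arguments visible in this diff; anything not shown here (horizon, frequency, backtest settings, and the full model list) is omitted and assumed unchanged:

from forecasting_sa import run_forecast

# Catalog and schema names are inferred from the %sql cells below (solacc_uc.mmf).
catalog = "solacc_uc"
db = "mmf"

active_models = [
    "NeuralForecastRNN",  # newly enabled by this PR
    # other models remain commented out in the notebook and can be re-enabled as needed
]

run_forecast(
    train_data="mmf_train",
    scoring_data="mmf_train",
    scoring_output=f"{catalog}.{db}.daily_scoring_output",
    evaluation_output=f"{catalog}.{db}.daily_evaluation_output",  # renamed from metrics_output
    model_output=f"{catalog}.{db}",                               # new argument in this update
    group_id="unique_id",
    date_col="ds",
    target="y",
    active_models=active_models,
    experiment_path="/Shared/mmf_experiment",
    use_case_name="mmf",
    accelerator="gpu",                                            # new argument in this update
    # remaining arguments (horizon, frequency, backtest settings) are not shown in this diff and omitted here
)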
39 changes: 12 additions & 27 deletions 01_mmf_univariate_daily_demo.py → demo_global_daily.py
@@ -77,28 +77,7 @@ def create_m4_df():
# COMMAND ----------

active_models = [
"StatsForecastBaselineWindowAverage",
#"StatsForecastBaselineSeasonalWindowAverage",
#"StatsForecastBaselineNaive",
#"StatsForecastBaselineSeasonalNaive",
"StatsForecastAutoArima",
#"StatsForecastAutoETS",
#"StatsForecastAutoCES",
#"StatsForecastAutoTheta",
#"StatsForecastTSB",
#"StatsForecastADIDA",
#"StatsForecastIMAPA",
#"StatsForecastCrostonClassic",
#"StatsForecastCrostonOptimized",
#"StatsForecastCrostonSBA",
"RFableArima",
#"RFableETS",
#"RFableNNETAR",
#"RFableEnsemble",
#"RDynamicHarmonicRegression",
"SKTimeTBats",
#"SKTimeLgbmDsDt",
#"NeuralForecastRNN",
"NeuralForecastRNN",
#"NeuralForecastLSTM",
#"NeuralForecastNBEATSx",
#"NeuralForecastNHITS",
@@ -128,7 +107,8 @@ def create_m4_df():
train_data="mmf_train",
scoring_data="mmf_train",
scoring_output=f"{catalog}.{db}.daily_scoring_output",
metrics_output=f"{catalog}.{db}.daily_metrics_output",
evaluation_output=f"{catalog}.{db}.daily_evaluation_output",
model_output=f"{catalog}.{db}",
group_id="unique_id",
date_col="ds",
target="y",
@@ -147,16 +127,17 @@ def create_m4_df():
active_models=active_models,
experiment_path=f"/Shared/mmf_experiment",
use_case_name="mmf",
accelerator="gpu",
)

# COMMAND ----------

# MAGIC %md ### Metrics output
# MAGIC In the metrics output table, the metrics for all backtest windows and all models are stored. This info can be used to monitor model performance or decide which models should be taken into the final aggregated forecast.
# MAGIC %md ### Evaluation output
# MAGIC In the evaluation output table, the evaluations for all backtest windows and all models are stored. This info can be used to monitor model performance or decide which models should be taken into the final aggregated forecast.

# COMMAND ----------

# MAGIC %sql select * from solacc_uc.mmf.daily_metrics_output order by unique_id, model, backtest_window_start_date
# MAGIC %sql select * from solacc_uc.mmf.daily_evaluation_output order by unique_id, model, backtest_window_start_date

# COMMAND ----------

@@ -178,7 +159,11 @@ def create_m4_df():

# COMMAND ----------

# MAGIC %sql delete from solacc_uc.mmf.daily_metrics_output
# MAGIC %md ### Delete Tables

# COMMAND ----------

# MAGIC %sql delete from solacc_uc.mmf.daily_evaluation_output

# COMMAND ----------

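The daily_evaluation_output table selected above stores evaluations for every model and backtest window, and the notebook suggests using it to monitor performance and to decide which models enter the final aggregated forecast. A minimal PySpark sketch of that comparison, assuming a numeric metric column hypothetically named metric_value (the real column names come from the forecasting_sa schema) and that lower values are better:

from pyspark.sql import SparkSession, functions as F

# `spark` is predefined in a Databricks notebook; getOrCreate() keeps the sketch self-contained.
spark = SparkSession.builder.getOrCreate()

evals = spark.table("solacc_uc.mmf.daily_evaluation_output")

# Average the backtest metric over all windows and series for each model,
# then rank the models from best (lowest) to worst.
model_ranking = (
    evals
    .groupBy("model")
    .agg(F.avg("metric_value").alias("avg_metric"))  # metric_value is a hypothetical column name
    .orderBy("avg_metric")
)
model_ranking.show()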
@@ -59,28 +59,7 @@
from forecasting_sa import run_forecast

active_models = [
#"StatsForecastBaselineWindowAverage",
#"StatsForecastBaselineSeasonalWindowAverage",
#"StatsForecastBaselineNaive",
#"StatsForecastBaselineSeasonalNaive",
#"StatsForecastAutoArima",
#"StatsForecastAutoETS",
#"StatsForecastAutoCES",
#"StatsForecastAutoTheta",
#"StatsForecastTSB",
#"StatsForecastADIDA",
#"StatsForecastIMAPA",
#"StatsForecastCrostonClassic",
#"StatsForecastCrostonOptimized",
#"StatsForecastCrostonSBA",
#"RFableArima",
#"RFableETS",
#"RFableNNETAR",
#"RFableEnsemble",
#"RDynamicHarmonicRegression",
"SKTimeLgbmDsDt",
#"SKTimeTBats",
#"NeuralForecastRNN",
"NeuralForecastRNN",
#"NeuralForecastLSTM",
#"NeuralForecastNBEATSx",
#"NeuralForecastNHITS",
@@ -95,7 +74,8 @@
train_data=f"{catalog}.{db}.rossmann_train",
scoring_data=f"{catalog}.{db}.rossmann_test",
scoring_output=f"{catalog}.{db}.rossmann_scoring_output",
metrics_output=f"{catalog}.{db}.rossmann_metrics_output",
evaluation_output=f"{catalog}.{db}.rossmann_evaluation_output",
model_output=f"{catalog}.{db}",
group_id="Store",
date_col="Date",
target="Sales",
@@ -120,7 +100,7 @@

# COMMAND ----------

# MAGIC %sql select * from solacc_uc.mmf.rossmann_metrics_output order by Store, model, backtest_window_start_date
# MAGIC %sql select * from solacc_uc.mmf.rossmann_evaluation_output order by Store, model, backtest_window_start_date

# COMMAND ----------

@@ -132,7 +112,11 @@

# COMMAND ----------

# MAGIC %sql delete from solacc_uc.mmf.rossmann_metrics_output
# MAGIC %md ### Delete Tables

# COMMAND ----------

# MAGIC %sql delete from solacc_uc.mmf.rossmann_evaluation_output

# COMMAND ----------

42 changes: 13 additions & 29 deletions 01_mmf_univariate_monthly_demo.py → demo_global_monthly.py
@@ -88,28 +88,7 @@ def transform_group(df):
# COMMAND ----------

active_models = [
"StatsForecastBaselineWindowAverage",
#"StatsForecastBaselineSeasonalWindowAverage",
#"StatsForecastBaselineNaive",
#"StatsForecastBaselineSeasonalNaive",
"StatsForecastAutoArima",
#"StatsForecastAutoETS",
#"StatsForecastAutoCES",
#"StatsForecastAutoTheta",
#"StatsForecastTSB",
#"StatsForecastADIDA",
#"StatsForecastIMAPA",
#"StatsForecastCrostonClassic",
#"StatsForecastCrostonOptimized",
#"StatsForecastCrostonSBA",
"RFableArima",
#"RFableETS",
#"RFableNNETAR",
#"RFableEnsemble",
#"RDynamicHarmonicRegression",
"SKTimeTBats",
#"SKTimeLgbmDsDt",
#"NeuralForecastRNN",
"NeuralForecastRNN",
#"NeuralForecastLSTM",
#"NeuralForecastNBEATSx",
#"NeuralForecastNHITS",
@@ -139,7 +118,8 @@ def transform_group(df):
train_data="mmf_train",
scoring_data="mmf_train",
scoring_output=f"{catalog}.{db}.monthly_scoring_output",
metrics_output=f"{catalog}.{db}.monthly_metrics_output",
evaluation_output=f"{catalog}.{db}.monthly_evaluation_output",
model_output=f"{catalog}.{db}",
group_id="unique_id",
date_col="date",
target="y",
@@ -162,16 +142,16 @@ def transform_group(df):

# COMMAND ----------

# MAGIC %md ### Metrics output
# MAGIC In the metrics output table, the metrics for all backtest windows and all models are stored. This info can be used to monitor model performance or decide which models should be taken into the final aggregated forecast.
# MAGIC %md ### Evaluation Output
# MAGIC In the evaluation output table, the evaluations for all backtest windows and all models are stored. This info can be used to monitor model performance or decide which models should be taken into the final aggregated forecast.

# COMMAND ----------

# MAGIC %sql select * from solacc_uc.mmf.monthly_metrics_output order by unique_id, model, backtest_window_start_date
# MAGIC %sql select * from solacc_uc.mmf.monthly_evaluation_output order by unique_id, model, backtest_window_start_date

# COMMAND ----------

# MAGIC %md ### Forecast output
# MAGIC %md ### Forecast Output
# MAGIC In the Forecast output table, the final forecast for each model and each time series is stored.

# COMMAND ----------
@@ -180,7 +160,7 @@ def transform_group(df):

# COMMAND ----------

# MAGIC %md ### Final Ensemble Output
# MAGIC %md ### Ensemble Output
# MAGIC In the final ensemble output table, we store the averaged forecast. The models that meet the threshold defined by the ensembling parameters are taken into consideration.

# COMMAND ----------
@@ -189,7 +169,11 @@ def transform_group(df):

# COMMAND ----------

# MAGIC %sql delete from solacc_uc.mmf.monthly_metrics_output
# MAGIC %md ### Delete Tables

# COMMAND ----------

# MAGIC %sql delete from solacc_uc.mmf.monthly_evaluation_output

# COMMAND ----------

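The ensemble output described in the monthly notebook above stores an averaged forecast built from the models that clear a threshold set by the ensembling parameters. The library writes that table itself; the sketch below only illustrates the idea against the monthly tables, using hypothetical column names (metric_value for the backtest metric, y_hat for the per-model forecast) and an illustrative threshold, none of which are taken from the actual forecasting_sa schema or defaults:

from pyspark.sql import SparkSession, functions as F

spark = SparkSession.builder.getOrCreate()  # predefined as `spark` in a Databricks notebook

METRIC_THRESHOLD = 0.5  # illustrative only; the real cutoff comes from the ensembling parameters

evals = spark.table("solacc_uc.mmf.monthly_evaluation_output")
scores = spark.table("solacc_uc.mmf.monthly_scoring_output")

# Keep the models whose averaged backtest metric clears the threshold...
qualifying = (
    evals
    .groupBy("model")
    .agg(F.avg("metric_value").alias("avg_metric"))   # metric_value is a hypothetical column name
    .filter(F.col("avg_metric") <= METRIC_THRESHOLD)
    .select("model")
)

# ...and average their forecasts per series and month into a single ensemble forecast.
ensemble = (
    scores
    .join(qualifying, on="model", how="inner")
    .groupBy("unique_id", "date")
    .agg(F.avg("y_hat").alias("ensemble_forecast"))   # y_hat is a hypothetical column name
)
ensemble.show()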
