All parameters at a glance

The following JSON object shows an example configuration with all available parameters; the sketches after the object illustrate how the `parameters`, `trainingparameters`, and `hyperparameters` blocks could be used with Keras and KerasTuner.
{
  "id": 1,
  "gai": "example_gai",
  "target_attribute": "target",
  "forecast_length": 10,
  "context_length": 10,
  "feature_attributes": ["feature1", "feature2", "feature3"],
  "start_date": "2024-01-01",
  "parameters": {
    "num_classes": null,
    "binary_encoding": null,
    "num_lstm_layers": 2,
    "lstm_units": 50,
    "activation": "tanh",
    "learning_rate": 0.001,
    "optimizer_type": "adam",
    "clipnorm": null,
    "loss": "mean_squared_error",
    "dropout_rate": 0.0,
    "recurrent_dropout_rate": 0.0,
    "num_dense_layers": 0,
    "dense_units": 50,
    "dense_activation": "relu",
    "use_batch_norm": false,
    "metrics": ["mse"],
    "stateful": true,
    "batch_size": 1
  },
  "trainingparameters": {
    "epochs": 50,
    "patience": 5,
    "sleep_time": 3600,
    "percentage_data_when_to_retrain": 1.15,
    "validation_split": 0.2,
    "objective": "val_loss"
  },
  "hyperparameters": {
    "activation": ["tanh", "relu", "sigmoid", "linear"],
    "dense_activation": ["tanh", "relu", "sigmoid", "linear"],
    "num_lstm_layers": { "min_value": 1, "max_value": 4, "step": 1 },
    "lstm_units": { "min_value": 32, "max_value": 256, "step": 32 },
    "dropout_rate": { "min_value": 0.0, "max_value": 0.5, "step": 0.1 },
    "recurrent_dropout_rate": { "min_value": 0.0, "max_value": 0.5, "step": 0.1 },
    "num_dense_layers": { "min_value": 0, "max_value": 3, "step": 1 },
    "dense_units": { "min_value": 32, "max_value": 256, "step": 32 },
    "learning_rate": { "min_value": 0.0001, "max_value": 0.01, "sampling": "log" },
    "optimizer_type": ["adam", "sgd", "rmsprop", "adagrad", "adadelta", "adamax", "nadam"],
    "use_batch_norm": false,
    "percent_data": 0.01,
    "max_trials": 100,
    "best_trails_percent": 0.05
  },
  "train": false,
  "forecast": false,
  "datalength": 0,
  "latest_timestamp": null,
  "scaler": "",
  "state": null,
  "processing_status": "new"
}
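The keys in the `parameters` block correspond to the arguments of a stacked Keras LSTM model. The sketch below shows one plausible way to build and compile such a model from this block, assuming the JSON above has been parsed into a Python dict named `config`; the `build_model` helper and its structure are assumptions, not the service's actual implementation. Only the configuration fields themselves come from the JSON above.

```python
# Illustrative sketch (not the service's actual code): building and compiling a
# Keras model from the "parameters" block of the configuration above.
from tensorflow import keras
from tensorflow.keras import layers


def build_model(config: dict) -> keras.Model:
    p = config["parameters"]
    n_features = len(config["feature_attributes"])

    model = keras.Sequential()
    # A stateful LSTM needs a fixed batch size, so it is part of the input spec.
    model.add(keras.Input(shape=(config["context_length"], n_features),
                          batch_size=p["batch_size"]))

    for i in range(p["num_lstm_layers"]):
        model.add(layers.LSTM(
            p["lstm_units"],
            activation=p["activation"],
            dropout=p["dropout_rate"],
            recurrent_dropout=p["recurrent_dropout_rate"],
            stateful=p["stateful"],
            # All but the last LSTM layer return sequences so they can be stacked.
            return_sequences=(i < p["num_lstm_layers"] - 1)))

    for _ in range(p["num_dense_layers"]):
        model.add(layers.Dense(p["dense_units"], activation=p["dense_activation"]))
        if p["use_batch_norm"]:
            model.add(layers.BatchNormalization())

    # One output unit per forecast step.
    model.add(layers.Dense(config["forecast_length"]))

    optimizer_classes = {
        "adam": keras.optimizers.Adam, "sgd": keras.optimizers.SGD,
        "rmsprop": keras.optimizers.RMSprop, "adagrad": keras.optimizers.Adagrad,
        "adadelta": keras.optimizers.Adadelta, "adamax": keras.optimizers.Adamax,
        "nadam": keras.optimizers.Nadam,
    }
    opt_kwargs = {"learning_rate": p["learning_rate"]}
    if p["clipnorm"] is not None:
        opt_kwargs["clipnorm"] = p["clipnorm"]  # gradient clipping only if configured
    optimizer = optimizer_classes[p["optimizer_type"]](**opt_kwargs)

    model.compile(optimizer=optimizer, loss=p["loss"], metrics=p["metrics"])
    return model
```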
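The `trainingparameters` block holds standard Keras training settings: `epochs` and `validation_split` go straight into `model.fit(...)`, while `patience` and `objective` describe early stopping. A minimal sketch, assuming `config`, a compiled `model`, and placeholder training arrays `X_train`/`y_train` already exist:

```python
# Illustrative sketch: driving model.fit with the "trainingparameters" block.
from tensorflow import keras

tp = config["trainingparameters"]
early_stop = keras.callbacks.EarlyStopping(
    monitor=tp["objective"],      # "val_loss"
    patience=tp["patience"],      # stop after 5 epochs without improvement
    restore_best_weights=True,
)

model.fit(
    X_train, y_train,
    epochs=tp["epochs"],                        # 50
    validation_split=tp["validation_split"],    # 0.2
    batch_size=config["parameters"]["batch_size"],
    shuffle=False,                              # keep order for the stateful LSTM
    callbacks=[early_stop],
)
```

`sleep_time` and `percentage_data_when_to_retrain` have no Keras counterpart; presumably they control how often the service checks for new data and how much growth in data volume (here a factor of 1.15) triggers a retraining.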
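The ranges under `hyperparameters` use the parameter names of the KerasTuner API (`min_value`, `max_value`, `step`, `sampling`), so they can be translated into `hp.Int`, `hp.Float`, and `hp.Choice` definitions. The sketch below shows that mapping and reuses the hypothetical `build_model` helper from the first sketch; the tuner type (`RandomSearch`) and the function names are assumptions, while the ranges, `objective`, and `max_trials` come from the configuration.

```python
# Illustrative sketch: translating the "hyperparameters" ranges into a KerasTuner search.
import keras_tuner as kt


def model_for_trial(hp, config):
    h = config["hyperparameters"]
    trial = dict(config["parameters"])  # start from the defaults, then override
    trial.update({
        "activation": hp.Choice("activation", h["activation"]),
        "dense_activation": hp.Choice("dense_activation", h["dense_activation"]),
        "num_lstm_layers": hp.Int("num_lstm_layers", **h["num_lstm_layers"]),
        "lstm_units": hp.Int("lstm_units", **h["lstm_units"]),
        "dropout_rate": hp.Float("dropout_rate", **h["dropout_rate"]),
        "recurrent_dropout_rate": hp.Float("recurrent_dropout_rate",
                                           **h["recurrent_dropout_rate"]),
        "num_dense_layers": hp.Int("num_dense_layers", **h["num_dense_layers"]),
        "dense_units": hp.Int("dense_units", **h["dense_units"]),
        "learning_rate": hp.Float("learning_rate", **h["learning_rate"]),  # log sampling
        "optimizer_type": hp.Choice("optimizer_type", h["optimizer_type"]),
    })
    return build_model({**config, "parameters": trial})


tuner = kt.RandomSearch(
    lambda hp: model_for_trial(hp, config),
    objective=config["trainingparameters"]["objective"],  # "val_loss"
    max_trials=config["hyperparameters"]["max_trials"],   # 100
)
```

`use_batch_norm` is fixed to `false` here rather than searched, and `percent_data` and `best_trails_percent` are not KerasTuner arguments; presumably they control how much of the data the search uses per trial and which fraction of the best trials is kept.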