diff --git a/Stock_Price_Prediction(Updated).ipynb b/Stock_Price_Prediction(Updated).ipynb
index 84a90cd..15f1fc8 100644
--- a/Stock_Price_Prediction(Updated).ipynb
+++ b/Stock_Price_Prediction(Updated).ipynb
@@ -5220,7 +5220,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "This code demonstrates a simple ensemble method using random forest and AdaBoost. It combines the predictions of these two models by taking their average. You can experiment with different ensemble techniques like stacking or voting, and adjust the hyperparameters of the individual models to find the best combination for your specific problem."
+    "### Ensemble method using random forest and AdaBoost"
    ]
   },
   {
@@ -5255,6 +5255,42 @@
     "ensemble_mse = mean_squared_error(y_test, ensemble_predictions)\n",
     "print(\"Ensemble MSE:\", ensemble_mse)"
    ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Validation"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error\n",
+    "\n",
+    "# Assuming we have the true targets (y_test) and the ensemble predictions (ensemble_predictions)\n",
+    "\n",
+    "# Calculate R^2 (accuracy_score is a classification metric and does not apply to continuous prices)\n",
+    "r2 = r2_score(y_test, ensemble_predictions)\n",
+    "print(\"R^2:\", r2)\n",
+    "\n",
+    "# Calculate RMSE\n",
+    "rmse = mean_squared_error(y_test, ensemble_predictions, squared=False)\n",
+    "print(\"RMSE:\", rmse)\n",
+    "\n",
+    "# Calculate MAE\n",
+    "mae = mean_absolute_error(y_test, ensemble_predictions)\n",
+    "print(\"MAE:\", mae)\n",
+    "\n",
+    "# Other relevant metrics\n",
+    "# For example, if your target variable is categorical:\n",
+    "# precision = precision_score(y_test, ensemble_predictions.round())\n",
+    "# recall = recall_score(y_test, ensemble_predictions.round())\n",
+    "# f1 = f1_score(y_test, ensemble_predictions.round())"
+   ]
   }
  ],
  "metadata": {
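
For reference, a minimal sketch of the averaging ensemble and regression-style validation that the added cells describe. It assumes an existing train/test split (`X_train`, `X_test`, `y_train`, `y_test`) of the price data; the estimator hyperparameters are illustrative, not the notebook's actual values.

```python
import numpy as np
from sklearn.ensemble import RandomForestRegressor, AdaBoostRegressor
from sklearn.metrics import mean_squared_error, mean_absolute_error

# Two base regressors; n_estimators/random_state are assumed values for illustration
rf = RandomForestRegressor(n_estimators=100, random_state=42)
ada = AdaBoostRegressor(n_estimators=100, random_state=42)

rf.fit(X_train, y_train)
ada.fit(X_train, y_train)

# Ensemble by averaging the two models' predictions
ensemble_predictions = (rf.predict(X_test) + ada.predict(X_test)) / 2

# Regression validation on the held-out set
rmse = np.sqrt(mean_squared_error(y_test, ensemble_predictions))
mae = mean_absolute_error(y_test, ensemble_predictions)
print("Ensemble RMSE:", rmse)
print("Ensemble MAE:", mae)
```

Simple averaging weights both models equally; scikit-learn's `VotingRegressor` or `StackingRegressor` are drop-in alternatives if you want to experiment with the voting or stacking approaches mentioned in the original markdown cell.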