Differences

This page shows the differences between two revisions of
de:modul:m245:learningunits:lu02:loesungen:l02
(2026/01/05 13:26, created by vdemir – 2026/01/05 13:32, current, section [3. Modellvergleich], by vdemir).
===== Python Script =====

<code python>
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score
import joblib

# -----------------------------
# Load data
# -----------------------------
# NOTE: file and column names are placeholders; adapt them to the exercise data set.
data = pd.read_csv("data.csv")

X = data.drop("target", axis=1)   # features: all columns except the target
y = data["target"]                # label to predict

# -----------------------------
# Train / test split
# -----------------------------
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42   # fixed seed for reproducibility
)

# -----------------------------
# Model 1: logistic regression
# -----------------------------
log_reg_pipeline = Pipeline([
    ("scaler", StandardScaler()),     # logistic regression benefits from standardized features
    ("model", LogisticRegression())
])

log_reg_pipeline.fit(X_train, y_train)
y_pred_lr = log_reg_pipeline.predict(X_test)

print("=== Logistic Regression ===")
print("Accuracy:", accuracy_score(y_test, y_pred_lr))

# -----------------------------
# Model 2: decision tree
# -----------------------------
tree_model = DecisionTreeClassifier(random_state=42)
tree_model.fit(X_train, y_train)
y_pred_tree = tree_model.predict(X_test)

print("=== Decision Tree ===")
print("Accuracy:", accuracy_score(y_test, y_pred_tree))

# -----------------------------
# Save the best model
# -----------------------------
joblib.dump(log_reg_pipeline, "best_model.joblib")   # placeholder file name

# -----------------------------
# Prediction for a new customer
# -----------------------------
# NOTE: the feature names and values below are placeholders.
new_customer = pd.DataFrame([{
    "feature_1": 0,
    "feature_2": 0,
    "feature_3": 0,
}])

loaded_model = joblib.load("best_model.joblib")
prediction = loaded_model.predict(new_customer)

print("Prediction for the new customer:", prediction[0])
</code>
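The script above compares the two models on a single train/test split. The same comparison can be made more robust with k-fold cross-validation. Below is a minimal sketch, assuming synthetic data from make_classification in place of the exercise's CSV file; the variable names are illustrative and not part of the original solution.

<code python>
# Sketch: cross-validated comparison of the two models (assumed setup).
from sklearn.datasets import make_classification
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier

# Synthetic stand-in for the exercise data set.
X, y = make_classification(n_samples=300, n_features=5, random_state=42)

models = {
    "logistic regression": Pipeline([
        ("scaler", StandardScaler()),
        ("model", LogisticRegression()),
    ]),
    "decision tree": DecisionTreeClassifier(random_state=42),
}

# 5-fold cross-validation reports a mean accuracy plus its spread,
# which is more informative than a single train/test split.
for name, model in models.items():
    scores = cross_val_score(model, X, y, cv=5, scoring="accuracy")
    print(f"{name}: {scores.mean():.3f} +/- {scores.std():.3f}")
</code>

A large spread across the folds would indicate that a single-split comparison, as in the script above, is not a reliable basis for picking the "best" model.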
===== Model Comparison =====

^ Criterion ^ Logistic Regression ^ Decision Tree ^
| Interpretability | high | medium |
| Risk of overfitting | low | high |
| Scaling required | yes | no |
| Didactically useful | very | yes |
Conclusion:
On small, clean data sets, logistic regression is usually the more stable choice.
Decision trees are easy to interpret visually, but more prone to overfitting.
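The "scaling required" row of the table can be demonstrated directly: a decision tree splits each feature against a threshold, so standardizing the features does not change its predictions, whereas scikit-learn's LogisticRegression applies an L2 penalty by default and is therefore sensitive to feature scale. The following is a minimal sketch on synthetic data; the setup is an assumption and not part of the original solution.

<code python>
# Sketch: effect of standardization on the two model types (synthetic data, illustrative only).
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score

X, y = make_classification(n_samples=300, n_features=5, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

scaler = StandardScaler().fit(X_train)
X_train_s, X_test_s = scaler.transform(X_train), scaler.transform(X_test)

# Decision tree: splits on per-feature thresholds, so rescaling each feature
# monotonically leaves the learned partition (and the predictions) unchanged.
tree_raw = DecisionTreeClassifier(random_state=0).fit(X_train, y_train)
tree_std = DecisionTreeClassifier(random_state=0).fit(X_train_s, y_train)
print("tree predictions identical:",
      (tree_raw.predict(X_test) == tree_std.predict(X_test_s)).all())

# Logistic regression: the default L2 penalty depends on coefficient scale,
# so results on raw vs. standardized features can differ.
lr_raw = LogisticRegression(max_iter=1000).fit(X_train, y_train)
lr_std = LogisticRegression(max_iter=1000).fit(X_train_s, y_train)
print("logistic regression accuracy raw vs. standardized:",
      accuracy_score(y_test, lr_raw.predict(X_test)),
      accuracy_score(y_test, lr_std.predict(X_test_s)))
</code>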