from sklearn.pipeline import make_pipeline
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
import pandas as pd

## logistic regression parameter config
parameters = {
    "penalty": "l2",
    "C": 1.0,
    "max_iter": 100,
}

## use a standard scaler and logistic regression
scaler = StandardScaler()
logistic_regression = LogisticRegression(
    penalty=parameters["penalty"],
    C=parameters["C"],
    max_iter=parameters["max_iter"],
)

## make a pipeline out of them
pipeline = make_pipeline(scaler, logistic_regression)

## get our data from the feature store and create a train/test split
## (feature_store is assumed to be defined elsewhere)
data = feature_store.get_all_data()
X_train, X_test, y_train, y_test = train_test_split(
    data[["feature_1", "feature_2", "feature_3"]], data["target"]
)

## fit the model
pipeline.fit(X_train, y_train)

## get the test score (mean accuracy on the held-out split)
score = pipeline.score(X_test, y_test)

## record the model, score, and parameters to the metadata store
## (record_model is assumed to be defined elsewhere)
record_model(pipeline, score, parameters)

## view the recorded metadata
pd.read_csv("metadata_store.csv")
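The snippet above relies on a feature_store object and a record_model helper that are not part of scikit-learn and are not defined in this gist. Below is a minimal sketch of what they might look like, assuming the feature store is a single CSV of features plus a target column and the metadata store is the metadata_store.csv file read at the end; the class, file paths, and column names here are illustrative assumptions, not from the original gist.

import datetime
import json
import os

import joblib
import pandas as pd


class FeatureStore:
    ## hypothetical feature store backed by a single CSV file
    def __init__(self, path="features.csv"):
        self.path = path

    def get_all_data(self):
        ## return every row of features + target as a DataFrame
        return pd.read_csv(self.path)


def record_model(model, score, parameters, path="metadata_store.csv"):
    ## hypothetical metadata-store writer: persist the fitted model
    ## and append one row of metadata to a CSV file
    model_path = f"model_{datetime.datetime.now():%Y%m%d%H%M%S}.joblib"
    joblib.dump(model, model_path)
    row = pd.DataFrame(
        [{
            "model_path": model_path,
            "score": score,
            "parameters": json.dumps(parameters),
            "recorded_at": datetime.datetime.now().isoformat(),
        }]
    )
    ## write the header only if the metadata store does not exist yet
    row.to_csv(path, mode="a", header=not os.path.exists(path), index=False)


feature_store = FeatureStore()

With these definitions in scope, the training snippet runs end to end: get_all_data() supplies the training frame, and record_model() appends the score and parameter config to the same CSV that pd.read_csv("metadata_store.csv") displays at the end.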