顾老师新书《全栈软件测试工程师宝典》
https://item.m.jd.com/product/10023427978355.html
以前两本书的网上购买地址:
《软件测试技术实战设计、工具及管理》:
https://item.jd.com/34295655089.html
《基于Django的电子商务网站》:
https://item.jd.com/12082665.html
import warnings

from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.ensemble import (AdaBoostClassifier, AdaBoostRegressor,
                              RandomForestClassifier, RandomForestRegressor,
                              VotingClassifier)
from sklearn.linear_model import (ElasticNet, Lasso, LinearRegression,
                                  LogisticRegression, Ridge)
from sklearn.model_selection import (GridSearchCV, ShuffleSplit,
                                     train_test_split)
from sklearn.naive_bayes import BernoulliNB, GaussianNB, MultinomialNB
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
from sklearn.neural_network import MLPClassifier, MLPRegressor
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.svm import SVC, SVR, LinearSVC, LinearSVR
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
class Better:
    """Search many sklearn estimators for the best model on a named dataset.

    Builds a list of GridSearchCV parameter grids, one per candidate
    estimator, each combined with a choice of feature scaler
    (StandardScaler / MinMaxScaler / none), then runs a single
    GridSearchCV over all of them through a shared Pipeline.
    """

    def __init__(self, data):
        """Prepare the parameter grids.

        Args:
            data: dataset name, e.g. "iris", "wine", "breast_cancer",
                "diabetes" or "boston" (forwarded to Machine_Learn.get_data).
        """
        self.n_jobs = 2
        self.data = data
        # Shared candidate scalers; GridSearchCV clones estimators, so
        # reusing the same instances across grids is safe.
        scalers = [StandardScaler(), MinMaxScaler(), None]
        alphas = [1, 0.1, 0.001, 0.0001]
        # NOTE: the wide random_state sweeps below make the grid large;
        # trim them if the search takes too long.
        seed_sweep = list(range(1, 200))
        self.prams = [
            {'reg': [LinearRegression()], 'scaler': scalers,
             'reg__n_jobs': [self.n_jobs]},
            {'reg': [LogisticRegression()], 'scaler': scalers,
             'reg__n_jobs': [self.n_jobs]},
            {'reg': [Ridge()], 'scaler': scalers, 'reg__alpha': alphas},
            {'reg': [Lasso()], 'scaler': scalers, 'reg__alpha': alphas},
            {'reg': [ElasticNet()], 'scaler': scalers,
             'reg__alpha': [0.1, 0.5, 1, 5, 10],
             'reg__l1_ratio': [0.1, 0.5, 0.9]},
            # Original had a duplicate "reg__random_state" key whose
            # surviving value was a bare range object (invalid for sklearn);
            # keep the explicit small sweep instead.
            {'reg': [RandomForestClassifier()], 'scaler': scalers,
             'reg__n_estimators': [4, 5, 6, 7],
             'reg__random_state': [2, 3, 4, 5],
             'reg__n_jobs': [self.n_jobs]},
            {'reg': [RandomForestRegressor()], 'scaler': scalers,
             'reg__n_estimators': [4, 5, 6, 7],
             'reg__random_state': [2, 3, 4, 5],
             'reg__n_jobs': [self.n_jobs]},
            {'reg': [DecisionTreeClassifier()], 'scaler': scalers,
             'reg__max_depth': [1, 3, 5, 7],
             'reg__random_state': seed_sweep},
            {'reg': [DecisionTreeRegressor()], 'scaler': scalers,
             'reg__max_depth': [1, 3, 5, 7],
             'reg__random_state': seed_sweep},
            {'reg': [KNeighborsClassifier()], 'scaler': scalers,
             'reg__n_jobs': [self.n_jobs]},
            {'reg': [KNeighborsRegressor()], 'scaler': scalers,
             'reg__n_jobs': [self.n_jobs]},
            {'reg': [BernoulliNB()], 'scaler': scalers},
            {'reg': [GaussianNB()], 'scaler': scalers},
            # MultinomialNB requires non-negative features, so only
            # MinMaxScaler is offered here.
            {'reg': [MultinomialNB()], 'scaler': [MinMaxScaler()]},
            {'reg': [SVC(max_iter=10000)], 'scaler': scalers,
             'reg__kernel': ["linear", "rbf", "sigmoid", "poly"],
             'reg__gamma': [0.01, 0.1, 1, 5, 10],
             'reg__C': [1.0, 3.0, 5.0]},
            {'reg': [SVR(max_iter=100000)], 'scaler': scalers,
             'reg__kernel': ["linear", "rbf", "sigmoid", "poly"],
             'reg__gamma': [0.01, 0.1, 1, 5, 10],
             'reg__C': [1.0, 3.0, 5.0]},
            {'reg': [LinearSVC(max_iter=100000)], 'scaler': scalers,
             'reg__C': [1.0, 3.0, 5.0]},
            {'reg': [LinearSVR(max_iter=100000)], 'scaler': scalers,
             'reg__C': [1.0, 3.0, 5.0]},
            {'reg': [AdaBoostClassifier()], 'scaler': scalers,
             'reg__random_state': seed_sweep},
            {'reg': [AdaBoostRegressor()], 'scaler': scalers,
             'reg__random_state': seed_sweep},
            {'reg': [VotingClassifier(estimators=[
                ('log_clf', LogisticRegression()),
                ('svm_clf', SVC(probability=True)),
                ('dt_clf', DecisionTreeClassifier(random_state=666))])],
             'scaler': scalers,
             'reg__voting': ["hard", "soft"],
             'reg__n_jobs': [self.n_jobs]},
            {'reg': [LinearDiscriminantAnalysis(n_components=2)],
             'scaler': scalers},
            # (1,) fixed: the original wrote (1), which is the int 1, not a
            # one-element tuple as hidden_layer_sizes expects.
            {'reg': [MLPClassifier(max_iter=100000)], 'scaler': scalers,
             'reg__activation': ["relu", "tanh", "identity", "logistic"],
             'reg__alpha': [0.0001, 0.001, 0.01, 1],
             'reg__hidden_layer_sizes': [(1,), (50,), (100,),
                                         (1, 1), (50, 50), (100, 100)]},
            {'reg': [MLPRegressor(max_iter=100000)], 'scaler': scalers,
             'reg__activation': ["relu", "tanh", "identity", "logistic"],
             'reg__alpha': [0.0001, 0.001, 0.01, 1],
             'reg__hidden_layer_sizes': [(1,), (50,), (100,),
                                         (1, 1), (50, 50), (100, 100)]},
        ]

    # Per-dataset train/test split seeds; unknown datasets fall back to 0.
    _SPLIT_SEEDS = {
        "iris": 40,
        "wine": 23,
        "breast_cancer": 41,
        "diabetes": 80,
        "boston": 67,
    }

    def Get_Better_Algorithm_and_Parameter(self):
        """Run the grid search and print the best model and its scores."""
        warnings.filterwarnings("ignore")
        random_state = self._SPLIT_SEEDS.get(self.data, 0)
        # Machine_Learn is a project-local helper supplying the raw data
        # — assumed to expose get_data(name) -> (X, y); TODO confirm.
        ML = Machine_Learn()
        X, y = ML.get_data(self.data)
        X_train, X_test, y_train, y_test = train_test_split(
            X, y, random_state=random_state)
        # Placeholder steps: GridSearchCV swaps in each candidate
        # scaler/estimator via the 'scaler'/'reg' grid keys.
        pipe = Pipeline([('scaler', StandardScaler()),
                         ('reg', LinearRegression())])
        shuffle_split = ShuffleSplit(test_size=.2, train_size=.7, n_splits=10)
        grid = GridSearchCV(pipe, self.prams, cv=shuffle_split)
        grid.fit(X_train, y_train)
        print("最佳模型是:{}".format(grid.best_params_))
        print("模型最佳训练得分:{:.2%}".format(grid.best_score_))
        print("模型最佳测试得分:{:.2%}".format(grid.score(X_test, y_test)))
# Guarded entry point: importing this module should not kick off a
# long-running grid search.
if __name__ == "__main__":
    better = Better("iris")
    better.Get_Better_Algorithm_and_Parameter()
输出
最佳模型是:{'reg': MLPClassifier(alpha=1,hidden_layer_sizes=(100, 100), max_iter=100000), 'reg__activation': 'relu','reg__alpha': 1, 'reg__hidden_layer_sizes': (100, 100), 'scaler':MinMaxScaler()}
模型最佳训练得分:78.70%
模型最佳测试得分:84.21%
所有关于机器学习分享的文章到这里结束。
—————————————————————————————————
扫码关注腾讯云开发者
领取腾讯云代金券
Copyright © 2013 - 2025 Tencent Cloud. All Rights Reserved. 腾讯云 版权所有
深圳市腾讯计算机系统有限公司 ICP备案/许可证号:粤B2-20090059 深公网安备号 44030502008569
腾讯云计算(北京)有限责任公司 京ICP证150476号 | 京ICP备11018762号 | 京公网安备号11010802020287
Copyright © 2013 - 2025 Tencent Cloud.
All Rights Reserved. 腾讯云 版权所有