This article is on the theme of artificial intelligence: we try to build a model that predicts a physical property of chemical compounds (their solubility) using a neural network. The machine learning library is TensorFlow, developed and used by Google; Keras is a package that makes TensorFlow's neural network functionality easier to use.
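The walkthrough below was run in a Jupyter notebook. As a quick sanity check that the required packages are installed (a minimal added sketch; the printed versions will depend on your environment):
import rdkit, mordred, sklearn, keras
import tensorflow as tf
print("rdkit      ", rdkit.__version__)
print("mordred    ", mordred.__version__)
print("sklearn    ", sklearn.__version__)
print("keras      ", keras.__version__)
print("tensorflow ", tf.__version__)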
<Dataset file: https://download.csdn.net/download/u012325865/10670205>
Code example
# Import dependencies
from rdkit import Chem
from rdkit.Chem.Draw import IPythonConsole
from mordred import descriptors, Calculator #pip install mordred
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn import model_selection
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.optimizers import SGD
calc = Calculator(descriptors, ignore_3D = True)
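Before running the calculator over the whole SDF file, here is a minimal sketch of what it computes for a single molecule (the phenol SMILES is just an illustrative input, not part of the dataset):
mol = Chem.MolFromSmiles('c1ccccc1O')        # phenol, an arbitrary example molecule
result = calc(mol)                           # compute all registered 2D descriptors
print(len(calc.descriptors), "descriptors registered")
print(list(result.asdict().items())[:5])     # peek at the first few name/value pairs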
# Load the data
sdf = [ mol for mol in Chem.SDMolSupplier('solubility.sdf')]
# Compute chemical descriptors for the molecules in the SDF file with mordred
X = calc.pandas(sdf).astype('float').dropna(axis = 1)
# Convert to a NumPy array
X = np.array(X, dtype = np.float32)
# Standardize each descriptor to mean 0 and unit variance
st = StandardScaler()
X= st.fit_transform(X)
# Save to an .npy file for later reuse
np.save("X_2d.npy", X)
# Define a function that reads the solubility values
def getResponse(mols, prop="SOL"):
    Y = []
    for mol in mols:
        act = mol.GetProp(prop)
        Y.append(act)
    return Y
# Read the solubility values from the SDF file
Y = getResponse(sdf)
# Convert to a NumPy array
Y = np.array(Y, dtype = np.float32)
# Save to an .npy file for later reuse
np.save("Y_2d.npy", Y)
# Randomly split into training and test sets
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, Y, test_size=0.25, random_state=42)
np.save("X_train.npy", X_train)
np.save("X_test.npy", X_test)
np.save("y_train.npy", y_train)
np.save("y_test.npy", y_test)
model = Sequential()
# Input layer: 50 units are passed to the next layer; the input dimension (input_dim) is the number of descriptors, X.shape[1] (1114 here).
model.add(Dense(units = 50, input_dim = X.shape[1]))
model.add(Activation("sigmoid"))
# Output layer: dimension 1, i.e. a single predicted value
model.add(Dense(units = 1))
model.summary()
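To make the architecture concrete, here is what the two Dense layers above compute, written out with numpy (an added sketch using the freshly initialized weights of the model just defined):
# Forward pass of the model above: y_hat = sigmoid(x @ W1 + b1) @ W2 + b2
def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

W1, b1, W2, b2 = model.get_weights()    # shapes: (n_descriptors, 50), (50,), (50, 1), (1,)
x = X_test[:1]                          # one standardized descriptor vector
print(sigmoid(x @ W1 + b1) @ W2 + b2)   # should match what model.predict(x) returns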
# SGD is stochastic gradient descent; nesterov=True enables Nesterov's accelerated gradient
model.compile(loss='mean_squared_error',
              optimizer=SGD(lr=0.01, momentum=0.9, nesterov=True),
              metrics=['mae'])   # MAE is a meaningful extra metric for regression (accuracy is not)
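For reference, the Nesterov update mentioned in the comment above can be written out as a toy one-dimensional sketch (classic look-ahead formulation; f(w) = w**2 is only an illustration, not part of the original post):
lr, mu = 0.01, 0.9              # same learning rate and momentum as the compile call
w, v = 5.0, 0.0
grad = lambda w: 2.0 * w        # gradient of f(w) = w**2
for _ in range(200):
    g = grad(w + mu * v)        # gradient at the look-ahead point
    v = mu * v - lr * g         # update the velocity
    w = w + v                   # update the parameter
print(w)                        # approaches the minimum at w = 0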
history = model.fit(X_train, y_train, epochs=100, batch_size=32,
                    validation_data=(X_test, y_test))
score = model.evaluate(X_test, y_test, verbose = 0)
print('Test loss:', score[0])
print('Test MAE:', score[1])
y_pred = model.predict(X_test).flatten()   # flatten (n, 1) predictions to match y_test's shape
rms = (np.mean((y_test - y_pred) ** 2)) ** 0.5
#s = np.std(y_test - y_pred)
print("Neural Network RMS", rms)
%matplotlib inline
import matplotlib.pyplot as plt
plt.figure()
plt.scatter(y_train, model.predict(X_train), label = 'Train', c = 'blue')
plt.title('Neural Network Predictor')
plt.xlabel('Measured Solubility')
plt.ylabel('Predicted Solubility')
plt.scatter(y_test, model.predict(X_test), c = 'lightgreen', label = 'Test', alpha = 0.8)
plt.legend(loc = 4)
plt.savefig('Neural Network Predictor.png', dpi=300)
plt.show()
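Besides the RMS above, R² on the training and test sets is a convenient single-number summary (an added check using scikit-learn, not part of the original post):
from sklearn.metrics import r2_score
print("Train R2:", r2_score(y_train, model.predict(X_train)))
print("Test  R2:", r2_score(y_test, model.predict(X_test)))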
import matplotlib.pyplot as plt
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = len(loss)
plt.plot(range(epochs), loss, marker = '.', label = 'loss')
plt.plot(range(epochs), val_loss, marker = '.', label = 'val_loss')
plt.legend(loc = 'best')
plt.grid()
plt.xlabel('epoch')
plt.ylabel('loss')
plt.show()
model.compile(loss='mean_squared_error',
              optimizer=SGD(lr=0.01, momentum=0.9, nesterov=True),
              metrics=['mae'])
from keras.callbacks import EarlyStopping
history = model.fit(X_train, y_train, epochs=100, batch_size=32,
                    validation_data=(X_test, y_test), callbacks=[EarlyStopping()])
score = model.evaluate(X_test, y_test, verbose = 0)
print('Test loss:', score[0])
print('Test MAE:', score[1])
y_pred = model.predict(X_test).flatten()   # flatten (n, 1) predictions to match y_test's shape
rms = (np.mean((y_test - y_pred) ** 2)) ** 0.5
#s = np.std(y_test - y_pred)
print("Neural Network RMS", rms)
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn import model_selection
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
from sklearn.cross_decomposition import PLSRegression
import sklearn
print("sklearn ver.", sklearn.__version__)
print("numpy ver.", np.__version__)
# Load the saved data files
X = np.load("X_2d.npy")
Y = np.load("Y_2d.npy")
# Randomly split into training and test sets
X_train, X_test, y_train, y_test = model_selection.train_test_split(
    X, Y, test_size=0.25, random_state=42)
# Compute latent factors that explain the variance in solubility and regress with up to 15 of them
pls2 = PLSRegression(n_components = 15, scale = True)
pls2.fit(X_train, y_train)
pred_train = pls2.predict(X_train)
pred_test = pls2.predict(X_test)
rms = (np.mean((y_test - pred_test.ravel())**2))**0.5   # ravel (n, 1) predictions to match y_test's shape
#s = np.std(y_test - y_pred)
print("PLS regression RMS", rms)
Output: PLS regression RMS 2.834230670918034
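The 15 components above are a hyperparameter; here is a hedged sketch for choosing n_components by cross-validation on the training set (the 1-30 search range is an arbitrary illustration, not a value from the original post):
from sklearn.model_selection import cross_val_score

best_n, best_mse = None, np.inf
for n in range(1, 31):
    pls = PLSRegression(n_components=n, scale=True)
    mse = -cross_val_score(pls, X_train, y_train, cv=5,
                           scoring='neg_mean_squared_error').mean()
    if mse < best_mse:
        best_n, best_mse = n, mse
print("best n_components:", best_n, "CV MSE:", best_mse)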
import matplotlib.pyplot as plt
plt.figure()
plt.scatter(y_train, pred_train, label = 'Train', c = 'blue')
plt.title('PLSR Predictor')
plt.xlabel('Measured Solubility')
plt.ylabel('Predicted Solubility')
plt.scatter(y_test, pred_test, c = 'lightgreen', label = 'Test', alpha = 0.8)
plt.legend(loc = 4)
plt.savefig('PLSR Predictor.png', dpi=300)
plt.show()
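If the fitted PLS model is worth keeping alongside the .npy files saved earlier, it can be persisted with joblib (joblib ships as a scikit-learn dependency; this is an added suggestion, not from the original post):
import joblib

joblib.dump(pls2, "pls2_model.joblib")         # persist the fitted model to disk
pls2_loaded = joblib.load("pls2_model.joblib")
print(pls2_loaded.predict(X_test[:3]))         # reproduces the original predictions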
References:
http://www.ag.kagawa-u.ac.jp/charlesy/2017/07/21/keras%E3%81%A7%E5%8C%96%E5%90%88%E7%89%A9%E3%81%AE%E6%BA%B6%E8%A7%A3%E5%BA%A6%E4%BA%88%E6%B8%AC%EF%BC%88%E3%83%8B%E3%83%A5%E3%83%BC%E3%83%A9%E3%83%AB%E3%83%8D%E3%83%83%E3%83%88%E3%83%AF%E3%83%BC/