-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathsv2.py
More file actions
102 lines (70 loc) · 2.77 KB
/
sv2.py
File metadata and controls
102 lines (70 loc) · 2.77 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.optimizers import Adam,SGD
import matplotlib as plt
import numpy as np
import pandas as pd
from sklearn.metrics import accuracy_score
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasClassifier
from keras.utils import np_utils
#from sklearn.cross_validation import train_test_split
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
from sklearn.preprocessing import LabelEncoder
from sklearn.pipeline import Pipeline
import pickle
# To Diable Warnings
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# --- Data loading & label encoding ------------------------------------------
# Training data: 'level' is the class label; every other column is a feature.
# Raw strings avoid accidental backslash escapes in Windows paths
# (the original 'D:\ddd.csv' only worked because \d is not a valid escape).
dataset = pd.read_csv(r'D:\ddd.csv')
X = dataset.drop('level', axis=1)
Y = dataset['level']

# Held-out evaluation data with the same schema.
dataset2 = pd.read_csv(r'd:/dd2.csv')
XX = dataset2.drop('level', axis=1)
YY = dataset2['level']

# Integer-encode the string labels, then one-hot them for
# categorical cross-entropy training.
encoder = LabelEncoder()
encoder.fit(Y)
encoded_Y = encoder.transform(Y)
dummy_y = np_utils.to_categorical(encoded_Y)

encoder2 = LabelEncoder()
encoder2.fit(YY)
# BUG FIX: the original called encoder.transform(YY) — i.e. the encoder
# fitted on the *training* labels — defeating the purpose of encoder2.
# Use the encoder actually fitted on YY. (Result is identical only when
# both files contain exactly the same label set.)
encoded_YY = encoder2.transform(YY)
dummy_yy = np_utils.to_categorical(encoded_YY)
def baseline_model():
    """Build and compile the feed-forward classifier.

    Architecture: 6 inputs -> Dense(22, relu) -> Dense(11, relu)
    -> Dense(3, softmax), compiled with categorical cross-entropy,
    the Adam optimizer, and accuracy as the reported metric.
    """
    net = Sequential()
    net.add(Dense(22, input_dim=6, activation='relu'))
    net.add(Dense(11, activation='relu'))
    net.add(Dense(3, activation='softmax'))
    net.compile(
        loss='categorical_crossentropy',
        optimizer="adam",
        metrics=['accuracy'],
    )
    return net
# --- Train, evaluate, persist ------------------------------------------------
# Wrap the Keras builder so it exposes the scikit-learn estimator API.
estimator = KerasClassifier(build_fn=baseline_model, epochs=90, batch_size=10, verbose=1)
estimator.fit(X, dummy_y)

# Predict on the held-out set. KerasClassifier.predict returns integer class
# indices, which inverse_transform maps back to the original string labels.
# NOTE(review): uses `encoder` (fitted on the training labels) for both the
# predictions and the ground truth — correct only while both files share the
# same label set; confirm against the data.
predictions = estimator.predict(XX)
predictions = encoder.inverse_transform(predictions)
original = encoder.inverse_transform(np.argmax(dummy_yy, axis=1))
print(predictions)
print(original)

# BUG FIX: accuracy_score returns a single scalar, so the original
# "%.2f%% (%.2f%%)" mean/std formatting either raised AttributeError on a
# plain float or always reported a meaningless 0.00% std. Report the single
# accuracy figure instead.
results = accuracy_score(original, predictions)
print("Baseline: %.2f%%" % (results * 100))

# Persist the trained estimator.
# BUG FIX: the original `pickle.dump(estimator, open(filename, 'wb'))`
# never closed the file handle; use a context manager.
# NOTE(review): pickling a KerasClassifier is unreliable (the underlying
# TF/Keras model is often not picklable); prefer estimator.model.save(...)
# if loading ever fails. Kept as pickle to preserve the artifact format.
filename = 'Saved_Model.sav'
with open(filename, 'wb') as model_file:
    pickle.dump(estimator, model_file)