################################################################################
#
# Facies Classification using ML in (Google) TensorFlow
#
# Russell A. Kappius
# Kappius Consulting LLC
# Supported by Sterling Seismic Services
#
# January 18, 2017
#
################################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pandas as pd
import numpy as np
#np.random.seed(0)
# Input from csv files
################################################################################
# Current method: use 'facies_vectors.csv' for training and 'validation_data_nofacies.csv' for the blind test wells
################################################################################
training_data = pd.read_csv('facies_vectors.csv')
test_data = pd.read_csv('validation_data_nofacies.csv')
# Isolate training vectors and labels; for the test data, create dummy labels (all 1s)
all_vectors = training_data.drop(['Facies','Formation', 'Well Name', 'Depth'], axis=1)
all_labels = training_data['Facies'].values
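# The seven remaining columns are the log features used by the model
# (in the contest data: GR, ILD_log10, DeltaPHI, PHIND, PE, NM_M, RELPOS).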
# Remove NaNs
nan_idx = np.any(np.isnan(all_vectors), axis=1)
training_vectors = all_vectors[~nan_idx]
training_labels = all_labels[~nan_idx]
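# In this dataset the PE log is absent for some wells, so any sample
# containing a NaN is excluded from training.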
test_vectors = test_data.drop(['Formation', 'Well Name', 'Depth'], axis=1)
test_labels = np.ones(test_vectors.shape[0], dtype=int)
################################################################################
################################################################################
# Scale feature vectors
from sklearn import preprocessing
scaler = preprocessing.StandardScaler().fit(training_vectors)
scaled_training_vectors = scaler.transform(training_vectors)
test_scaler = preprocessing.StandardScaler().fit(test_vectors)
scaled_test_vectors = test_scaler.transform(test_vectors)
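# Note: the test wells get their own scaler here; an alternative is to reuse
# `scaler` fit on the training data so both sets share the same feature scaling.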
################################################################################
################################################################################
# Use the local DataSet class to provide 'next_batch' functionality for TensorFlow;
# it also converts the integer labels to one-hot 2D arrays.
import DataSet
training_dataset = DataSet.load_dataset(scaled_training_vectors,training_labels)
test_dataset = DataSet.load_dataset(scaled_test_vectors,test_labels)
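# DataSet is a local module in this repo and is not shown here.  The interface
# assumed by the rest of this script (a sketch, not the actual implementation):
#   ds = DataSet.load_dataset(vectors, labels)
#   ds.feature_vectors   # 2D float array, one row per sample
#   ds.labels            # 2D one-hot array, shape (n_samples, 9)
#   ds.next_batch(n)     # -> (feature_vectors, one-hot labels) for n samples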
################################################################################
################################################################################
# Solve with (Google) TensorFlow
import tensorflow as tf
# Create the model
# 7 elements in each feature vector, 9 possible facies
x = tf.placeholder(tf.float32, [None, 7])
W = tf.Variable(tf.zeros([7, 9]))
b = tf.Variable(tf.zeros([9]))
y = tf.matmul(x, W) + b
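# This model is multinomial logistic (softmax) regression: a single linear
# layer producing one logit per facies class, with the softmax applied inside
# the loss function below.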
# Define loss and optimizer
y_ = tf.placeholder(tf.float32, [None,9])
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
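# The loss is the per-sample softmax cross-entropy averaged over the batch,
# minimized with plain gradient descent at a learning rate of 0.5.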
# create a session
sess = tf.InteractiveSession()
tf.global_variables_initializer().run()
# Train
for _ in range(1000):
    batch_xs, batch_ys = training_dataset.next_batch(100)
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
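# Optional sanity check (not part of the submission): training accuracy,
# assuming training_dataset exposes the same feature_vectors/labels attributes
# used for test_dataset below.
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print('training accuracy:',
      sess.run(accuracy, feed_dict={x: training_dataset.feature_vectors,
                                    y_: training_dataset.labels}))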
# produce unknown labels
run_test = tf.argmax(y, 1)
test_labels = sess.run(run_test,
                       feed_dict={x: test_dataset.feature_vectors,
                                  y_: test_dataset.labels})
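# Note: tf.argmax returns class indices 0-8; whether these map directly to the
# facies codes 1-9 depends on how DataSet one-hot encodes the labels.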
# save predicted labels
test_data['Facies'] = test_labels
test_data.to_csv('PredictedResults.csv')
#print(test_labels)
print('done')
################################################################################