module FunctionApproximation

// Reduced example from https://github.com/tirthajyoti/Machine-Learning-with-Python/blob/master/Function%20Approximation%20by%20Neural%20Network/Function%20approximation%20by%20linear%20model%20and%20deep%20network.ipynb
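// The example fits a two-hidden-layer fully connected network, trained with
// plain gradient descent on a mean-squared-error loss, to noisy samples of a
// damped polynomial-sinusoid function.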

open NumSharp
open Tensorflow
open System

let run () =

    let N_points = 75    // Number of points for constructing the function
    let x_min = 1.0      // Min of the range of x (feature)
    let x_max = 15.0     // Max of the range of x (feature)
    let noise_mean = 0.0 // Mean of the additive Gaussian noise
    let noise_sd = 10.0  // Std. dev. of the additive Gaussian noise

    // Evenly spaced points from x_min to x_max inclusive (like np.linspace);
    // dividing by (points - 1) rather than points makes the last point hit x_max
    let linspace points = [| for i in 0 .. points - 1 -> x_min + (x_max - x_min) / float (points - 1) * float i |]
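    // e.g. for x_min = 1.0 and x_max = 15.0: linspace 5 = [|1.0; 4.5; 8.0; 11.5; 15.0|]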

    // Ground-truth function: f(x) = (20x + 3x^2 + 0.1x^3) * sin(x) * exp(-0.1x)
    let func_trans (xAr: float []) =
        xAr
        |> Array.map (fun x -> (20.0 * x + 3.0 * x ** 2.0 + 0.1 * x ** 3.0) * sin x * exp (-0.1 * x))

    let X_raw = linspace N_points
    let Y_raw = func_trans X_raw
    // Shape the features as an N_points x 1 matrix for the feed
    let X_mtr = Array2D.init X_raw.Length 1 (fun i _ -> X_raw.[i])
    let X = np.array(X_mtr)

    // Noisy targets: true function values plus Gaussian noise
    let noise_x = np.random.normal(noise_mean, noise_sd, N_points)
    let y = np.array(Y_raw) + noise_x

    let X_train = X
    let y_train = y

    let learning_rate = 0.00001
    let training_epochs = 35000

    let n_input = 1           // Number of features
    let n_output = 1          // Regression output is a single number
    let n_hidden_layer_1 = 25 // Width of hidden layer 1
    let n_hidden_layer_2 = 25 // Width of hidden layer 2

    let x = tf.placeholder(tf.float64, new TensorShape(N_points, n_input))
    // One target value per sample, so the placeholder holds N_points scalars
    // (this `y` shadows the data binding above; y_train was captured first)
    let y = tf.placeholder(tf.float64, new TensorShape(N_points))

    // Weight and bias variables for the two hidden layers and the output
    // layer, initialized from a standard normal distribution
    let weights = dict [
        "hidden_layer_1", tf.Variable(tf.random_normal([| n_input; n_hidden_layer_1 |], dtype = tf.float64))
        "hidden_layer_2", tf.Variable(tf.random_normal([| n_hidden_layer_1; n_hidden_layer_2 |], dtype = tf.float64))
        "out", tf.Variable(tf.random_normal([| n_hidden_layer_2; n_output |], dtype = tf.float64))
    ]
    let biases = dict [
        "hidden_layer_1", tf.Variable(tf.random_normal([| n_hidden_layer_1 |], dtype = tf.float64))
        "hidden_layer_2", tf.Variable(tf.random_normal([| n_hidden_layer_2 |], dtype = tf.float64))
        "out", tf.Variable(tf.random_normal([| n_output |], dtype = tf.float64))
    ]


    // Hidden layers with ReLU activation
    let layer_1 = tf.nn.relu(tf.add(tf.matmul(x, weights.["hidden_layer_1"]._AsTensor()), biases.["hidden_layer_1"]))
    let layer_2 = tf.nn.relu(tf.add(tf.matmul(layer_1, weights.["hidden_layer_2"]._AsTensor()), biases.["hidden_layer_2"]))

    // Output layer with linear activation
    let ops = tf.add(tf.matmul(layer_2, weights.["out"]._AsTensor()), biases.["out"])

    // Define the loss: mean squared error between predictions and targets
    let cost = tf.reduce_mean(tf.square(tf.squeeze(ops) - y))
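    // Written out: cost = (1/N_points) * Σᵢ (ŷᵢ − yᵢ)²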

    // Global step counter, incremented by the optimizer at every update
    let gs = tf.Variable(1, trainable = false, name = "global_step")

    let optimizer = tf.train.GradientDescentOptimizer(learning_rate = float32 learning_rate).minimize(cost, global_step = gs)
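    // Each minimize call performs one plain gradient-descent update:
    // parameter <- parameter - learning_rate * d(cost)/d(parameter)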

    let init = tf.global_variables_initializer()

    Tensorflow.Python.``with``(tf.Session(), fun (sess: Session) ->
        sess.run(init) |> ignore
        // Loop over epochs
        for epoch in 0 .. training_epochs do
            // Run one optimization step (backprop) and fetch the global step
            // and loss; results come back in the same order as the fetches
            let result = sess.run([| optimizer :> ITensorOrOperation; gs._AsTensor() :> ITensorOrOperation; cost :> ITensorOrOperation |], new FeedItem(x, X_train), new FeedItem(y, y_train))

            let loss_value = (double) result.[2]
            let step = (int) result.[1]

            if epoch % 1000 = 0 then
                printfn "Step %d loss: %f" step loss_value

        // Fetch the trained weights and biases (kept for inspection; not printed below)
        let w = sess.run(weights |> Array.ofSeq |> Array.map (fun pair -> pair.Value))
        let b = sess.run(biases |> Array.ofSeq |> Array.map (fun pair -> pair.Value))

        // Compare predictions on the training inputs against the noise-free targets
        let yhat = sess.run([| ops :> ITensorOrOperation |], new FeedItem(x, X_train))
        for i in 0 .. N_points - 1 do
            printfn "pred %f real: %f" ((double) yhat.[0].[i].[0]) ((double) Y_raw.[i])
    )
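
// A minimal sketch of a console entry point for trying the example; the nested
// Program module and the [<EntryPoint>] wrapper are assumptions for illustration,
// not part of the original source.
module Program =
    [<EntryPoint>]
    let main (_: string[]) =
        run ()
        0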