
Commit 09d77e2

Merge pull request SciSharp#254 from AndreiDegtiarev/master
Inconsistency between DT_FLOAT and DT_DOUBLE types + f# example
2 parents: c0fd135 + 917ff43

5 files changed: 136 additions, 1 deletion

TensorFlow.NET.sln

Lines changed: 6 additions & 0 deletions
@@ -15,6 +15,8 @@ Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Keras.Example", "test\Keras
 EndProject
 Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Keras.UnitTest", "test\KerasNET.Test\Keras.UnitTest.csproj", "{A5839A45-A117-4BEA-898B-DE1ED6E0D58F}"
 EndProject
+Project("{6EC3EE1D-3C4E-46DD-8F32-0CC8E7565705}") = "TensorFlowNET.Examples.FSharp", "test\TensorFlowNET.Examples.FSharp\TensorFlowNET.Examples.FSharp.fsproj", "{62BC3801-F0D3-44A9-A0AC-712F40C8F961}"
+EndProject
 Global
     GlobalSection(SolutionConfigurationPlatforms) = preSolution
         Debug|Any CPU = Debug|Any CPU
@@ -45,6 +47,10 @@ Global
         {A5839A45-A117-4BEA-898B-DE1ED6E0D58F}.Debug|Any CPU.Build.0 = Debug|Any CPU
         {A5839A45-A117-4BEA-898B-DE1ED6E0D58F}.Release|Any CPU.ActiveCfg = Release|Any CPU
         {A5839A45-A117-4BEA-898B-DE1ED6E0D58F}.Release|Any CPU.Build.0 = Release|Any CPU
+        {62BC3801-F0D3-44A9-A0AC-712F40C8F961}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+        {62BC3801-F0D3-44A9-A0AC-712F40C8F961}.Debug|Any CPU.Build.0 = Debug|Any CPU
+        {62BC3801-F0D3-44A9-A0AC-712F40C8F961}.Release|Any CPU.ActiveCfg = Release|Any CPU
+        {62BC3801-F0D3-44A9-A0AC-712F40C8F961}.Release|Any CPU.Build.0 = Release|Any CPU
     EndGlobalSection
     GlobalSection(SolutionProperties) = preSolution
         HideSolutionNode = FALSE
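
The {6EC3EE1D-3C4E-46DD-8F32-0CC8E7565705} type GUID marks the new entry as an SDK-style F# project (the surrounding {9A19103F-16F7-4668-BE54-9A1E7A4F7556} entries are SDK-style C# projects), and the four added GlobalSection lines enroll it in the solution's existing Debug/Release | Any CPU configurations.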

src/TensorFlowNET.Core/Gradients/gradients_impl.py.cs

Lines changed: 1 addition & 1 deletion
@@ -368,7 +368,7 @@ private static Tensor[] _DefaultGradYs(Tensor[] grad_ys, Tensor[] ys, bool coloc
                 if (y.dtype.is_complex())
                     throw new TypeAccessException($"Gradients of complex tensors must set grad_ys (y.dtype = {y.dtype})");
                 var shape = array_ops.shape(y);
-                var constant = constant_op.constant(1.0f, name: $"grad_ys_{i}");
+                var constant = constant_op.constant(y.dtype == TF_DataType.TF_DOUBLE ? (object)1.0 : (object)1.0f, name: $"grad_ys_{i}");
                 var fill = gen_array_ops.fill(shape, constant);
                 new_grad_ys.Add(fill);
             }
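
For context, _DefaultGradYs builds the default gradient seed (grad_ys) for each output y as a tensor of ones shaped like y. Before this change the seed was always the DT_FLOAT constant 1.0f, so differentiating a DT_DOUBLE graph produced a dtype mismatch between the seed and y; after it, the seed follows y's dtype. Below is a minimal F# sketch of the float64 path this unblocks, using only TensorFlow.NET calls that appear in this commit's example (the shape and learning rate are illustrative):

open Tensorflow

// Sketch: an all-float64 graph. minimize() invokes tf.gradients, which
// reaches _DefaultGradYs; because cost.dtype is TF_DOUBLE, the grad_ys
// seed is now fill(shape, 1.0) rather than fill(shape, 1.0f).
let x = tf.placeholder(tf.float64, new TensorShape(1, 1))
let w = tf.Variable(tf.random_normal([|1; 1|], dtype = tf.float64))
let cost = tf.reduce_mean(tf.square(tf.matmul(x, w._AsTensor())))
let gs = tf.Variable(1, trainable = false, name = "global_step")
// Before this commit, the DT_FLOAT ones-seed met this DT_DOUBLE cost here.
let train_op = tf.train.GradientDescentOptimizer(0.01f).minimize(cost, global_step = gs)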
test/TensorFlowNET.Examples.FSharp/FunctionApproximation.fs

Lines changed: 104 additions & 0 deletions
@@ -0,0 +1,104 @@
+module FunctionApproximation
+
+//reduced example from https://github.com/tirthajyoti/Machine-Learning-with-Python/blob/master/Function%20Approximation%20by%20Neural%20Network/Function%20approximation%20by%20linear%20model%20and%20deep%20network.ipynb
+
+open NumSharp
+open Tensorflow
+open System
+
+
+let run()=
+
+    let N_points = 75 // Number of points for constructing function
+    let x_min = 1.0 // Min of the range of x (feature)
+    let x_max = 15.0 // Max of the range of x (feature)
+    let noise_mean = 0.0 // Mean of the Gaussian noise adder
+    let noise_sd = 10.0 // Std.Dev of the Gaussian noise adder
+
+    let linspace points = [| for i in 0 .. (points - 1) -> x_min + (x_max - x_min)/(float)points * (float)i |]
+
+    let func_trans(xAr:float []) =
+        xAr
+        |> Array.map (fun (x:float) -> (20.0 * x + 3.0 * System.Math.Pow(x,2.0) + 0.1 * System.Math.Pow(x,3.0)) * sin(x) * exp(-0.1*x))
+
+    let X_raw = linspace N_points
+    let Y_raw = func_trans(X_raw)
+    let X_mtr = Array2D.init X_raw.Length 1 (fun i j -> X_raw.[i])
+    let X = np.array(X_mtr)
+
+    let noise_x = np.random.normal(noise_mean, noise_sd, N_points)
+    let y = np.array(Y_raw) + noise_x
+
+    let X_train = X
+    let y_train = y
+
+    let learning_rate = 0.00001
+    let training_epochs = 35000
+
+    let n_input = 1 // Number of features
+    let n_output = 1 // Regression output is a number only
+    let n_hidden_layer_1 = 25 // Hidden layer 1
+    let n_hidden_layer_2 = 25 // Hidden layer 2
+
+    let x = tf.placeholder(tf.float64, new TensorShape(N_points, n_input))
+    let y = tf.placeholder(tf.float64, new TensorShape(n_output))
+
+
+    let weights = dict [
+                    "hidden_layer_1", tf.Variable(tf.random_normal([|n_input; n_hidden_layer_1|], dtype = tf.float64))
+                    "hidden_layer_2", tf.Variable(tf.random_normal([|n_hidden_layer_1; n_hidden_layer_2|], dtype = tf.float64))
+                    "out", tf.Variable(tf.random_normal([|n_hidden_layer_2; n_output|], dtype = tf.float64))
+                  ]
+    let biases = dict [
+                    "hidden_layer_1", tf.Variable(tf.random_normal([|n_hidden_layer_1|], dtype = tf.float64))
+                    "hidden_layer_2", tf.Variable(tf.random_normal([|n_hidden_layer_2|], dtype = tf.float64))
+                    "out", tf.Variable(tf.random_normal([|n_output|], dtype = tf.float64))
+                  ]
+
+
+    // Hidden layer with RELU activation
+
+    let layer_1 = tf.add(tf.matmul(x, weights.["hidden_layer_1"]._AsTensor()), biases.["hidden_layer_1"])
+    let layer_1 = tf.nn.relu(layer_1)
+
+    let layer_2 = tf.add(tf.matmul(layer_1, weights.["hidden_layer_2"]._AsTensor()), biases.["hidden_layer_2"])
+    let layer_2 = tf.nn.relu(layer_2)
+
+    // Output layer with linear activation
+    let ops = tf.add(tf.matmul(layer_2, weights.["out"]._AsTensor()), biases.["out"])
+
+    // Define loss and optimizer
+    let cost = tf.reduce_mean(tf.square(tf.squeeze(ops) - y))
+
+    let gs = tf.Variable(1, trainable = false, name = "global_step")
+
+    let optimizer = tf.train.GradientDescentOptimizer(learning_rate = (float32)learning_rate).minimize(cost, global_step = gs)
+
+    let init = tf.global_variables_initializer()
+
+
+    Tensorflow.Python.``with``(tf.Session(), fun (sess:Session) ->
+        sess.run(init) |> ignore
+        // Loop over epochs
+        for epoch in [0..training_epochs] do
+            // Run optimization process (backprop) and cost function (to get loss value)
+
+            let result = sess.run([|optimizer :> ITensorOrOperation; gs._AsTensor() :> ITensorOrOperation; cost :> ITensorOrOperation|], new FeedItem(x, X_train), new FeedItem(y, y_train))
+
+
+            let loss_value = (double) result.[2]
+
+            let step = (int) result.[1]
+
+            if epoch % 1000 = 0 then
+                sprintf "Step %d loss: %f" step loss_value |> Console.WriteLine
+        let w = sess.run(weights |> Array.ofSeq |> Array.map (fun pair -> pair.Value))
+        let b = sess.run(biases |> Array.ofSeq |> Array.map (fun pair -> pair.Value))
+        let yhat = sess.run([|ops :> ITensorOrOperation|], new FeedItem(x, X_train))
+        for i in [0..(N_points-1)] do
+            sprintf "pred %f real: %f" ((double)(yhat.[0].[i].[0])) ((double)Y_raw.[i]) |> Console.WriteLine
+    )
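
Note that the example builds the whole graph in tf.float64: the placeholders, every tf.random_normal variable, and therefore the cost tensor are DT_DOUBLE, which is precisely the case the grad_ys fix in gradients_impl.py.cs above addresses.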
test/TensorFlowNET.Examples.FSharp/Program.fs

Lines changed: 8 additions & 0 deletions
@@ -0,0 +1,8 @@
+// Learn more about F# at http://fsharp.org
+
+open System
+
+[<EntryPoint>]
+let main argv =
+    FunctionApproximation.run()
+    0 // return an integer exit code
test/TensorFlowNET.Examples.FSharp/TensorFlowNET.Examples.FSharp.fsproj

Lines changed: 17 additions & 0 deletions
@@ -0,0 +1,17 @@
+<Project Sdk="Microsoft.NET.Sdk">
+
+  <PropertyGroup>
+    <OutputType>Exe</OutputType>
+    <TargetFramework>netcoreapp2.2</TargetFramework>
+  </PropertyGroup>
+
+  <ItemGroup>
+    <Compile Include="FunctionApproximation.fs" />
+    <Compile Include="Program.fs" />
+  </ItemGroup>
+
+  <ItemGroup>
+    <ProjectReference Include="..\..\src\TensorFlowNET.Core\TensorFlowNET.Core.csproj" />
+  </ItemGroup>
+
+</Project>
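
With the solution entries added above, the example builds along with the rest of TensorFlow.NET.sln; assuming the .NET Core 2.2 SDK is installed, it can also be run on its own with dotnet run from test/TensorFlowNET.Examples.FSharp. Since F# compiles files in listing order, FunctionApproximation.fs appears before the Program.fs entry point that calls it.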
