IAS0360 / IAS0360_lab_excercises_2024
Commit a4da20db, authored Sep 27, 2024 by Nazrul_being
Added lab 4
parent 4540f6da
Showing 6 changed files with 205 additions and 110 deletions:
.gitignore
lab_3/lab3.cpp
lab_4/Makefile
lab_4/lab4.cpp
lib/includes/NeuralNetwork.h
lib/src/NeuralNetwork.cpp
.gitignore
View file @ a4da20db

-.DS_Store
-build
\ No newline at end of file
+build
+.vscode
\ No newline at end of file
lab_3/lab3.cpp
View file @ a4da20db

#include <iostream>
#include <vector>
#include <cmath>
#include "../lib/includes/NeuralNetwork.h"

#define NUM_OF_FEATURES 3     // Number of input features (e.g., temperature, humidity, air quality)
#define NUM_OF_HIDDEN_NODES 3 // Number of neurons in the hidden layer
#define NUM_OF_OUTPUT_NODES 1 // Number of output nodes (e.g., predicted class)

double learning_rate = 0.01; // Learning rate for updating weights (not used directly in this example)

// Intermediate outputs and storage for the hidden layer
std::vector<double> hiddenLayerOutput(NUM_OF_HIDDEN_NODES);      // Output of the hidden layer (for each example)
std::vector<double> hiddenLayerBias = {0, 0, 0};                 // Initialize biases for the hidden layer neurons
std::vector<double> hiddenLayerWeightedSum(NUM_OF_HIDDEN_NODES); // Weighted sum (z1) before applying activation function

// Weights from input layer to hidden layer
std::vector<std::vector<double>> inputToHiddenWeights = {
    {0.25, 0.5, 0.05}, // Weights for hidden neuron 1
    {0.8, 0.82, 0.3},  // Weights for hidden neuron 2
    {0.5, 0.45, 0.19}  // Weights for hidden neuron 3
};

// Intermediate outputs and storage for the output layer
std::vector<double> outputLayerBias = {0};                       // Initialize bias for the output neuron
std::vector<double> outputLayerWeightedSum(NUM_OF_OUTPUT_NODES); // Weighted sum (z2) before applying activation function

// Weights from hidden layer to output layer
std::vector<std::vector<double>> hiddenToOutputWeights = {
    {0.48, 0.73, 0.03} // Weights for the output neuron
};

// Predicted values after applying the sigmoid activation function
std::vector<double> predictedOutput(NUM_OF_OUTPUT_NODES); // yhat (predicted values)

// Training data (normalized input features and expected output)
std::vector<std::vector<double>> normalizedInput(2, std::vector<double>(NUM_OF_FEATURES)); // Normalized input features for training
std::vector<std::vector<double>> expectedOutput = {{1}}; // Expected output (labels) for each training example

// Task 1: Perform a forward pass through the network
void task1()
{
    NeuralNetwork nn;

    // Raw input features before normalization
    std::vector<std::vector<double>> rawInput = {
        {23.0, 40.0, 100.0}, // Example 1: temp, hum, air_q
        {22.0, 39.0, 101.0}  // Example 2
    };

    // Normalize the raw input data
    nn.normalizeData2D(rawInput, normalizedInput);
    std::cout << "Normalized training input:\n";
    nn.printMatrix(normalizedInput.size(), NUM_OF_FEATURES, normalizedInput);

    // Step 1: Calculate the weighted sum (z1) for the hidden layer
    std::vector<double> flattenedInputToHiddenWeights;
    for (const auto& row : inputToHiddenWeights)
    {
        flattenedInputToHiddenWeights.insert(flattenedInputToHiddenWeights.end(), row.begin(), row.end());
    }
    nn.multipleInputMultipleOutput(normalizedInput[0], flattenedInputToHiddenWeights, hiddenLayerBias,
                                   hiddenLayerWeightedSum, NUM_OF_FEATURES, NUM_OF_HIDDEN_NODES);
    std::cout << "Output vector (z1) for hidden layer:\n";
    for (double val : hiddenLayerWeightedSum)
    {
        std::cout << val << " ";
    }
    std::cout << "\n";

    // Step 2: Apply ReLU activation to the hidden layer's weighted sum
    nn.vectorReLU(hiddenLayerWeightedSum, hiddenLayerOutput);

    // Step 3: Calculate the weighted sum (z2) for the output layer
    std::vector<double> flattenedHiddenToOutputWeights;
    for (const auto& row : hiddenToOutputWeights)
    {
        flattenedHiddenToOutputWeights.insert(flattenedHiddenToOutputWeights.end(), row.begin(), row.end());
    }
    nn.multipleInputMultipleOutput(hiddenLayerOutput, flattenedHiddenToOutputWeights, outputLayerBias,
                                   outputLayerWeightedSum, NUM_OF_HIDDEN_NODES, NUM_OF_OUTPUT_NODES);
    std::cout << "Output vector (z2) for output layer:\n";
    std::cout << outputLayerWeightedSum[0] << "\n";

    // Step 4: Apply Sigmoid activation to the output layer's weighted sum
    nn.vectorSigmoid(outputLayerWeightedSum, predictedOutput);
    std::cout << "Predicted output (yhat) after Sigmoid activation:\n";
    std::cout << predictedOutput[0] << "\n";

    // Step 5: Compute the cost (logistic regression cost function)
    double cost = nn.computeCost(1, {predictedOutput}, expectedOutput);
    std::cout << "Cost: " << cost << "\n";
}

// Task 2: Train the network using backpropagation
void task2()
{
    NeuralNetwork nn;

    // Example input (temperature, humidity, air quality)
    std::vector<double> inputVector = {30.0, 87.0, 110.0};

    // Example expected output (target value)
    std::vector<double> expectedOutput = {0.8}; // Target output for this example

    // Network weights and biases
    std::vector<std::vector<double>> inputToHiddenWeights = {
        {0.5, -0.2, 0.8},
        {-0.3, 0.9, 0.1},
        {0.7, -0.5, 0.2}
    };
    std::vector<double> hiddenBiases = {0.0, 0.0, 0.0};
    std::vector<std::vector<double>> hiddenToOutputWeights = {
        {0.3, -0.6, 0.9}
    };
    std::vector<double> outputBiases = {0.0};

    // Hyperparameters
    double learningRate = 0.01;
    int epochs = 1000;

    // Perform backpropagation learning
    nn.backpropagation(inputVector, expectedOutput, inputToHiddenWeights, hiddenBiases,
                       hiddenToOutputWeights, outputBiases, learningRate, epochs);

    // Test the final output after training
    std::vector<double> hiddenLayerOutput(inputToHiddenWeights.size(), 0.0);
    std::vector<double> finalOutput(hiddenToOutputWeights.size(), 0.0);

    // Forward pass to compute the final output
    for (std::size_t i = 0; i < inputToHiddenWeights.size(); ++i)
    {
        double z = hiddenBiases[i];
        for (std::size_t j = 0; j < inputVector.size(); ++j)
        {
            z += inputVector[j] * inputToHiddenWeights[i][j];
        }
        hiddenLayerOutput[i] = 1.0 / (1.0 + exp(-z)); // Sigmoid activation
    }
    for (std::size_t i = 0; i < hiddenToOutputWeights.size(); ++i)
    {
        double z = outputBiases[i];
        for (std::size_t j = 0; j < hiddenLayerOutput.size(); ++j)
        {
            z += hiddenLayerOutput[j] * hiddenToOutputWeights[i][j];
        }
        finalOutput[i] = 1.0 / (1.0 + exp(-z)); // Sigmoid activation
    }

    // Print final output and compare to expected output
    std::cout << "Final output after training: " << finalOutput[0] << std::endl;
    std::cout << "Expected output: " << expectedOutput[0] << std::endl;
}

int main()
{
    task1();
    task2();
    return 0;
}
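Step 5 of task1 calls nn.computeCost(1, {predictedOutput}, expectedOutput); the library's implementation is not part of this diff. For a sigmoid output the usual choice is the logistic (binary cross-entropy) cost, J = -(1/m) * sum_i [ y_i * log(yhat_i) + (1 - y_i) * log(1 - yhat_i) ]. A minimal stand-alone sketch under that assumption — logisticCost is a hypothetical stand-in, not the library's confirmed function:

#include <cmath>
#include <vector>

// Hypothetical stand-in for NeuralNetwork::computeCost, assuming binary
// cross-entropy: predicted[i][0] is yhat for example i, expected[i][0] is y.
double logisticCost(int m,
                    const std::vector<std::vector<double>>& predicted,
                    const std::vector<std::vector<double>>& expected)
{
    double sum = 0.0;
    for (int i = 0; i < m; ++i)
    {
        const double yhat = predicted[i][0];
        const double y = expected[i][0];
        sum += y * std::log(yhat) + (1.0 - y) * std::log(1.0 - yhat);
    }
    return -sum / m; // average negative log-likelihood over m examples
}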
lab_4/Makefile
0 → 100644
View file @ a4da20db

BIN = lab4
CC = g++
SRC = lab4.cpp ../lib/src/NeuralNetwork.cpp

all: $(BIN)

$(BIN): $(SRC)
	$(CC) -o $(BIN) $(SRC)

run: $(BIN)
	./$(BIN)
\ No newline at end of file
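With this Makefile, running "make" builds the lab4 binary from lab4.cpp together with the shared ../lib/src/NeuralNetwork.cpp source (assuming g++ is available on the PATH), and "make run" rebuilds it if needed and then executes ./lab4.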
lab_4/lab4.cpp
0 → 100644
View file @ a4da20db

#include <iostream>
#include <vector>
#include <algorithm> // std::fill
#include <string>
#include "../lib/includes/NeuralNetwork.h"

#define NUM_OF_FEATURES 3     // Number of input features (e.g., temperature, humidity, air quality)
#define NUM_OF_HIDDEN_NODES 3 // Number of neurons in the hidden layer
#define NUM_OF_OUTPUT_NODES 1 // Number of output nodes (e.g., predicted class)

double learning_rate = 0.01; // Learning rate for updating weights (not used directly in this example)

// Intermediate outputs and storage for the hidden layer
std::vector<double> hiddenLayerOutput(NUM_OF_HIDDEN_NODES);      // Output of the hidden layer (for each example)
std::vector<double> hiddenLayerBias = {0, 0, 0};                 // Initialize biases for the hidden layer neurons
std::vector<double> hiddenLayerWeightedSum(NUM_OF_HIDDEN_NODES); // Weighted sum (z1) before applying activation function

// Weights from input layer to hidden layer
std::vector<std::vector<double>> inputToHiddenWeights = {
    {0.25, 0.5, 0.05}, // Weights for hidden neuron 1
    {0.8, 0.82, 0.3},  // Weights for hidden neuron 2
    {0.5, 0.45, 0.19}  // Weights for hidden neuron 3
};

// Intermediate outputs and storage for the output layer
std::vector<double> outputLayerBias = {0};                       // Initialize bias for the output neuron
std::vector<double> outputLayerWeightedSum(NUM_OF_OUTPUT_NODES); // Weighted sum (z2) before applying activation function

// Weights from hidden layer to output layer
std::vector<std::vector<double>> hiddenToOutputWeights = {
    {0.48, 0.73, 0.03} // Weights for the output neuron
};

// Predicted values after applying the sigmoid activation function
std::vector<double> predictedOutput(NUM_OF_OUTPUT_NODES); // yhat (predicted values)

// Training data (normalized input features and expected output)
std::vector<std::vector<double>> normalizedInput(2, std::vector<double>(NUM_OF_FEATURES)); // Normalized input features for training
std::vector<std::vector<double>> expectedOutput = {{1}}; // Expected output (labels) for each training example

// Task 1: Perform a forward pass through the network
void task1()
{
    NeuralNetwork nn;

    // Raw input features before normalization
    std::vector<std::vector<double>> rawInput = {
        {23.0, 40.0, 100.0}, // Example 1: temp, hum, air_q
        {22.0, 39.0, 101.0}  // Example 2
    };

    // Normalize the raw input data
    nn.normalizeData2D(rawInput, normalizedInput);
    std::cout << "Normalized training input:\n";
    nn.printMatrix(normalizedInput.size(), NUM_OF_FEATURES, normalizedInput);

    // Step 1: Calculate the weighted sum (z1) for the hidden layer
    std::vector<double> flattenedInputToHiddenWeights;
    for (const auto& row : inputToHiddenWeights)
    {
        flattenedInputToHiddenWeights.insert(flattenedInputToHiddenWeights.end(), row.begin(), row.end());
    }
    nn.multipleInputMultipleOutput(normalizedInput[0], flattenedInputToHiddenWeights, hiddenLayerBias,
                                   hiddenLayerWeightedSum, NUM_OF_FEATURES, NUM_OF_HIDDEN_NODES);
    std::cout << "Output vector (z1) for hidden layer:\n";
    for (double val : hiddenLayerWeightedSum)
    {
        std::cout << val << " ";
    }
    std::cout << "\n";

    // Step 2: Apply ReLU activation to the hidden layer's weighted sum
    nn.vectorReLU(hiddenLayerWeightedSum, hiddenLayerOutput);

    // Step 3: Calculate the weighted sum (z2) for the output layer
    std::vector<double> flattenedHiddenToOutputWeights;
    for (const auto& row : hiddenToOutputWeights)
    {
        flattenedHiddenToOutputWeights.insert(flattenedHiddenToOutputWeights.end(), row.begin(), row.end());
    }
    nn.multipleInputMultipleOutput(hiddenLayerOutput, flattenedHiddenToOutputWeights, outputLayerBias,
                                   outputLayerWeightedSum, NUM_OF_HIDDEN_NODES, NUM_OF_OUTPUT_NODES);
    std::cout << "Output vector (z2) for output layer:\n";
    std::cout << outputLayerWeightedSum[0] << "\n";

    // Step 4: Apply Sigmoid activation to the output layer's weighted sum
    nn.vectorSigmoid(outputLayerWeightedSum, predictedOutput);
    std::cout << "Predicted output (yhat) after Sigmoid activation:\n";
    std::cout << predictedOutput[0] << "\n";

    // Step 5: Compute the cost (logistic regression cost function)
    double cost = nn.computeCost(1, {predictedOutput}, expectedOutput);
    std::cout << "Cost: " << cost << "\n";
}

// Task 2: Save and load the network's state
void task2()
{
    NeuralNetwork nn;
    const std::string filename = "network_save.txt";

    // Save the network to a file
    nn.saveNetwork(filename, NUM_OF_FEATURES, NUM_OF_HIDDEN_NODES, NUM_OF_OUTPUT_NODES,
                   inputToHiddenWeights, hiddenLayerBias, hiddenToOutputWeights, outputLayerBias);

    // Clear the weights and biases to simulate loading from a file
    for (auto& row : inputToHiddenWeights)
    {
        std::fill(row.begin(), row.end(), 0.0);
    }
    std::fill(hiddenLayerBias.begin(), hiddenLayerBias.end(), 0.0);
    for (auto& row : hiddenToOutputWeights)
    {
        std::fill(row.begin(), row.end(), 0.0);
    }
    std::fill(outputLayerBias.begin(), outputLayerBias.end(), 0.0);
    std::cout << "Network weights and biases cleared to zero.\n";

    // Load the network from the file
    nn.loadNetwork(filename, NUM_OF_FEATURES, NUM_OF_HIDDEN_NODES, NUM_OF_OUTPUT_NODES,
                   inputToHiddenWeights, hiddenLayerBias, hiddenToOutputWeights, outputLayerBias);

    // Execute the network after loading the saved state
    task1();
}

int main()
{
    task1();
    task2();
    return 0;
}
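task1 in both labs relies on nn.normalizeData2D, whose body is also outside this diff. A common implementation for this kind of sensor data — only an assumption here — is per-feature min-max scaling to [0, 1]; a self-contained sketch (minMaxNormalize2D is a hypothetical name, not the library function):

#include <algorithm>
#include <vector>

// Hypothetical per-feature min-max normalization: each column (feature) is
// rescaled so its minimum maps to 0 and its maximum maps to 1.
void minMaxNormalize2D(const std::vector<std::vector<double>>& raw,
                       std::vector<std::vector<double>>& normalized)
{
    if (raw.empty()) return;
    const std::size_t rows = raw.size();
    const std::size_t cols = raw[0].size();
    normalized.assign(rows, std::vector<double>(cols, 0.0));
    for (std::size_t j = 0; j < cols; ++j)
    {
        double lo = raw[0][j], hi = raw[0][j];
        for (std::size_t i = 1; i < rows; ++i)
        {
            lo = std::min(lo, raw[i][j]);
            hi = std::max(hi, raw[i][j]);
        }
        const double range = (hi > lo) ? (hi - lo) : 1.0; // avoid divide-by-zero
        for (std::size_t i = 0; i < rows; ++i)
        {
            normalized[i][j] = (raw[i][j] - lo) / range;
        }
    }
}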
lib/includes/NeuralNetwork.h
View file @ a4da20db

...
@@ -30,9 +30,16 @@ public:
     // Root Mean Squared Error (RMSE)
     double calculateRMSE(double mse);
 
-    // Brute-force learning
+    // Brute-force learning to find the best weight
     void bruteForceLearning(double input, double& weight, double expectedValue, double learningRate, int maxEpochs);
 
+    // Backpropagation learning function
+    void backpropagation(const std::vector<double>& input, const std::vector<double>& expectedOutput,
+                         std::vector<std::vector<double>>& inputToHiddenWeights, std::vector<double>& hiddenBiases,
+                         std::vector<std::vector<double>>& hiddenToOutputWeights, std::vector<double>& outputBiases,
+                         double learningRate, int epochs);
+
     // Activation functions (ReLU and Sigmoid)
     double relu(double x);
     double sigmoid(double x);
...
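The backpropagation member declared above is added as an empty stub in NeuralNetwork.cpp below, evidently for students to complete. As a non-authoritative sketch, one way to implement it for this single-hidden-layer shape — assuming sigmoid activations in both layers (matching the test forward pass in lab3.cpp's task2) and a squared-error loss 0.5 * (yhat - y)^2:

#include <cmath>
#include <cstddef>
#include <vector>

static double sigma(double x) { return 1.0 / (1.0 + std::exp(-x)); }

// Sketch only (hypothetical free function, not the graded solution):
// gradient-descent training of an input -> hidden -> output network with
// sigmoid activations in both layers.
void backpropagationSketch(const std::vector<double>& input,
                           const std::vector<double>& expectedOutput,
                           std::vector<std::vector<double>>& inputToHiddenWeights,
                           std::vector<double>& hiddenBiases,
                           std::vector<std::vector<double>>& hiddenToOutputWeights,
                           std::vector<double>& outputBiases,
                           double learningRate, int epochs)
{
    const std::size_t H = inputToHiddenWeights.size();
    const std::size_t O = hiddenToOutputWeights.size();
    for (int e = 0; e < epochs; ++e)
    {
        // Forward pass
        std::vector<double> hidden(H), output(O);
        for (std::size_t i = 0; i < H; ++i)
        {
            double z = hiddenBiases[i];
            for (std::size_t j = 0; j < input.size(); ++j)
                z += input[j] * inputToHiddenWeights[i][j];
            hidden[i] = sigma(z);
        }
        for (std::size_t k = 0; k < O; ++k)
        {
            double z = outputBiases[k];
            for (std::size_t i = 0; i < H; ++i)
                z += hidden[i] * hiddenToOutputWeights[k][i];
            output[k] = sigma(z);
        }

        // Output-layer deltas: dL/dz2 = (yhat - y) * yhat * (1 - yhat)
        std::vector<double> outputDelta(O);
        for (std::size_t k = 0; k < O; ++k)
            outputDelta[k] = (output[k] - expectedOutput[k]) * output[k] * (1.0 - output[k]);

        // Hidden-layer deltas, backpropagated through hiddenToOutputWeights
        // (computed before any weights are updated)
        std::vector<double> hiddenDelta(H, 0.0);
        for (std::size_t i = 0; i < H; ++i)
        {
            for (std::size_t k = 0; k < O; ++k)
                hiddenDelta[i] += outputDelta[k] * hiddenToOutputWeights[k][i];
            hiddenDelta[i] *= hidden[i] * (1.0 - hidden[i]);
        }

        // Gradient-descent updates
        for (std::size_t k = 0; k < O; ++k)
        {
            for (std::size_t i = 0; i < H; ++i)
                hiddenToOutputWeights[k][i] -= learningRate * outputDelta[k] * hidden[i];
            outputBiases[k] -= learningRate * outputDelta[k];
        }
        for (std::size_t i = 0; i < H; ++i)
        {
            for (std::size_t j = 0; j < input.size(); ++j)
                inputToHiddenWeights[i][j] -= learningRate * hiddenDelta[i] * input[j];
            hiddenBiases[i] -= learningRate * hiddenDelta[i];
        }
    }
}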
lib/src/NeuralNetwork.cpp
View file @ a4da20db

...
@@ -60,6 +60,24 @@ double NeuralNetwork::sigmoid(double x)
     return 0;
 }
 
+double sigmoid(double x)
+{
+    return 0;
+}
+
+double sigmoidDerivative(double x)
+{
+    return 0;
+}
+
+void NeuralNetwork::backpropagation(const std::vector<double>& input, const std::vector<double>& expectedOutput,
+                                    std::vector<std::vector<double>>& inputToHiddenWeights, std::vector<double>& hiddenBiases,
+                                    std::vector<std::vector<double>>& hiddenToOutputWeights, std::vector<double>& outputBiases,
+                                    double learningRate, int epochs)
+{
+    return;
+}
+
 void NeuralNetwork::vectorReLU(std::vector<double>& inputVector, std::vector<double>& outputVector)
 {
     return;
...
@@ -230,4 +248,4 @@ void NeuralNetwork::loadNetwork(const std::string& filename, int numOfFeatures,
     file.close();
     std::cout << "Network loaded from file: " << filename << "\n";
-}
\ No newline at end of file
+}
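The sigmoid and sigmoidDerivative stubs added above currently return 0. The standard definitions they are presumably meant to compute are sigma(x) = 1 / (1 + e^(-x)) and sigma'(x) = sigma(x) * (1 - sigma(x)); a minimal sketch:

#include <cmath>

// Standard logistic sigmoid and its derivative; a sketch of what the
// stubs above are presumably meant to compute.
double sigmoid(double x)
{
    return 1.0 / (1.0 + std::exp(-x));
}

double sigmoidDerivative(double x)
{
    const double s = sigmoid(x);
    return s * (1.0 - s); // d/dx sigmoid(x)
}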