Commit a4da20db by Nazrul_being

Added lab 4

parent 4540f6da
.gitignore

 .DS_Store
 build
+.vscode
\ No newline at end of file
lab4.cpp (new version in this commit; the previous version is shown in full further below)

#include <iostream>
#include <vector>
#include <cmath>
#include "../lib/includes/NeuralNetwork.h"

int main() {
    NeuralNetwork nn;

    // Example input (temperature, humidity, air quality)
    std::vector<double> inputVector = {30.0, 87.0, 110.0};

    // Example expected output (target value)
    std::vector<double> expectedOutput = {0.8}; // Target output for this example

    // Network weights and biases
    std::vector<std::vector<double>> inputToHiddenWeights = {
        {0.5, -0.2, 0.8},
        {-0.3, 0.9, 0.1},
        {0.7, -0.5, 0.2}
    };
    std::vector<double> hiddenBiases = {0.0, 0.0, 0.0};
    std::vector<std::vector<double>> hiddenToOutputWeights = {
        {0.3, -0.6, 0.9}
    };
    std::vector<double> outputBiases = {0.0};

    // Hyperparameters
    double learningRate = 0.01;
    int epochs = 1000;

    // Perform backpropagation learning
    nn.backpropagation(inputVector, expectedOutput, inputToHiddenWeights, hiddenBiases,
                       hiddenToOutputWeights, outputBiases, learningRate, epochs);

    // Test the final output after training
    std::vector<double> hiddenLayerOutput(inputToHiddenWeights.size(), 0.0);
    std::vector<double> finalOutput(hiddenToOutputWeights.size(), 0.0);

    // Forward pass to compute final output
    for (std::size_t i = 0; i < inputToHiddenWeights.size(); ++i) {
        double z = hiddenBiases[i];
        for (std::size_t j = 0; j < inputVector.size(); ++j) {
            z += inputVector[j] * inputToHiddenWeights[i][j];
        }
        hiddenLayerOutput[i] = 1.0 / (1.0 + exp(-z)); // Sigmoid activation
    }
    for (std::size_t i = 0; i < hiddenToOutputWeights.size(); ++i) {
        double z = outputBiases[i];
        for (std::size_t j = 0; j < hiddenLayerOutput.size(); ++j) {
            z += hiddenLayerOutput[j] * hiddenToOutputWeights[i][j];
        }
        finalOutput[i] = 1.0 / (1.0 + exp(-z)); // Sigmoid activation
    }

    // Print final output and compare to expected output
    std::cout << "Final output after training: " << finalOutput[0] << std::endl;
    std::cout << "Expected output: " << expectedOutput[0] << std::endl;

    return 0;
}
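The new main() trains with nn.backpropagation and then re-runs a manual forward pass (sigmoid in both layers) to check the trained output. Note that in this commit the backpropagation body added to lib/src/NeuralNetwork.cpp is still a stub that returns immediately (see the diff further down), so the printed "final output" is computed from the unchanged initial weights.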
Makefile

BIN = lab4
CC = g++
SRC = lab4.cpp ../lib/src/NeuralNetwork.cpp

all: $(BIN)

$(BIN): $(SRC)
	$(CC) -o $(BIN) $(SRC)

run: $(BIN)
	./$(BIN)
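Typical usage: "make" builds the lab4 binary from lab4.cpp together with the shared ../lib/src/NeuralNetwork.cpp, and "make run" executes it (each recipe line under a target must be indented with a tab).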
Previous version of lab4.cpp (the forward-pass and save/load exercise that this commit replaces):

#include <iostream>
#include <vector>
#include "../lib/includes/NeuralNetwork.h"
#define NUM_OF_FEATURES 3 // Number of input features (e.g., temperature, humidity, air quality)
#define NUM_OF_HIDDEN_NODES 3 // Number of neurons in the hidden layer
#define NUM_OF_OUTPUT_NODES 1 // Number of output nodes (e.g., predicted class)
double learning_rate = 0.01; // Learning rate for updating weights (not used directly in this example)
// Intermediate outputs and storage for the hidden layer
std::vector<double> hiddenLayerOutput(NUM_OF_HIDDEN_NODES); // Output of the hidden layer (for each example)
std::vector<double> hiddenLayerBias = {0, 0, 0}; // Initialize biases for the hidden layer neurons
std::vector<double> hiddenLayerWeightedSum(NUM_OF_HIDDEN_NODES); // Weighted sum (z1) before applying activation function
// Weights from input layer to hidden layer
std::vector<std::vector<double>> inputToHiddenWeights = {
{0.25, 0.5, 0.05}, // Weights for hidden neuron 1
{0.8, 0.82, 0.3}, // Weights for hidden neuron 2
{0.5, 0.45, 0.19} // Weights for hidden neuron 3
};
// Intermediate outputs and storage for the output layer
std::vector<double> outputLayerBias = {0}; // Initialize bias for the output neuron
std::vector<double> outputLayerWeightedSum(NUM_OF_OUTPUT_NODES); // Weighted sum (z2) before applying activation function
// Weights from hidden layer to output layer
std::vector<std::vector<double>> hiddenToOutputWeights = {
{0.48, 0.73, 0.03} // Weights for the output neuron
};
// Predicted values after applying the sigmoid activation function
std::vector<double> predictedOutput(NUM_OF_OUTPUT_NODES); // yhat (predicted values)
// Training data (normalized input features and expected output)
std::vector<std::vector<double>> normalizedInput(2, std::vector<double>(NUM_OF_FEATURES)); // Normalized input features for training
std::vector<std::vector<double>> expectedOutput = {{1}}; // Expected output (label); only the first training example is used in task1 below
// Task 1: Perform a forward pass through the network
void task1() {
NeuralNetwork nn;
// Raw input features before normalization
std::vector<std::vector<double>> rawInput = {
{23.0, 40.0, 100.0}, // Example 1: temp, hum, air_q
{22.0, 39.0, 101.0} // Example 2
};
// Normalize the raw input data
nn.normalizeData2D(rawInput, normalizedInput);
std::cout << "Normalized training input:\n";
nn.printMatrix(normalizedInput.size(), NUM_OF_FEATURES, normalizedInput);
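// (normalizeData2D is assumed here to rescale each feature column into a common
// range such as [0, 1]; the exact scheme is defined in the library source.)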
// Step 1: Calculate the weighted sum (z1) for the hidden layer
std::vector<double> flattenedInputToHiddenWeights;
for (const auto& row : inputToHiddenWeights) {
flattenedInputToHiddenWeights.insert(flattenedInputToHiddenWeights.end(), row.begin(), row.end());
}
nn.multipleInputMultipleOutput(normalizedInput[0], flattenedInputToHiddenWeights, hiddenLayerBias, hiddenLayerWeightedSum, NUM_OF_FEATURES, NUM_OF_HIDDEN_NODES);
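// (That is, z1[j] = sum_i(W1[j][i] * x[i]) + b1[j] for each hidden neuron j, assuming
// multipleInputMultipleOutput computes a matrix-vector product plus bias.)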
std::cout << "Output vector (z1) for hidden layer:\n";
for (double val : hiddenLayerWeightedSum) {
std::cout << val << " ";
}
std::cout << "\n";
// Step 2: Apply ReLU activation to the hidden layer's weighted sum
nn.vectorReLU(hiddenLayerWeightedSum, hiddenLayerOutput);
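// (ReLU is applied element-wise: a1[j] = max(0, z1[j]).)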
// Step 3: Calculate the weighted sum (z2) for the output layer
std::vector<double> flattenedHiddenToOutputWeights;
for (const auto& row : hiddenToOutputWeights) {
flattenedHiddenToOutputWeights.insert(flattenedHiddenToOutputWeights.end(), row.begin(), row.end());
}
nn.multipleInputMultipleOutput(hiddenLayerOutput, flattenedHiddenToOutputWeights, outputLayerBias, outputLayerWeightedSum, NUM_OF_HIDDEN_NODES, NUM_OF_OUTPUT_NODES);
std::cout << "Output vector (z2) for output layer:\n";
std::cout << outputLayerWeightedSum[0] << "\n";
// Step 4: Apply Sigmoid activation to the output layer's weighted sum
nn.vectorSigmoid(outputLayerWeightedSum, predictedOutput);
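// (Sigmoid squashes the weighted sum into (0, 1): yhat = 1 / (1 + e^(-z2)).)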
std::cout << "Predicted output (yhat) after Sigmoid activation:\n";
std::cout << predictedOutput[0] << "\n";
// Step 5: Compute the cost (logistic regression cost function)
double cost = nn.computeCost(1, {predictedOutput}, expectedOutput);
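// (Assuming computeCost implements the logistic-regression (binary cross-entropy) cost
// named above: J = -(1/m) * sum(y * log(yhat) + (1 - y) * log(1 - yhat)), with m = 1 here.)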
std::cout << "Cost: " << cost << "\n";
}
// Task 2: Save and load the network's state
void task2() {
NeuralNetwork nn;
const std::string filename = "network_save.txt";
// Save the network to a file
nn.saveNetwork(filename, NUM_OF_FEATURES, NUM_OF_HIDDEN_NODES, NUM_OF_OUTPUT_NODES, inputToHiddenWeights, hiddenLayerBias, hiddenToOutputWeights, outputLayerBias);
// Clear the weights and biases to simulate loading from a file
for (auto& row : inputToHiddenWeights) {
std::fill(row.begin(), row.end(), 0.0);
}
std::fill(hiddenLayerBias.begin(), hiddenLayerBias.end(), 0.0);
for (auto& row : hiddenToOutputWeights) {
std::fill(row.begin(), row.end(), 0.0);
}
std::fill(outputLayerBias.begin(), outputLayerBias.end(), 0.0);
std::cout << "Network weights and biases cleared to zero.\n";
// Load the network from the file
nn.loadNetwork(filename, NUM_OF_FEATURES, NUM_OF_HIDDEN_NODES, NUM_OF_OUTPUT_NODES, inputToHiddenWeights, hiddenLayerBias, hiddenToOutputWeights, outputLayerBias);
// Execute the network after loading the saved state
task1();
}
int main() {
task1();
task2();
return 0;
}
lib/includes/NeuralNetwork.h

@@ -30,9 +30,16 @@ public:
     // Root Mean Squared Error (RMSE)
     double calculateRMSE(double mse);
 
-    // Brute-force learning
+    // Brute-force learning to find the best weight
     void bruteForceLearning(double input, double& weight, double expectedValue, double learningRate, int maxEpochs);
 
+    // Backpropagation learning function
+    void backpropagation(const std::vector<double>& input, const std::vector<double>& expectedOutput,
+                         std::vector<std::vector<double>>& inputToHiddenWeights, std::vector<double>& hiddenBiases,
+                         std::vector<std::vector<double>>& hiddenToOutputWeights, std::vector<double>& outputBiases,
+                         double learningRate, int epochs);
+
     // Activation functions (ReLU and Sigmoid)
     double relu(double x);
     double sigmoid(double x);
...
lib/src/NeuralNetwork.cpp

@@ -60,6 +60,24 @@ double NeuralNetwork::sigmoid(double x)
     return 0;
 }
 
+double sigmoid(double x)
+{
+    return 0;
+}
+
+double sigmoidDerivative(double x)
+{
+    return 0;
+}
+
+void NeuralNetwork::backpropagation(const std::vector<double>& input, const std::vector<double>& expectedOutput,
+                                    std::vector<std::vector<double>>& inputToHiddenWeights, std::vector<double>& hiddenBiases,
+                                    std::vector<std::vector<double>>& hiddenToOutputWeights, std::vector<double>& outputBiases,
+                                    double learningRate, int epochs)
+{
+    return;
+}
+
 void NeuralNetwork::vectorReLU(std::vector<double>& inputVector, std::vector<double>& outputVector)
 {
     return;
@@ -230,4 +248,4 @@ void NeuralNetwork::loadNetwork(const std::string& filename, int numOfFeatures,
     file.close();
     std::cout << "Network loaded from file: " << filename << "\n";
 }
\ No newline at end of file
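As noted above, backpropagation is committed here as a stub, so training is a no-op. For orientation only, below is a minimal sketch of what an implementation matching the declared signature could look like, assuming sigmoid activations in both layers (matching the manual forward pass in the new lab4.cpp), a squared-error loss E = 1/2 * (yhat - y)^2, and plain gradient descent on a single training example. The helper name backpropagationSketch is hypothetical; this is not the repository's implementation.

#include <cmath>
#include <cstddef>
#include <vector>

// Sketch only: one hidden layer, sigmoid in both layers, squared-error loss.
void backpropagationSketch(const std::vector<double>& input, const std::vector<double>& expectedOutput,
                           std::vector<std::vector<double>>& inputToHiddenWeights, std::vector<double>& hiddenBiases,
                           std::vector<std::vector<double>>& hiddenToOutputWeights, std::vector<double>& outputBiases,
                           double learningRate, int epochs)
{
    auto sigmoid = [](double x) { return 1.0 / (1.0 + std::exp(-x)); };
    const std::size_t nHidden = inputToHiddenWeights.size();
    const std::size_t nOutput = hiddenToOutputWeights.size();

    for (int epoch = 0; epoch < epochs; ++epoch) {
        // Forward pass: hidden activations a1 = sigmoid(W1 x + b1)
        std::vector<double> hidden(nHidden);
        for (std::size_t i = 0; i < nHidden; ++i) {
            double z = hiddenBiases[i];
            for (std::size_t j = 0; j < input.size(); ++j)
                z += input[j] * inputToHiddenWeights[i][j];
            hidden[i] = sigmoid(z);
        }
        // Output activations yhat = sigmoid(W2 a1 + b2)
        std::vector<double> output(nOutput);
        for (std::size_t i = 0; i < nOutput; ++i) {
            double z = outputBiases[i];
            for (std::size_t j = 0; j < nHidden; ++j)
                z += hidden[j] * hiddenToOutputWeights[i][j];
            output[i] = sigmoid(z);
        }

        // Output-layer deltas: dE/dz2 = (yhat - y) * yhat * (1 - yhat)
        std::vector<double> outputDelta(nOutput);
        for (std::size_t i = 0; i < nOutput; ++i)
            outputDelta[i] = (output[i] - expectedOutput[i]) * output[i] * (1.0 - output[i]);

        // Hidden-layer deltas: dE/dz1[j] = (sum_k delta2[k] * W2[k][j]) * a1[j] * (1 - a1[j])
        std::vector<double> hiddenDelta(nHidden);
        for (std::size_t j = 0; j < nHidden; ++j) {
            double sum = 0.0;
            for (std::size_t k = 0; k < nOutput; ++k)
                sum += outputDelta[k] * hiddenToOutputWeights[k][j];
            hiddenDelta[j] = sum * hidden[j] * (1.0 - hidden[j]);
        }

        // Gradient-descent updates: weights, then biases
        for (std::size_t i = 0; i < nOutput; ++i) {
            for (std::size_t j = 0; j < nHidden; ++j)
                hiddenToOutputWeights[i][j] -= learningRate * outputDelta[i] * hidden[j];
            outputBiases[i] -= learningRate * outputDelta[i];
        }
        for (std::size_t i = 0; i < nHidden; ++i) {
            for (std::size_t j = 0; j < input.size(); ++j)
                inputToHiddenWeights[i][j] -= learningRate * hiddenDelta[i] * input[j];
            hiddenBiases[i] -= learningRate * hiddenDelta[i];
        }
    }
}

One caveat worth noting: with raw inputs such as {30.0, 87.0, 110.0}, the hidden-layer weighted sums are large in magnitude, so the sigmoids saturate and their gradients are near zero; this is one reason the previous version of the lab normalizes its inputs before the forward pass.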