diff --git a/README.md b/README.md
index 1e454681..822e4ef7 100644
--- a/README.md
+++ b/README.md
@@ -7,6 +7,7 @@
 ![](https://github.com/MatthieuHernandez/StraightforwardNeuralNetwork/workflows/Unit%20tests%20Linux/badge.svg?barnch=master)
 ![](https://github.com/MatthieuHernandez/StraightforwardNeuralNetwork/workflows/Unit%20tests%20Windows/badge.svg?barnch=master)
 ![](https://github.com/MatthieuHernandez/StraightforwardNeuralNetwork/workflows/Dataset%20tests/badge.svg?barnch=master)
+![](https://www.codefactor.io/repository/github/matthieuhernandez/straightforwardneuralnetwork/badge/master)
 
 **Straightforward Neural Network** is an open source neural network library in C++20 optimized for CPU.
 The goal of this library is to make the use of neural networks as easy as possible.
diff --git a/src/data/CompositeForTimeSeries.cpp b/src/data/CompositeForTimeSeries.cpp
index 2b8cc61a..f4fa6d67 100644
--- a/src/data/CompositeForTimeSeries.cpp
+++ b/src/data/CompositeForTimeSeries.cpp
@@ -56,7 +56,6 @@ void CompositeForTimeSeries::shuffle()
     {
         for (int j = 0; j < this->numberOfRecurrences + 1; ++j)
         {
-
             const int index = iForIndex * (this->numberOfRecurrences + 1) + j + offset;
             this->sets[training].shuffledIndexes[index] =
                 this->indexesForShuffling[i] * (this->numberOfRecurrences + 1) + j + offset;
diff --git a/src/data/Data.hpp b/src/data/Data.hpp
index e70539fd..d09050b5 100644
--- a/src/data/Data.hpp
+++ b/src/data/Data.hpp
@@ -104,12 +104,9 @@ namespace snn
         [[nodiscard]] const std::vector& getOutputs(set set, int index);
         [[nodiscard]] int getLabel(set set, int index) const;
-
         [[nodiscard]] float getSeparator() const;
         void setSeparator(float value);
 
         [[nodiscard]] float getPrecision() const;
         void setPrecision(float value);
-
-
     };
 }
diff --git a/src/neural_network/layer/FilterLayer.hpp b/src/neural_network/layer/FilterLayer.hpp
index bbc2fe21..b85d6b0b 100644
--- a/src/neural_network/layer/FilterLayer.hpp
+++ b/src/neural_network/layer/FilterLayer.hpp
@@ -33,8 +33,8 @@ namespace snn::internal
         virtual ~FilterLayer() = default;
         FilterLayer(const FilterLayer&) = default;
 
-        [[nodiscard]] std::vector getShapeOfInput() const override final;
-        [[nodiscard]] std::vector getShapeOfOutput() const override final;
+        [[nodiscard]] std::vector getShapeOfInput() const final;
+        [[nodiscard]] std::vector getShapeOfOutput() const final;
         [[nodiscard]] int getKernelSize() const;
 
         [[nodiscard]] int isValid() const override;
diff --git a/src/neural_network/layer/Layer.hpp b/src/neural_network/layer/Layer.hpp
index 1c6cafa2..5834717a 100644
--- a/src/neural_network/layer/Layer.hpp
+++ b/src/neural_network/layer/Layer.hpp
@@ -41,20 +41,20 @@ namespace snn::internal
         std::vector neurons;
         std::vector> optimizers;
 
-        std::vector output(const std::vector& inputs, bool temporalReset) override final;
-        std::vector outputForTraining(const std::vector& inputs, bool temporalReset) override final;
-        std::vector backOutput(std::vector& inputErrors) override final;
+        std::vector output(const std::vector& inputs, bool temporalReset) final;
+        std::vector outputForTraining(const std::vector& inputs, bool temporalReset) final;
+        std::vector backOutput(std::vector& inputErrors) final;
 
-        [[nodiscard]] void* getNeuron(int index) override final;
-        [[nodiscard]] float getAverageOfAbsNeuronWeights() const override final;
-        [[nodiscard]] float getAverageOfSquareNeuronWeights() const override final;
-        [[nodiscard]] int getNumberOfInputs() const override final;
-        [[nodiscard]] int getNumberOfNeurons() const override final;
-        [[nodiscard]] int getNumberOfParameters() const override final;
+        [[nodiscard]] void* getNeuron(int index) final;
+        [[nodiscard]] float getAverageOfAbsNeuronWeights() const final;
+        [[nodiscard]] float getAverageOfSquareNeuronWeights() const final;
+        [[nodiscard]] int getNumberOfInputs() const final;
+        [[nodiscard]] int getNumberOfNeurons() const final;
+        [[nodiscard]] int getNumberOfParameters() const final;
 
         [[nodiscard]] std::vector getShapeOfInput() const override = 0;
         [[nodiscard]] std::vector getShapeOfOutput() const override = 0;
 
-        void train(std::vector& inputErrors) override final;
+        void train(std::vector& inputErrors) final;
 
         [[nodiscard]] int isValid() const override;
diff --git a/src/neural_network/layer/LayerFactory.hpp b/src/neural_network/layer/LayerFactory.hpp
index 15df8148..46662c7b 100644
--- a/src/neural_network/layer/LayerFactory.hpp
+++ b/src/neural_network/layer/LayerFactory.hpp
@@ -133,7 +133,6 @@ namespace snn
             kernelSize,
             std::vector(),
             std::vector()
-
         };
         return model;
     }
@@ -161,7 +160,6 @@ namespace snn
             kernelSize,
             std::vector(),
             {static_cast(optimizers) ...}
-
         };
         return model;
     }
diff --git a/src/neural_network/layer/MaxPooling1D.cpp b/src/neural_network/layer/MaxPooling1D.cpp
index 68f0d169..6dd32ce8 100644
--- a/src/neural_network/layer/MaxPooling1D.cpp
+++ b/src/neural_network/layer/MaxPooling1D.cpp
@@ -46,7 +46,6 @@ void MaxPooling1D::buildKernelIndexes()
                 this->kernelIndexes[k][kernelIndex] = inputIndex;
             else
                 this->kernelIndexes[k][kernelIndex] = -1;
-
         }
     }
 }
diff --git a/src/neural_network/layer/SimpleLayer.hpp b/src/neural_network/layer/SimpleLayer.hpp
index 3b0d521a..9db90422 100644
--- a/src/neural_network/layer/SimpleLayer.hpp
+++ b/src/neural_network/layer/SimpleLayer.hpp
@@ -18,9 +18,9 @@ namespace snn::internal
         void serialize(Archive& ar, unsigned version);
 
     protected:
-        [[nodiscard]] std::vector computeBackOutput(std::vector& inputErrors) override final;
-        [[nodiscard]] std::vector computeOutput(const std::vector& inputs, bool temporalReset) override final;
-        virtual void computeTrain(std::vector& inputErrors) override final;
+        [[nodiscard]] std::vector computeBackOutput(std::vector& inputErrors) final;
+        [[nodiscard]] std::vector computeOutput(const std::vector& inputs, bool temporalReset) final;
+        virtual void computeTrain(std::vector& inputErrors) final;
 
     public:
         SimpleLayer() = default; // use restricted to Boost library only
@@ -29,12 +29,12 @@ namespace snn::internal
         virtual ~SimpleLayer() = default;
 
         [[nodiscard]] std::unique_ptr clone(std::shared_ptr optimizer) const override;
-        [[nodiscard]] std::vector getShapeOfInput() const override final;
-        [[nodiscard]] std::vector getShapeOfOutput() const override final;
-        [[nodiscard]] int isValid() const override final;
+        [[nodiscard]] std::vector getShapeOfInput() const final;
+        [[nodiscard]] std::vector getShapeOfOutput() const final;
+        [[nodiscard]] int isValid() const final;
 
-        bool operator==(const BaseLayer& layer) const override final;
-        bool operator!=(const BaseLayer& layer) const override final;
+        bool operator==(const BaseLayer& layer) const final;
+        bool operator!=(const BaseLayer& layer) const final;
     };
 
     template
diff --git a/src/neural_network/layer/neuron/Neuron.hpp b/src/neural_network/layer/neuron/Neuron.hpp
index fc75df3d..de156115 100644
--- a/src/neural_network/layer/neuron/Neuron.hpp
+++ b/src/neural_network/layer/neuron/Neuron.hpp
@@ -20,7 +20,6 @@ namespace snn::internal
         void serialize(Archive& ar, unsigned version);
 
     protected:
-
         int numberOfInputs;
         int batchSize;
         std::vector weights;
diff --git a/src/neural_network/layer/neuron/activation_function/ActivationFunction.hpp b/src/neural_network/layer/neuron/activation_function/ActivationFunction.hpp
index 47991af6..fb0303b2 100644
--- a/src/neural_network/layer/neuron/activation_function/ActivationFunction.hpp
+++ b/src/neural_network/layer/neuron/activation_function/ActivationFunction.hpp
@@ -21,7 +21,6 @@ namespace snn::internal
     class ActivationFunction
     {
     private:
-
         friend class boost::serialization::access;
         template
        void serialize(Archive& ar, unsigned version);
diff --git a/src/neural_network/optimizer/Softmax.cpp b/src/neural_network/optimizer/Softmax.cpp
index 39a66485..ec1cd9f0 100644
--- a/src/neural_network/optimizer/Softmax.cpp
+++ b/src/neural_network/optimizer/Softmax.cpp
@@ -45,7 +45,6 @@ void Softmax::computeSoftmax(std::vector& outputs)
         else
             output = value;
     }
-
 }
 
 inline
diff --git a/tests/dataset_tests/audio-cats-and-dogs/AudioCatsAndDogsTest.cpp b/tests/dataset_tests/audio-cats-and-dogs/AudioCatsAndDogsTest.cpp
index ada3336d..907bb3c9 100644
--- a/tests/dataset_tests/audio-cats-and-dogs/AudioCatsAndDogsTest.cpp
+++ b/tests/dataset_tests/audio-cats-and-dogs/AudioCatsAndDogsTest.cpp
@@ -38,7 +38,6 @@ TEST_F(AudioCatsAndDogsTest, loadData)
 
 TEST_F(AudioCatsAndDogsTest, DISABLED_trainBestNeuralNetwork)
 {
-
     StraightforwardNeuralNetwork neuralNetwork({
         Input(sizeOfOneData),
         MaxPooling(1600),
diff --git a/tests/manual_tests/Cifar10SimpleNetwork.py b/tests/manual_tests/Cifar10SimpleNetwork.py
index 516fcf9b..c8eb9fe2 100644
--- a/tests/manual_tests/Cifar10SimpleNetwork.py
+++ b/tests/manual_tests/Cifar10SimpleNetwork.py
@@ -37,5 +37,5 @@
 plt.ylim([0.5, 1])
 plt.legend(loc='lower right')
 
-test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2) 
+test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
 print(test_acc)
diff --git a/tests/manual_tests/FashionMnistSimpleNetwork.py b/tests/manual_tests/FashionMnistSimpleNetwork.py
index 4ba759e0..0d62fa1b 100644
--- a/tests/manual_tests/FashionMnistSimpleNetwork.py
+++ b/tests/manual_tests/FashionMnistSimpleNetwork.py
@@ -36,5 +36,5 @@
 plt.ylim([0.5, 1])
 plt.legend(loc='lower right')
 
-test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2) 
+test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
 print(test_acc)
diff --git a/tests/unit_tests/ConvolutionTests.cpp b/tests/unit_tests/ConvolutionTests.cpp
index 06b6b3cb..5d22a475 100644
--- a/tests/unit_tests/ConvolutionTests.cpp
+++ b/tests/unit_tests/ConvolutionTests.cpp
@@ -64,7 +64,6 @@ TEST(Convolution, LayerConvolution2D)
         324, 324, 795, 795, 1425, 1425, 1137, 1137, 666, 666,
         411, 411, 944, 944, 1607, 1607, 1220, 1220, 687, 687,
         315, 315, 703, 703, 1168, 1168, 865, 865, 477, 477
-
     };
     LayerModel model
     {
         convolution,
diff --git a/tests/unit_tests/LocallyConnectedTests.cpp b/tests/unit_tests/LocallyConnectedTests.cpp
index 327a1aa7..9f79e9c6 100644
--- a/tests/unit_tests/LocallyConnectedTests.cpp
+++ b/tests/unit_tests/LocallyConnectedTests.cpp
@@ -93,8 +93,7 @@ TEST(LocallyConnected, ComplexeLayerLocallyConnected2D)
         {73, 75, 77, 79, 81, 83, 85, 87, 89, 91, 93, 95, 97, 99, 101, 103, 105, 107, 1},
         {74, 76, 78, 80, 82, 84, 86, 88, 90, 92, 94, 96, 98, 100, 102, 104, 106, 108, 1},
         {109, 111, 113, 115, 117, 119, 121, 123, 125, 127, 129, 131, 133, 135, 137, 139, 141, 143, 1},
-        {110, 112, 114, 116, 118, 120, 122, 124, 126, 128, 130, 132, 134, 136, 138, 140, 142, 144, 1},
-
+        {110, 112, 114, 116, 118, 120, 122, 124, 126, 128, 130, 132, 134, 136, 138, 140, 142, 144, 1}
     };
     vector error(8);
     std::iota(std::begin(input), std::end(input), 1.0f);
diff --git a/tests/unit_tests/SoftmaxTests.cpp b/tests/unit_tests/SoftmaxTests.cpp
index 4738e8cc..f20867ee 100644
--- a/tests/unit_tests/SoftmaxTests.cpp
+++ b/tests/unit_tests/SoftmaxTests.cpp
@@ -29,7 +29,6 @@ TEST(Softmax, largeValues)
 
     ASSERT_VECTOR_EQ(values1, expectedValues);
     ASSERT_VECTOR_EQ(values2, expectedValues);
-
 }
 
 TEST(Softmax, smallValues)
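
Note on the recurring change above, where `override final` becomes plain `final`: a member function may only be marked `final` if it is virtual, so when the `virtual` keyword is absent the compiler already guarantees that the function overrides something inherited from a base class, and an explicit `override` adds nothing. A minimal sketch of the rule, with illustrative types that are not taken from the library's headers:

    #include <vector>

    struct ExampleBase
    {
        virtual ~ExampleBase() = default;
        virtual std::vector<float> output(const std::vector<float>& inputs) = 0;
    };

    struct ExampleLayer : ExampleBase
    {
        // `override final` also compiles, but `final` alone keeps the safety net:
        // if the signature stopped matching the base declaration, the compiler
        // would reject `final` because only virtual member functions can be final.
        std::vector<float> output(const std::vector<float>& inputs) final
        {
            return inputs; // identity pass-through, only here to keep the sketch compilable
        }
    };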