Merge pull request #61 from MatthieuHernandez/Improve_code_quality
Add CodeFactor
MatthieuHernandez authored Dec 9, 2023
2 parents 50afbd9 + 7b7df1d commit 2c1b7e4
Showing 17 changed files with 24 additions and 37 deletions.
1 change: 1 addition & 0 deletions README.md
@@ -7,6 +7,7 @@
![](https://github.com/MatthieuHernandez/StraightforwardNeuralNetwork/workflows/Unit%20tests%20Linux/badge.svg?branch=master)
![](https://github.com/MatthieuHernandez/StraightforwardNeuralNetwork/workflows/Unit%20tests%20Windows/badge.svg?branch=master)
![](https://github.com/MatthieuHernandez/StraightforwardNeuralNetwork/workflows/Dataset%20tests/badge.svg?branch=master)
+![](https://www.codefactor.io/repository/github/matthieuhernandez/straightforwardneuralnetwork/badge/master)

**Straightforward Neural Network** is an open source neural network library in C++20 optimized for CPU. The goal of this library is to make the use of neural networks as easy as possible.

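The README describes the library as a C++20 neural network framework built for ease of use. As a rough illustration of that declarative style (a hypothetical sketch: the brace-list constructor and `Input(...)` appear in the tests later in this diff, while `FullyConnected(...)` and the include path are assumptions, not verified against the API):

```cpp
// Hypothetical sketch only: StraightforwardNeuralNetwork's brace-list
// constructor and Input(...) are taken from the tests in this diff; the
// include path and FullyConnected(...) are assumed for illustration.
#include "StraightforwardNeuralNetwork.hpp"

using namespace snn;

int main()
{
    // Declare a small feed-forward classifier: 28x28 inputs, 10 classes.
    StraightforwardNeuralNetwork network({
        Input(28 * 28),
        FullyConnected(150),
        FullyConnected(10)
    });
    // Training and evaluation calls omitted; see the repository README.
}
```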
1 change: 0 additions & 1 deletion src/data/CompositeForTimeSeries.cpp
@@ -56,7 +56,6 @@ void CompositeForTimeSeries::shuffle()
    {
        for (int j = 0; j < this->numberOfRecurrences + 1; ++j)
        {
-
            const int index = iForIndex * (this->numberOfRecurrences + 1) + j + offset;
            this->sets[training].shuffledIndexes[index] = this->indexesForShuffling[i] * (this->numberOfRecurrences + 1) + j + offset;

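The loop above builds shuffled indexes in contiguous blocks of `numberOfRecurrences + 1`, so each recurrent sequence moves as a unit and keeps its internal temporal order. A standalone sketch of the same idea, with assumed names rather than the library's actual members:

```cpp
// Sketch: shuffle time-series data block-wise. Each block of
// (numberOfRecurrences + 1) consecutive frames stays in order; only the
// blocks themselves are permuted. Names are assumed, not the library's API.
#include <algorithm>
#include <numeric>
#include <random>
#include <vector>

std::vector<int> blockShuffledIndexes(int numberOfBlocks, int numberOfRecurrences)
{
    const int blockSize = numberOfRecurrences + 1;
    std::vector<int> blocks(numberOfBlocks);
    std::iota(blocks.begin(), blocks.end(), 0);       // 0, 1, ..., N-1
    std::mt19937 rng(std::random_device{}());
    std::shuffle(blocks.begin(), blocks.end(), rng);  // permute whole blocks

    std::vector<int> indexes(numberOfBlocks * blockSize);
    for (int i = 0; i < numberOfBlocks; ++i)
        for (int j = 0; j < blockSize; ++j)           // keep frame order inside a block
            indexes[i * blockSize + j] = blocks[i] * blockSize + j;
    return indexes;
}
```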
3 changes: 0 additions & 3 deletions src/data/Data.hpp
@@ -104,12 +104,9 @@ namespace snn
        [[nodiscard]] const std::vector<float>& getOutputs(set set, int index);
        [[nodiscard]] int getLabel(set set, int index) const;

-
        [[nodiscard]] float getSeparator() const;
        void setSeparator(float value);
        [[nodiscard]] float getPrecision() const;
        void setPrecision(float value);
-
-
    };
}
4 changes: 2 additions & 2 deletions src/neural_network/layer/FilterLayer.hpp
@@ -33,8 +33,8 @@ namespace snn::internal
        virtual ~FilterLayer() = default;
        FilterLayer(const FilterLayer&) = default;

-       [[nodiscard]] std::vector<int> getShapeOfInput() const override final;
-       [[nodiscard]] std::vector<int> getShapeOfOutput() const override final;
+       [[nodiscard]] std::vector<int> getShapeOfInput() const final;
+       [[nodiscard]] std::vector<int> getShapeOfOutput() const final;
        [[nodiscard]] int getKernelSize() const;
        [[nodiscard]] int isValid() const override;
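This commit repeatedly replaces `override final` with plain `final`. That is safe because `final` is only legal on a virtual function, and a function not declared `virtual` can only be virtual by overriding a base-class one, so `final` alone already implies the override check. A minimal sketch:

```cpp
// Why `override final` is redundant: `final` on a member function is only
// legal if the function is virtual, and without an explicit `virtual`
// keyword that can only happen through overriding.
struct Base
{
    virtual int isValid() const { return 0; }
    virtual ~Base() = default;
};

struct Derived : Base
{
    int isValid() const final { return 1; }  // overrides Base::isValid
    // int isValid(int) const final;         // would not compile: nothing to
                                             // override, so not virtual
};
```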
20 changes: 10 additions & 10 deletions src/neural_network/layer/Layer.hpp
@@ -41,20 +41,20 @@ namespace snn::internal
        std::vector<N> neurons;
        std::vector<std::unique_ptr<LayerOptimizer>> optimizers;

-       std::vector<float> output(const std::vector<float>& inputs, bool temporalReset) override final;
-       std::vector<float> outputForTraining(const std::vector<float>& inputs, bool temporalReset) override final;
-       std::vector<float> backOutput(std::vector<float>& inputErrors) override final;
+       std::vector<float> output(const std::vector<float>& inputs, bool temporalReset) final;
+       std::vector<float> outputForTraining(const std::vector<float>& inputs, bool temporalReset) final;
+       std::vector<float> backOutput(std::vector<float>& inputErrors) final;

-       [[nodiscard]] void* getNeuron(int index) override final;
-       [[nodiscard]] float getAverageOfAbsNeuronWeights() const override final;
-       [[nodiscard]] float getAverageOfSquareNeuronWeights() const override final;
-       [[nodiscard]] int getNumberOfInputs() const override final;
-       [[nodiscard]] int getNumberOfNeurons() const override final;
-       [[nodiscard]] int getNumberOfParameters() const override final;
+       [[nodiscard]] void* getNeuron(int index) final;
+       [[nodiscard]] float getAverageOfAbsNeuronWeights() const final;
+       [[nodiscard]] float getAverageOfSquareNeuronWeights() const final;
+       [[nodiscard]] int getNumberOfInputs() const final;
+       [[nodiscard]] int getNumberOfNeurons() const final;
+       [[nodiscard]] int getNumberOfParameters() const final;
        [[nodiscard]] std::vector<int> getShapeOfInput() const override = 0;
        [[nodiscard]] std::vector<int> getShapeOfOutput() const override = 0;

-       void train(std::vector<float>& inputErrors) override final;
+       void train(std::vector<float>& inputErrors) final;

        [[nodiscard]] int isValid() const override;

2 changes: 0 additions & 2 deletions src/neural_network/layer/LayerFactory.hpp
@@ -133,7 +133,6 @@ namespace snn
            kernelSize,
            std::vector<int>(),
            std::vector<LayerOptimizerModel>()
-
        };
        return model;
    }
@@ -161,7 +160,6 @@
            kernelSize,
            std::vector<int>(),
            {static_cast<LayerOptimizerModel>(optimizers) ...}
-
        };
        return model;
    }
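The factory hunks above expand a template parameter pack directly into the braced initializer of a model struct. A reduced sketch of that pattern (the `Model` and `OptimizerModel` names here are illustrative stand-ins, not the library's real types):

```cpp
// Sketch of the pack-expansion pattern used by the factory: every variadic
// argument is cast and expanded into one std::vector via a braced list.
#include <vector>

enum class OptimizerModel { dropout, l1Regularization, l2Regularization };

struct Model
{
    int kernelSize;
    std::vector<OptimizerModel> optimizers;
};

template <typename... Ts>
Model makeModel(int kernelSize, Ts... optimizers)
{
    Model model{
        kernelSize,
        {static_cast<OptimizerModel>(optimizers)...}  // pack expands to {a, b, ...}
    };
    return model;
}

// Usage: auto m = makeModel(3, OptimizerModel::dropout, OptimizerModel::l1Regularization);
```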
1 change: 0 additions & 1 deletion src/neural_network/layer/MaxPooling1D.cpp
@@ -46,7 +46,6 @@ void MaxPooling1D::buildKernelIndexes()
                this->kernelIndexes[k][kernelIndex] = inputIndex;
            else
                this->kernelIndexes[k][kernelIndex] = -1;
-
        }
    }
}
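`buildKernelIndexes` maps every kernel slot to an input index once, storing `-1` for slots that fall outside the input so the forward pass needs no bounds checks. A self-contained sketch of that precomputation, simplified to a stride equal to the kernel size (names assumed):

```cpp
// Sketch: precompute, for every 1D pooling window, the input index behind
// each kernel slot; -1 marks out-of-range slots (implicit padding).
// Simplified to stride == kernelSize; the real layer may differ.
#include <vector>

std::vector<std::vector<int>> buildKernelIndexes(int numberOfInputs, int kernelSize)
{
    const int numberOfKernels = (numberOfInputs + kernelSize - 1) / kernelSize; // ceil
    std::vector<std::vector<int>> kernelIndexes(numberOfKernels,
                                                std::vector<int>(kernelSize));
    for (int k = 0; k < numberOfKernels; ++k)
    {
        for (int i = 0; i < kernelSize; ++i)
        {
            const int inputIndex = k * kernelSize + i;
            if (inputIndex < numberOfInputs)
                kernelIndexes[k][i] = inputIndex;  // valid input position
            else
                kernelIndexes[k][i] = -1;          // outside the input: padding
        }
    }
    return kernelIndexes;
}
```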
16 changes: 8 additions & 8 deletions src/neural_network/layer/SimpleLayer.hpp
@@ -18,9 +18,9 @@ namespace snn::internal
        void serialize(Archive& ar, unsigned version);

    protected:
-       [[nodiscard]] std::vector<float> computeBackOutput(std::vector<float>& inputErrors) override final;
-       [[nodiscard]] std::vector<float> computeOutput(const std::vector<float>& inputs, bool temporalReset) override final;
-       virtual void computeTrain(std::vector<float>& inputErrors) override final;
+       [[nodiscard]] std::vector<float> computeBackOutput(std::vector<float>& inputErrors) final;
+       [[nodiscard]] std::vector<float> computeOutput(const std::vector<float>& inputs, bool temporalReset) final;
+       virtual void computeTrain(std::vector<float>& inputErrors) final;

    public:
        SimpleLayer() = default; // use restricted to Boost library only
@@ -29,12 +29,12 @@
        virtual ~SimpleLayer() = default;
        [[nodiscard]] std::unique_ptr<BaseLayer> clone(std::shared_ptr<NeuralNetworkOptimizer> optimizer) const override;

-       [[nodiscard]] std::vector<int> getShapeOfInput() const override final;
-       [[nodiscard]] std::vector<int> getShapeOfOutput() const override final;
-       [[nodiscard]] int isValid() const override final;
+       [[nodiscard]] std::vector<int> getShapeOfInput() const final;
+       [[nodiscard]] std::vector<int> getShapeOfOutput() const final;
+       [[nodiscard]] int isValid() const final;

-       bool operator==(const BaseLayer& layer) const override final;
-       bool operator!=(const BaseLayer& layer) const override final;
+       bool operator==(const BaseLayer& layer) const final;
+       bool operator!=(const BaseLayer& layer) const final;
};

template <BaseNeuron N>
1 change: 0 additions & 1 deletion src/neural_network/layer/neuron/Neuron.hpp
@@ -20,7 +20,6 @@ namespace snn::internal
        void serialize(Archive& ar, unsigned version);

    protected:
-
        int numberOfInputs;
        int batchSize;
        std::vector<float> weights;
@@ -21,7 +21,6 @@ namespace snn::internal
    class ActivationFunction
    {
    private:
-
        friend class boost::serialization::access;
        template <class Archive>
        void serialize(Archive& ar, unsigned version);
1 change: 0 additions & 1 deletion src/neural_network/optimizer/Softmax.cpp
@@ -45,7 +45,6 @@ void Softmax::computeSoftmax(std::vector<float>& outputs)
        else
            output = value;
    }
-
}

inline
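`computeSoftmax` special-cases large inputs, which the `SoftmaxTests` below exercise. The standard way to make softmax numerically stable is to subtract the maximum before exponentiating; the shift cancels in the normalization. A minimal sketch, not the library's exact implementation:

```cpp
// Sketch of a numerically stable softmax: exp(x - max) cannot overflow for
// large x, and the shift cancels out because softmax(x) == softmax(x - c).
#include <algorithm>
#include <cmath>
#include <vector>

void computeStableSoftmax(std::vector<float>& outputs)
{
    if (outputs.empty())
        return;
    const float maxValue = *std::max_element(outputs.begin(), outputs.end());
    float sum = 0.0f;
    for (auto& output : outputs)
    {
        output = std::exp(output - maxValue);  // largest exponent becomes exp(0) == 1
        sum += output;
    }
    for (auto& output : outputs)
        output /= sum;                         // normalize to probabilities
}
```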
@@ -38,7 +38,6 @@ TEST_F(AudioCatsAndDogsTest, loadData)

TEST_F(AudioCatsAndDogsTest, DISABLED_trainBestNeuralNetwork)
{
-
    StraightforwardNeuralNetwork neuralNetwork({
        Input(sizeOfOneData),
        MaxPooling(1600),
2 changes: 1 addition & 1 deletion tests/manual_tests/Cifar10SimpleNetwork.py
@@ -37,5 +37,5 @@
plt.ylim([0.5, 1])
plt.legend(loc='lower right')

-test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
+test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
print(test_acc)
2 changes: 1 addition & 1 deletion tests/manual_tests/FashionMnistSimpleNetwork.py
@@ -36,5 +36,5 @@
plt.ylim([0.5, 1])
plt.legend(loc='lower right')

-test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
+test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
print(test_acc)
1 change: 0 additions & 1 deletion tests/unit_tests/ConvolutionTests.cpp
@@ -64,7 +64,6 @@ TEST(Convolution, LayerConvolution2D)
        324, 324, 795, 795, 1425, 1425, 1137, 1137, 666, 666,
        411, 411, 944, 944, 1607, 1607, 1220, 1220, 687, 687,
        315, 315, 703, 703, 1168, 1168, 865, 865, 477, 477
-
    };
    LayerModel model {
        convolution,
3 changes: 1 addition & 2 deletions tests/unit_tests/LocallyConnectedTests.cpp
@@ -93,8 +93,7 @@ TEST(LocallyConnected, ComplexeLayerLocallyConnected2D)
        {73, 75, 77, 79, 81, 83, 85, 87, 89, 91, 93, 95, 97, 99, 101, 103, 105, 107, 1},
        {74, 76, 78, 80, 82, 84, 86, 88, 90, 92, 94, 96, 98, 100, 102, 104, 106, 108, 1},
        {109, 111, 113, 115, 117, 119, 121, 123, 125, 127, 129, 131, 133, 135, 137, 139, 141, 143, 1},
-       {110, 112, 114, 116, 118, 120, 122, 124, 126, 128, 130, 132, 134, 136, 138, 140, 142, 144, 1},
-
+       {110, 112, 114, 116, 118, 120, 122, 124, 126, 128, 130, 132, 134, 136, 138, 140, 142, 144, 1}
    };
    vector<float> error(8);
    std::iota(std::begin(input), std::end(input), 1.0f);
1 change: 0 additions & 1 deletion tests/unit_tests/SoftmaxTests.cpp
@@ -29,7 +29,6 @@ TEST(Softmax, largeValues)

    ASSERT_VECTOR_EQ(values1, expectedValues);
    ASSERT_VECTOR_EQ(values2, expectedValues);
-
}

TEST(Softmax, smallValues)
