diff --git a/include/cn24.h b/include/cn24.h
index a4ed5f1..37aa7a8 100644
--- a/include/cn24.h
+++ b/include/cn24.h
@@ -58,12 +58,10 @@
 #include "cn24/net/ConcatenationLayer.h"
 #include "cn24/net/GradientAccumulationLayer.h"
 #include "cn24/net/SumLayer.h"
-#include "cn24/net/Net.h"
 #include "cn24/net/Trainer.h"
 #include "cn24/net/NetGraph.h"
 #include "cn24/net/NetStatus.h"
 #include "cn24/factory/ConfigurableFactory.h"
-#include "cn24/factory/SkipLayerNetworkFactory.h"
 
 #endif
diff --git a/include/cn24/factory/ConfigurableFactory.h b/include/cn24/factory/ConfigurableFactory.h
index 2f172ca..6eed5d8 100644
--- a/include/cn24/factory/ConfigurableFactory.h
+++ b/include/cn24/factory/ConfigurableFactory.h
@@ -17,7 +17,6 @@
 #include <iostream>
 
-#include "../net/Net.h"
 #include "../net/NetGraph.h"
 #include "../net/Trainer.h"
 #include "../util/Dataset.h"
@@ -27,7 +26,6 @@ namespace Conv {
 
 class Factory {
 public:
-  virtual int AddLayers(Net& net, Connection data_layer_connection, const unsigned int output_classes, bool add_loss_layer = false, std::ostream& graph_output = std::cout) = 0;
   virtual bool AddLayers(NetGraph& graph, NetGraphConnection data_layer_connection, const unsigned int output_classes, bool add_loss_layer = false) = 0;
   virtual int patchsizex() = 0;
   virtual int patchsizey() = 0;
@@ -52,17 +50,14 @@ class ConfigurableFactory : public Factory {
   /**
    * @brief Adds the configured layers to a network using the specified input layer
    *
-   * @param net The net to add the layers to
+   * @param graph The NetGraph to add the layers to
    * @param data_layer_connection Input to the first layer of this configuration
    * @param output_classes The number of output neurons. This also affects the activation function of
    *        the last layer: for output_classes=1, tanh is used. Otherwise, sigm is used.
    * @param add_loss_layer If set to true, the factory also adds a matching loss layer
-   * @param graph_output An output stream. The factory will write the layout in graphviz format into this string.
    *
-   * @returns The layer id of the output layer
+   * @returns Whether the net is complete
    */
-  virtual int AddLayers(Net& net, Connection data_layer_connection, const unsigned int output_classes, bool add_loss_layer = false, std::ostream& graph_output = std::cout);
-
   virtual bool AddLayers(NetGraph& graph, NetGraphConnection data_layer_connection, const unsigned int output_classes, bool add_loss_layer = false);
 
   /**
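
With the Net-based overload removed, the NetGraph overload above is the only AddLayers entry point, and it now reports completeness rather than an output layer id. A minimal driving sketch, assembled from the call sites visible later in this diff (the deleted networkGraph tool and trainNetwork); factory, dataset, batch_size and classes are illustrative placeholders, not prescribed names:

  // Sketch only: typical use of the surviving NetGraph-based factory API.
  Conv::DatasetInputLayer* data_layer =
    new Conv::DatasetInputLayer(*dataset, batch_size, 1.0, 983923);
  Conv::NetGraphNode* data_node = new Conv::NetGraphNode(data_layer);
  data_node->is_input = true;

  Conv::NetGraph graph;
  graph.AddNode(data_node);

  // Returns whether the net is complete instead of a layer id
  bool complete = factory->AddLayers(graph, Conv::NetGraphConnection(data_node, 0), classes, true);
  if (complete)
    graph.Initialize();
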
diff --git a/include/cn24/factory/SkipLayerNetworkFactory.h b/include/cn24/factory/SkipLayerNetworkFactory.h
deleted file mode 100644
index 8b97f8b..0000000
--- a/include/cn24/factory/SkipLayerNetworkFactory.h
+++ /dev/null
@@ -1,28 +0,0 @@
-#ifndef CONV_SKIPLAYERNETWORKFACTORY_H
-#define CONV_SKIPLAYERNETWORKFACTORY_H
-
-#include <iostream>
-
-#include "../net/Net.h"
-#include "../net/NetGraph.h"
-#include "../net/Trainer.h"
-#include "../util/Dataset.h"
-#include "../util/Log.h"
-#include "ConfigurableFactory.h"
-
-namespace Conv {
-
-class SkipLayerNetworkFactory : public Factory {
-  int AddLayers(Net& net, Connection data_layer_connection, const unsigned int output_classes, bool add_loss_layer = false, std::ostream& graph_output = std::cout);
-  bool AddLayers(NetGraph& graph, NetGraphConnection data_layer_connection, const unsigned int output_classes, bool add_loss_layer = false);
-  int patchsizex();
-  int patchsizey();
-  Layer* CreateLossLayer(const unsigned int output_classes, const datum loss_weight = 1.0);
-  void InitOptimalSettings();
-  TrainerSettings optimal_settings() const;
-  Method method() const;
-};
-
-}
-
-#endif
\ No newline at end of file
diff --git a/include/cn24/net/Layer.h b/include/cn24/net/Layer.h
index ec16d84..3eb8f2b 100644
--- a/include/cn24/net/Layer.h
+++ b/include/cn24/net/Layer.h
@@ -110,7 +110,7 @@ class Layer {
   virtual bool IsOpenCLAware() { return false; }
 
   virtual std::string GetLayerDescription() { return "Layer"; }
-  virtual void CreateBufferDescriptors(std::vector<NetGraphBuffer>& buffers) {}
+  virtual void CreateBufferDescriptors(std::vector<NetGraphBuffer>& buffers) {UNREFERENCED_PARAMETER(buffers);}
 protected:
   /**
    * @brief These CombinedTensors contain the weights and biases.
diff --git a/include/cn24/net/Net.h b/include/cn24/net/Net.h
deleted file mode 100644
index ece0f87..0000000
--- a/include/cn24/net/Net.h
+++ /dev/null
@@ -1,210 +0,0 @@
-/*
- * This file is part of the CN24 semantic segmentation software,
- * copyright (C) 2015 Clemens-Alexander Brust (ikosa dot de at gmail dot com).
- *
- * For licensing information, see the LICENSE file included with this project.
- */
-/**
- * @file Net.h
- * @class Net
- * @brief This is a connected collection of Layers.
- *
- * @author Clemens-Alexander Brust (ikosa dot de at gmail dot com)
- */
-
-#ifndef CONV_NET_H
-#define CONV_NET_H
-
-#include
-#include
-#include
-#include
-
-#include "Layer.h"
-#include "LossFunctionLayer.h"
-#include "TrainingLayer.h"
-#include "StatLayer.h"
-#include "BinaryStatLayer.h"
-#include "ConfusionMatrixLayer.h"
-
-#include "NetStatus.h"
-
-namespace Conv {
-
-class Trainer;
-class GradientTester;
-
-struct Connection {
-public:
-  Connection () : net(0), output(0) {}
-  Connection (const int net, const int output = 0) :
-    net (net), output (output) { }
-  int net;
-  int output;
-};
-
-class Net : public NetStatus {
-  friend class Trainer;
-  friend class GradientTester;
-public:
-  /**
-   * @brief Adds a layer to the network.
-   *
-   * @param layer The layer to add
-   * @param connections The inputs to the layer
-   * @returns The id of the layer in the network
-   */
-  int AddLayer (Layer* layer, const std::vector<Connection>& connections =
-    std::vector<Connection>());
-
-  /**
-   * @brief Adds a layer to the network.
-   *
-   * @param layer The layer to add
-   * @param input_layer The input to the layer (output 0 is used)
-   * @returns The id of the layer in the network
-   */
-  int AddLayer (Layer* layer, const int input_layer);
-
-  /**
-   * @brief Initializes the weights.
-   */
-  void InitializeWeights();
-
-  /**
-   * @brief Complete forward pass.
-   *
-   * Calls every Layer's FeedForward function.
-   */
-  void FeedForward();
-
-  /**
-   * @brief Forward pass up to the specified layer
-   *
-   * @param last Layer id of the last layer to process
-   */
-  void FeedForward(const unsigned int last);
-
-  /**
-   * @brief Complete backward pass.
-   *
-   * Calls every Layer's BackPropagate function.
-   */
-  void BackPropagate();
-
-  /**
-   * @brief Collects every Layer's parameters.
-   *
-   * @param parameters Vector to store the parameters in
-   */
-  void GetParameters(std::vector<CombinedTensor*>& parameters);
-
-  /**
-   * @brief Writes the params to a Tensor file.
-   *
-   * @param output Stream to write the Tensors to
-   */
-  void SerializeParameters(std::ostream& output);
-
-  /**
-   * @brief Reads the parameters from a Tensor file.
-   *
-   * @param input Stream to read the Tensors from
-   * @param last_layer The id of the last layer to load parameters into,
-   *        for fine-tuning. Set to zero for all layers.
-   */
-  void DeserializeParameters(std::istream& input, unsigned int last_layer = 0);
-
-  /**
-   * @brief Gets the training layer.
-   */
-  inline TrainingLayer* training_layer() {
-    return training_layer_;
-  }
-
-  /**
-   * @brief Gets the loss function layer.
-   */
-  inline LossFunctionLayer* lossfunction_layer() {
-    return lossfunction_layer_;
-  }
-
-  /**
-   * @brief Gets the stat layers.
-   */
-  inline std::vector<StatLayer*>& stat_layers() {
-    return stat_layers_;
-  }
-
-  /**
-   * @brief Gets the binary stat layer.
-   */
-  inline BinaryStatLayer* binary_stat_layer() {
-    return binary_stat_layer_;
-  }
-
-  /**
-   * @brief Gets the confusion matrix layer.
-   */
-  inline ConfusionMatrixLayer* confusion_matrix_layer() {
-    return confusion_matrix_layer_;
-  }
-
-  /**
-   * @brief Gets the layer with the corresponding id
-   */
-  inline Layer* layer(int layer_id) const {
-    return layers_[layer_id];
-  }
-
-  /**
-   * @brief Returns the output buffer of the given layer
-   */
-  inline CombinedTensor* buffer(int layer_id, int buffer_id = 0) const {
-    return buffers_[layer_id][buffer_id];
-  }
-
-  /**
-   * @brief Enables or disables the binary stat layer
-   */
-  inline void SetTestOnlyStatDisabled(const bool disabled = false) {
-    if(binary_stat_layer_ != nullptr) {
-      LOGDEBUG << "Binary stat layer disabled: " << disabled;
-      binary_stat_layer_->SetDisabled(disabled);
-    }
-
-    if(confusion_matrix_layer_ != nullptr) {
-      LOGDEBUG << "Confusion matrix layer disabled: " << disabled;
-      confusion_matrix_layer_->SetDisabled(disabled);
-    }
-  }
-
-  /**
-   * @brief Enables the built-in layer view GUI. Needs CMake build option.
-   */
-  inline void SetLayerViewEnabled(const bool enabled = true) {
-    LOGDEBUG << "Layer view enabled: " << enabled;
-    layer_view_enabled_ = enabled;
-  }
-
-  void PrintAndResetLayerTime(datum samples);
-private:
-  TrainingLayer* training_layer_ = nullptr;
-  LossFunctionLayer* lossfunction_layer_ = nullptr;
-  BinaryStatLayer* binary_stat_layer_ = nullptr;
-  ConfusionMatrixLayer* confusion_matrix_layer_ = nullptr;
-  std::vector<StatLayer*> stat_layers_;
-  std::vector<Layer*> layers_;
-  std::vector<std::vector<CombinedTensor*>> buffers_;
-  std::vector<std::vector<CombinedTensor*>> inputs_;
-  std::vector<std::pair<Layer*, Layer*>> weight_connections_;
-
-  bool layer_view_enabled_ = false;
-
-  std::chrono::duration<double>* forward_durations_ = nullptr;
-  std::chrono::duration<double>* backward_durations_ = nullptr;
-};
-
-}
-
-#endif
diff --git a/include/cn24/util/Config.h b/include/cn24/util/Config.h
index 9a7f226..98cfcd9 100644
--- a/include/cn24/util/Config.h
+++ b/include/cn24/util/Config.h
@@ -40,6 +40,9 @@
 typedef u_int32_t duint;
 #define UCHAR_FROM_DATUM(x) ((unsigned char) (255.0f * ((Conv::datum)x) ) )
 #define MCHAR_FROM_DATUM(x) ((unsigned char) (127.0f + 127.0f * ((Conv::datum)x) ) )
 
+// use this macro to suppress compiler warnings for unused variables
+#define UNREFERENCED_PARAMETER(x) (void)x
+
 }
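
The new macro is the standard discarded-expression idiom: casting a parameter to void counts as a use, so unused-parameter warnings disappear while no code is generated. A self-contained sketch of the pattern as the rest of this diff applies it (ExampleLayer is a hypothetical name, not a CN24 class):

  #define UNREFERENCED_PARAMETER(x) (void)x

  struct ExampleLayer {
    // The parameter is required by the interface but intentionally unused;
    // the cast marks it as used without emitting any instructions.
    virtual bool Connect(int input_count) {
      UNREFERENCED_PARAMETER(input_count);
      return true;
    }
    virtual ~ExampleLayer() {}
  };
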
diff --git a/include/cn24/util/Dataset.h b/include/cn24/util/Dataset.h
index 22754f0..18a9596 100644
--- a/include/cn24/util/Dataset.h
+++ b/include/cn24/util/Dataset.h
@@ -223,8 +223,7 @@ class TensorStreamDataset : public Dataset {
     std::vector<std::string> class_names,
     std::vector<unsigned int> class_colors,
     std::vector<datum> class_weights,
-    dataset_localized_error_function error_function = DefaultLocalizedErrorFunction,
-    int training_fd = 0, int testing_fd = 0);
+    dataset_localized_error_function error_function = DefaultLocalizedErrorFunction);
 
   // Dataset implementations
   virtual Task GetTask() const;
diff --git a/include/cn24/util/StatAggregator.h b/include/cn24/util/StatAggregator.h
index 765dab9..93248a8 100644
--- a/include/cn24/util/StatAggregator.h
+++ b/include/cn24/util/StatAggregator.h
@@ -56,10 +56,10 @@ struct StatDescriptor {
   std::string unit = "";
 
   // Lambdas for processing
-  std::function<void(Stat&)> init_function = [] (Stat& stat) {};
-  std::function<void(Stat&, double)> update_function = [] (Stat& stat, double user_value) {};
+  std::function<void(Stat&)> init_function = [] (Stat& stat) {UNREFERENCED_PARAMETER(stat);};
+  std::function<void(Stat&, double)> update_function = [] (Stat& stat, double user_value) {UNREFERENCED_PARAMETER(stat); UNREFERENCED_PARAMETER(user_value);};
   std::function<Stat(HardcodedStats&, Stat&)> output_function =
-    [] (HardcodedStats& hc_stats, Stat& stat) -> Stat {return stat;};
+    [] (HardcodedStats& hc_stats, Stat& stat) -> Stat {UNREFERENCED_PARAMETER(hc_stats); return stat;};
 
   // For easy access
   unsigned int stat_id = UINT_MAX;
@@ -104,4 +104,4 @@ class StatAggregator {
 
 }
 
-#endif
\ No newline at end of file
+#endif
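
The defaulted lambdas above only make an unconfigured StatDescriptor harmless; real descriptors replace them at registration time. A sketch of that pattern, following the BinaryStatLayer changes further down in this diff (the descriptor name and semantics here are illustrative):

  StatDescriptor* stat_example = new StatDescriptor;
  stat_example->description = "Example Rate";  // illustrative name
  stat_example->unit = "%";
  stat_example->nullable = true;
  stat_example->init_function = [] (Stat& stat) { stat.is_null = true; stat.value = 0; };
  stat_example->update_function = [] (Stat& stat, double user_value) { stat.is_null = false; stat.value = user_value; };
  stat_example->output_function = [] (HardcodedStats& hc_stats, Stat& stat) -> Stat { UNREFERENCED_PARAMETER(hc_stats); return stat; };
  System::stat_aggregator->RegisterStat(stat_example);
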
diff --git a/src/factory/ConfigurableFactory.cpp b/src/factory/ConfigurableFactory.cpp
index e8ab9b1..19cf3de 100644
--- a/src/factory/ConfigurableFactory.cpp
+++ b/src/factory/ConfigurableFactory.cpp
@@ -143,270 +143,10 @@ ConfigurableFactory::ConfigurableFactory (std::istream& file, const unsigned int
 }
 
 Layer* ConfigurableFactory::CreateLossLayer (const unsigned int output_classes, const datum loss_weight) {
+  UNREFERENCED_PARAMETER(output_classes);
   return new ErrorLayer(loss_weight);
 }
 
-int ConfigurableFactory::AddLayers (Net& net, Connection data_layer_connection, const unsigned int output_classes, bool add_loss_layer, std::ostream& graph_output) {
-  std::mt19937 rand (seed_);
-  file_.clear();
-  file_.seekg (0, std::ios::beg);
-  int last_layer_output = data_layer_connection.output;
-  int last_layer_id = data_layer_connection.net;
-
-  graph_output << "node" << last_layer_id << " [shape=record, label=\"" <<
-    "{Dataset Input | { Data | Label | Helper | Weight}}"
-    << "\"];\n";
-
-  int current_receptive_field_x = patch_field_x_;
-  int current_receptive_field_y = patch_field_y_;
-  datum llr_factor = 1.0;
-
-
-  Connection stack_a[64];
-  Connection stack_b[64];
-  int stack_a_pos = -1;
-  int stack_b_pos = -1;
-
-  if (method_ == FCN) {
-    /*Tensor* const net_output = &net.buffer (data_layer_connection.net, data_layer_connection.output)->data;
-    datum net_output_size_x = net_output->width();
-    datum net_output_size_y = net_output->height();
-    llr_factor /= net_output_size_x;
-    llr_factor /= net_output_size_y;
-#ifdef CN24_EMULATE_PATCH_LEARNING
-    llr_factor /= patch_field_x_;
-    llr_factor /= patch_field_y_;
-#endif
-    LOGINFO << "Local learning rate factor is (initially): " << llr_factor;*/
-
-    int input_layer_id = last_layer_id;
-    int input_layer_output = last_layer_output;
-    ResizeLayer* rl = new ResizeLayer(receptive_field_x_, receptive_field_y_);
-    last_layer_id = net.AddLayer (rl, { data_layer_connection });
-    last_layer_output = 0;
-
-    WriteNode(graph_output, rl, input_layer_id, input_layer_output, last_layer_id, 1);
-  }
-
-  bool first_layer = true;
-
-  while (! file_.eof()) {
-    std::string line;
-    std::getline (file_, line);
-    if (line.compare (0, 1, "#") == 0) {
-      continue;
-    }
-
-    /*
-     * PREPROCESSING
-     */
-
-    // Replace number of output neurons
-    if (line.find ("(o)") != std::string::npos) {
-      char buf[64];
-      sprintf (buf, "%d", output_classes);
-      line.replace (line.find ("(o)"), 3, buf);
-    }
-
-    // Replace fully connected layers
-    if (line.find ("fullyconnected") != std::string::npos) {
-      line.replace (line.find ("fullyconnected"), 14, "convolutional size=1x1");
-      line.replace (line.find ("neurons="), 8, "kernels=");
-    }
-
-    if (method_ == FCN) {
-      // Remove flatten layers
-      if (line.find ("flatten") != std::string::npos) {
-        line = "";
-      }
-    }
-
-    if (line.compare (0, 7, "?output") == 0) {
-      if (output_classes == 1) {
-        line = "?tanh";
-      } else {
-        line = "?sigm";
-      }
-    }
-
-    /*
-     * STACK OPERATIONS
-     */
-    if (line.compare(0, 5, "pusha") == 0) {
-      stack_a[++stack_a_pos].net = last_layer_id;
-      stack_a[stack_a_pos].output = last_layer_output;
-    }
-
-    if (line.compare(0, 5, "pushb") == 0) {
-      stack_b[++stack_b_pos].net = last_layer_id;
-      stack_b[stack_b_pos].output = last_layer_output;
-    }
-
-    if (line.compare(0, 4, "popa") == 0) {
-      last_layer_id = stack_a[stack_a_pos].net;
-      last_layer_output = stack_a[stack_a_pos--].output;
-    }
-
-    if (line.compare(0, 4, "popb") == 0) {
-      last_layer_id = stack_b[stack_b_pos].net;
-      last_layer_output = stack_b[stack_b_pos--].output;
-    }
-
-    /*
-     * PARSING
-     */
-    if (line.compare (0, 1, "?") == 0) {
-      line = line.substr (1);
-      LOGDEBUG << "Parsing layer: " << line;
-
-      if (StartsWithIdentifier (line, "convolutional")) {
-        unsigned int kx = 1, ky = 1, k = 1;
-        datum llr = 1;
-        datum dropout_fraction = 0.0;
-        ParseKernelSizeIfPossible (line, "size", kx, ky);
-        ParseCountIfPossible (line, "kernels", k);
-        ParseDatumParamIfPossible (line, "dropout", dropout_fraction);
-        ParseDatumParamIfPossible (line, "llr", llr);
-        LOGDEBUG << "Parsed dropout fraction: " << dropout_fraction;
-
-        ConvolutionLayer* cl = new ConvolutionLayer (kx, ky, k, rand(), dropout_fraction);
-
-        // if (method_ == FCN) {
-        LOGDEBUG << "LLR factor: " << llr_factor << ", RFX: " << current_receptive_field_x;
-        /* cl->SetLocalLearningRate (llr * llr_factor * (datum) current_receptive_field_x * (datum) current_receptive_field_y);
-#ifdef CN24_EMULATE_PATCH_LEARNING
-        current_receptive_field_x -= (kx - 1);
-        current_receptive_field_y -= (ky - 1);
-#endif
-        } else { */
-        cl->SetLocalLearningRate (llr * llr_factor);
-        /* } */
-
-        if (first_layer)
-          cl->SetBackpropagationEnabled (false);
-        int input_layer_id = last_layer_id;
-        int input_layer_output = last_layer_output;
-        last_layer_id = net.AddLayer (cl ,
-          { Connection (last_layer_id, last_layer_output) });
-        last_layer_output = 0;
-        first_layer = false;
-
-        WriteNode(graph_output, cl, input_layer_id, input_layer_output, last_layer_id, 1);
-      }
-
-      if (StartsWithIdentifier (line, "maxpooling")) {
-        unsigned int kx = 1, ky = 1;
-        ParseKernelSizeIfPossible (line, "size", kx, ky);
-
-        /*if (method_ == FCN) {
-#ifdef CN24_EMULATE_PATCH_LEARNING
-          current_receptive_field_x /= kx;
-          current_receptive_field_y /= ky;
-          llr_factor *= (datum) (kx * ky);
-#endif
-        }*/
-
-        int input_layer_id = last_layer_id;
-        int input_layer_output = last_layer_output;
-
-        MaxPoolingLayer* mp = new MaxPoolingLayer (kx, ky);
-        last_layer_id = net.AddLayer (mp ,
-          { Connection (last_layer_id, last_layer_output) });
-        last_layer_output = 0;
-
-        WriteNode(graph_output, mp, input_layer_id, input_layer_output, last_layer_id, 1);
-      }
-
-      if (StartsWithIdentifier (line, "sigm")) {
-        int input_layer_id = last_layer_id;
-        int input_layer_output = last_layer_output;
-
-        SigmoidLayer* l = new SigmoidLayer();
-        last_layer_id = net.AddLayer (l ,
-          { Connection (last_layer_id, last_layer_output) });
-        last_layer_output = 0;
-
-        WriteNode(graph_output, l, input_layer_id, input_layer_output, last_layer_id, 1);
-      }
-
-      if (StartsWithIdentifier (line, "relu")) {
-        int input_layer_id = last_layer_id;
-        int input_layer_output = last_layer_output;
-
-        ReLULayer* l = new ReLULayer();
-        last_layer_id = net.AddLayer (l ,
-          { Connection (last_layer_id, last_layer_output) });
-        last_layer_output = 0;
-
-        WriteNode(graph_output, l, input_layer_id, input_layer_output, last_layer_id, 1);
-      }
-
-      if (StartsWithIdentifier (line, "tanh")) {
-        int input_layer_id = last_layer_id;
-        int input_layer_output = last_layer_output;
-
-        TanhLayer* l = new TanhLayer();
-        last_layer_id = net.AddLayer (l ,
-          { Connection (last_layer_id, last_layer_output) });
-        last_layer_output = 0;
-
-        WriteNode(graph_output, l, input_layer_id, input_layer_output, last_layer_id, 1);
-      }
-
-      if (StartsWithIdentifier (line, "spatialprior")) {
-        if (method_ == FCN) {
-          int input_layer_id = last_layer_id;
-          int input_layer_output = last_layer_output;
-
-          SpatialPriorLayer* l = new SpatialPriorLayer();
-          last_layer_id = net.AddLayer (l ,
-            { Connection (last_layer_id, last_layer_output) });
-          last_layer_output = 0;
-
-          WriteNode(graph_output, l, input_layer_id, input_layer_output, last_layer_id, 1);
-        }
-      }
-    }
-  }
-
-  if (method_ == FCN && (factorx != 1 || factory != 1)) {
-    int input_layer_id = last_layer_id;
-    int input_layer_output = last_layer_output;
-
-    last_layer_id = net.AddLayer (new UpscaleLayer (factorx, factory),
-      { Connection (last_layer_id, last_layer_output) });
-    last_layer_output = 0;
-    LOGDEBUG << "Added upscaling layer for FCN";
-
-    graph_output << "node" << last_layer_id << " [shape=record, shape=record, label=\"" <<
-      "{Upscale Layer (" << factorx << "x" << factory << ") | Output" << "}\"];\n";
-    graph_output << "node" << input_layer_id << ":o" << input_layer_output
-      << " -> node" << last_layer_id << ";\n";
-  }
-
-  // Add loss layer
-  if (add_loss_layer) {
-    int loss_layer_id = net.AddLayer(CreateLossLayer(output_classes), {
-      Connection(last_layer_id, last_layer_output),
-      Connection(data_layer_connection.net, 1),
-      Connection(data_layer_connection.net, 3)
-    });
-
-
-    graph_output << "node" << loss_layer_id << " [shape=record, label=\"" <<
-      "{Loss Layer | Output}" << "\"];\n";
-
-    graph_output << "node" << last_layer_id << ":o" << last_layer_output
-      << " -> node" << loss_layer_id << ";\n";
-    graph_output << "node" << data_layer_connection.net << ":o" << 1
-      << " -> node" << loss_layer_id << ";\n";
-    graph_output << "node" << data_layer_connection.net << ":o" << 3
-      << " -> node" << loss_layer_id << ";\n";
-  }
-
-  return last_layer_id;
-}
-
 bool ConfigurableFactory::AddLayers(NetGraph& net, NetGraphConnection data_layer_connection, const unsigned int output_classes, bool add_loss_layer) {
   std::mt19937 rand (seed_);
   file_.clear();
@@ -414,9 +154,6 @@ bool ConfigurableFactory::AddLayers(NetGraph& net, NetGraphConnection data_layer
 
   NetGraphConnection last_connection = data_layer_connection;
 
-  int current_receptive_field_x = patch_field_x_;
-  int current_receptive_field_y = patch_field_y_;
-
   NetGraphConnection stack_a[64];
   NetGraphConnection stack_b[64];
   int stack_a_pos = -1;
@@ -434,8 +171,6 @@ bool ConfigurableFactory::AddLayers(NetGraph& net, NetGraphConnection data_layer
     last_connection.backprop = false;
   }
 
-  bool first_layer = true;
-
   while (! file_.eof()) {
     std::string line;
     std::getline (file_, line);
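
The surviving NetGraph parser keeps the same small configuration language the deleted overload handled: '#' begins a comment, '?' introduces a layer directive, '(o)' expands to the number of output classes, 'fullyconnected ... neurons=' is rewritten to a 1x1 convolution, '?output' becomes '?tanh' or '?sigm' depending on the class count, and pusha/popa (or pushb/popb) save and restore the current connection for skip-style wiring. An illustrative description in that syntax, assembled from the directives the parser recognizes above rather than copied from a shipped configuration file:

  # example network description (illustrative only)
  ?convolutional size=7x7 kernels=16 llr=1.0
  ?maxpooling size=2x2
  ?relu
  ?convolutional size=5x5 kernels=64 dropout=0.5
  ?relu
  ?fullyconnected neurons=(o)
  ?output
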
diff --git a/src/factory/SkipLayerNetworkFactory.cpp b/src/factory/SkipLayerNetworkFactory.cpp
deleted file mode 100644
index 2e49b44..0000000
--- a/src/factory/SkipLayerNetworkFactory.cpp
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * This file is part of the CN24 semantic segmentation software,
- * copyright (C) 2015 Clemens-Alexander Brust (ikosa dot de at gmail dot com).
- *
- * For licensing information, see the LICENSE file included with this project.
- */
-#include
-
-#include "ErrorLayer.h"
-
-#include "ConvolutionLayer.h"
-#include "LocalResponseNormalizationLayer.h"
-#include "ResizeLayer.h"
-#include "MaxPoolingLayer.h"
-#include "AdvancedMaxPoolingLayer.h"
-#include "InputDownSamplingLayer.h"
-#include "NonLinearityLayer.h"
-#include "UpscaleLayer.h"
-#include "SpatialPriorLayer.h"
-#include "ConcatenationLayer.h"
-#include "ConfigParsing.h"
-#include "NetGraph.h"
-
-#include "SkipLayerNetworkFactory.h"
-
-namespace Conv {
-
-bool SkipLayerNetworkFactory::AddLayers(NetGraph& graph, NetGraphConnection data_layer_connection, const unsigned int output_classes, bool add_loss_layer)
-{
-  return false;
-}
-
-int SkipLayerNetworkFactory::AddLayers(Net& net, Connection data_layer_connection, const unsigned int output_classes, bool add_loss_layer, std::ostream& graph_output)
-{
-  return 0;
-}
-
-Layer* SkipLayerNetworkFactory::CreateLossLayer(const unsigned int output_classes, const datum loss_weight)
-{
-  return nullptr;
-}
-
-void SkipLayerNetworkFactory::InitOptimalSettings()
-{
-
-}
-
-Method SkipLayerNetworkFactory::method() const
-{
-  return Method::FCN;
-}
-
-TrainerSettings SkipLayerNetworkFactory::optimal_settings() const
-{
-  TrainerSettings s;
-  return s;
-}
-
-int SkipLayerNetworkFactory::patchsizex()
-{
-  return 0;
-}
-
-int SkipLayerNetworkFactory::patchsizey()
-{
-  return 0;
-}
-
-}
diff --git a/src/math/TensorMath.cpp b/src/math/TensorMath.cpp
index 02f0999..28aabbb 100644
--- a/src/math/TensorMath.cpp
+++ b/src/math/TensorMath.cpp
@@ -171,9 +171,6 @@ void TensorMath::IM2COL(const Tensor& source, const int source_width, const int
   const int target_height = (2 * pad_height + source_height - kernel_height) / stride_height + 1;
   const int target_maps = kernel_width * kernel_height * maps;
 
-  const int target_size = samples * target_width * target_height * target_maps;
-  const int actual_target_size = target.samples() * target.width() * target.height() * target.maps();
-
   error |= clSetKernelArg (CLHelper::k_im2col, 0, sizeof (cl_mem), &(((Tensor&)source).cl_data_ptr_));
   error |= clSetKernelArg (CLHelper::k_im2col, 1, sizeof (cl_mem), &(target.cl_data_ptr_));
   error |= clSetKernelArg (CLHelper::k_im2col, 2, sizeof (cl_int), &source_width);
@@ -271,9 +268,6 @@ void TensorMath::COL2IM(Tensor& source, const int source_width, const int source
   const int target_height = (2 * pad_height + source_height - kernel_height) / stride_height + 1;
   const int target_maps = kernel_width * kernel_height * maps;
 
-  const int target_size = samples * target_width * target_height * target_maps;
-  const int actual_target_size = target.samples() * target.width() * target.height() * target.maps();
-
   error |= clSetKernelArg (CLHelper::k_col2im, 0, sizeof (cl_mem), &(((Tensor&)source).cl_data_ptr_));
   error |= clSetKernelArg (CLHelper::k_col2im, 1, sizeof (cl_mem), &(target.cl_data_ptr_));
   error |= clSetKernelArg (CLHelper::k_col2im, 2, sizeof (cl_int), &source_width);
@@ -461,8 +455,6 @@ void TensorMath::DOWN(const Tensor& source, Tensor& target, const int region_wid
   const int target_height = target.height();
   const int source_width = source.width();
   const int source_height = source.height();
-  const int maps = target.maps();
-  const int samples = target.samples();
 
   cl_uint error = 0;
   error |= clSetKernelArg (CLHelper::k_down, 0, sizeof (cl_mem), &(((Tensor&)source).cl_data_ptr_));
@@ -536,8 +528,6 @@ void TensorMath::UP(const Tensor& source, Tensor& target, const int region_width
   const int target_height = target.height();
   const int source_width = source.width();
   const int source_height = source.height();
-  const int maps = target.maps();
-  const int samples = target.samples();
 
   cl_uint error = 0;
   error |= clSetKernelArg (CLHelper::k_up, 0, sizeof (cl_mem), &(((Tensor&)source).cl_data_ptr_));
@@ -570,7 +560,6 @@ void TensorMath::UP(const Tensor& source, Tensor& target, const int region_width
 #endif
   } else {
 #endif
-    const datum region_area = (datum)region_width * (datum)region_height;
     const int width = source.width();
     const int height = source.height();
     const int maps = source.maps();
diff --git a/src/net/BinaryStatLayer.cpp b/src/net/BinaryStatLayer.cpp
index d286f5f..504b1d0 100644
--- a/src/net/BinaryStatLayer.cpp
+++ b/src/net/BinaryStatLayer.cpp
@@ -52,42 +52,42 @@ BinaryStatLayer::BinaryStatLayer ( const unsigned int thresholds,
   stat_fpr_->nullable = true;
   stat_fpr_->init_function = [this] (Stat& stat) { stat.is_null = true; stat.value = 0; Reset(); };
   stat_fpr_->update_function = [] (Stat& stat, double user_value) { stat.is_null = false; stat.value = user_value; };
-  stat_fpr_->output_function = [] (HardcodedStats& hc_stats, Stat& stat) -> Stat { return stat; };
+  stat_fpr_->output_function = [] (HardcodedStats& hc_stats, Stat& stat) -> Stat { UNREFERENCED_PARAMETER(hc_stats); return stat; };
 
   stat_fnr_->description = "False Negative Rate";
   stat_fnr_->unit = "%";
   stat_fnr_->nullable = true;
   stat_fnr_->init_function = [] (Stat& stat) { stat.is_null = true; stat.value = 0; };
   stat_fnr_->update_function = [] (Stat& stat, double user_value) { stat.is_null = false; stat.value = user_value; };
-  stat_fnr_->output_function = [] (HardcodedStats& hc_stats, Stat& stat) -> Stat { return stat; };
+  stat_fnr_->output_function = [] (HardcodedStats& hc_stats, Stat& stat) -> Stat { UNREFERENCED_PARAMETER(hc_stats); return stat; };
 
   stat_pre_->description = "Precision";
   stat_pre_->unit = "%";
   stat_pre_->nullable = true;
   stat_pre_->init_function = [] (Stat& stat) { stat.is_null = true; stat.value = 0; };
   stat_pre_->update_function = [] (Stat& stat, double user_value) { stat.is_null = false; stat.value = user_value; };
-  stat_pre_->output_function = [] (HardcodedStats& hc_stats, Stat& stat) -> Stat { return stat; };
+  stat_pre_->output_function = [] (HardcodedStats& hc_stats, Stat& stat) -> Stat { UNREFERENCED_PARAMETER(hc_stats); return stat; };
 
   stat_rec_->description = "Recall";
   stat_rec_->unit = "%";
   stat_rec_->nullable = true;
   stat_rec_->init_function = [] (Stat& stat) { stat.is_null = true; stat.value = 0; };
   stat_rec_->update_function = [] (Stat& stat, double user_value) { stat.is_null = false; stat.value = user_value; };
-  stat_rec_->output_function = [] (HardcodedStats& hc_stats, Stat& stat) -> Stat { return stat; };
+  stat_rec_->output_function = [] (HardcodedStats& hc_stats, Stat& stat) -> Stat { UNREFERENCED_PARAMETER(hc_stats); return stat; };
 
   stat_acc_->description = "Accuracy";
   stat_acc_->unit = "%";
   stat_acc_->nullable = true;
   stat_acc_->init_function = [] (Stat& stat) { stat.is_null = true; stat.value = 0; };
   stat_acc_->update_function = [] (Stat& stat, double user_value) { stat.is_null = false; stat.value = user_value; };
-  stat_acc_->output_function = [] (HardcodedStats& hc_stats, Stat& stat) -> Stat { return stat; };
+  stat_acc_->output_function = [] (HardcodedStats& hc_stats, Stat& stat) -> Stat { UNREFERENCED_PARAMETER(hc_stats); return stat; };
 
   stat_f1_->description = "F1 Value";
   stat_f1_->unit = "%";
   stat_f1_->nullable = true;
   stat_f1_->init_function = [] (Stat& stat) { stat.is_null = true; stat.value = 0; };
   stat_f1_->update_function = [] (Stat& stat, double user_value) { stat.is_null = false; stat.value = user_value; };
-  stat_f1_->output_function = [] (HardcodedStats& hc_stats, Stat& stat) -> Stat { return stat; };
+  stat_f1_->output_function = [] (HardcodedStats& hc_stats, Stat& stat) -> Stat { UNREFERENCED_PARAMETER(hc_stats); return stat; };
 
   // Register stats
   System::stat_aggregator->RegisterStat(stat_fpr_);
@@ -167,6 +167,7 @@ void BinaryStatLayer::UpdateAll() {
 }
 
 bool BinaryStatLayer::CreateOutputs ( const std::vector< CombinedTensor* >& inputs, std::vector< CombinedTensor* >& outputs ) {
+  UNREFERENCED_PARAMETER(outputs);
   // Validate input node count
   if ( inputs.size() != 3 ) {
     LOGERROR << "Need exactly 3 inputs to calculate binary stat!";
@@ -203,6 +204,7 @@ bool BinaryStatLayer::CreateOutputs ( const std::vector< CombinedTensor* >& inpu
 }
 
 bool BinaryStatLayer::Connect ( const std::vector< CombinedTensor* >& inputs, const std::vector< CombinedTensor* >& outputs, const NetStatus* net ) {
+  UNREFERENCED_PARAMETER(net);
   // Needs exactly three inputs to calculate the stat
   if ( inputs.size() != 3 )
     return false;
@@ -268,6 +270,8 @@ void BinaryStatLayer::Reset() {
 }
 
 void BinaryStatLayer::Print ( std::string prefix, bool training ) {
+  UNREFERENCED_PARAMETER(prefix);
+  UNREFERENCED_PARAMETER(training);
   // Now deprecated
 }
diff --git a/src/net/ConcatenationLayer.cpp b/src/net/ConcatenationLayer.cpp
index db2775c..02f803d 100644
--- a/src/net/ConcatenationLayer.cpp
+++ b/src/net/ConcatenationLayer.cpp
@@ -54,6 +54,7 @@ bool ConcatenationLayer::CreateOutputs (const std::vector< CombinedTensor* >& in
 bool ConcatenationLayer::Connect (const std::vector< CombinedTensor* >& inputs,
                                   const std::vector< CombinedTensor* >& outputs,
                                   const NetStatus* status ) {
+  UNREFERENCED_PARAMETER(status);
   if(inputs.size() != 2) {
     LOGERROR << "Needs two inputs!";
     return false;
@@ -116,4 +117,4 @@ void ConcatenationLayer::BackPropagate() {
   }
 }
 
-}
\ No newline at end of file
+}
diff --git a/src/net/ConfusionMatrixLayer.cpp b/src/net/ConfusionMatrixLayer.cpp
index 1f189fa..a25d8d9 100644
--- a/src/net/ConfusionMatrixLayer.cpp
+++ b/src/net/ConfusionMatrixLayer.cpp
@@ -35,6 +35,7 @@ ConfusionMatrixLayer::ConfusionMatrixLayer (
   stat_orr_->init_function = [this] (Stat& stat) { stat.is_null = true; stat.value = 0; Reset();};
   stat_orr_->update_function = [] (Stat& stat, double user_value) { stat.is_null = false; stat.value = user_value; };
   stat_orr_->output_function = [] (HardcodedStats& hc_stats, Stat& stat) -> Stat {
+    UNREFERENCED_PARAMETER(hc_stats);
     return stat;
   };
 
@@ -44,6 +45,7 @@ ConfusionMatrixLayer::ConfusionMatrixLayer (
   stat_arr_->init_function = [] (Stat& stat) { stat.is_null = true; stat.value = 0; };
   stat_arr_->update_function = [] (Stat& stat, double user_value) { stat.is_null = false; stat.value = user_value; };
   stat_arr_->output_function = [] (HardcodedStats& hc_stats, Stat& stat) -> Stat {
+    UNREFERENCED_PARAMETER(hc_stats);
     return stat;
   };
 
@@ -53,6 +55,7 @@ ConfusionMatrixLayer::ConfusionMatrixLayer (
   stat_iou_->init_function = [] (Stat& stat) { stat.is_null = true; stat.value = 0; };
   stat_iou_->update_function = [] (Stat& stat, double user_value) { stat.is_null = false; stat.value = user_value; };
   stat_iou_->output_function = [] (HardcodedStats& hc_stats, Stat& stat) -> Stat {
+    UNREFERENCED_PARAMETER(hc_stats);
     return stat;
   };
 
@@ -114,6 +117,7 @@ void ConfusionMatrixLayer::UpdateAll() {
 
 bool ConfusionMatrixLayer::CreateOutputs ( const std::vector< CombinedTensor* >& inputs,
                                            std::vector< CombinedTensor* >& outputs ) {
+  UNREFERENCED_PARAMETER(outputs);
   // Validate input node count
   if ( inputs.size() != 3 ) {
     LOGERROR << "Need exactly 3 inputs to calculate confusion matrix!";
@@ -148,6 +152,7 @@ bool ConfusionMatrixLayer::Connect (
   const std::vector< CombinedTensor* >& inputs,
   const std::vector< CombinedTensor* >& outputs,
   const NetStatus* net ) {
+  UNREFERENCED_PARAMETER(net);
   // Needs exactly three inputs to calculate the stat
   if ( inputs.size() != 3 )
     return false;
diff --git a/src/net/ConvolutionLayer.cpp b/src/net/ConvolutionLayer.cpp
index 4795dc4..a4e1848 100644
--- a/src/net/ConvolutionLayer.cpp
+++ b/src/net/ConvolutionLayer.cpp
@@ -17,7 +17,6 @@
 
 #include "Config.h"
 #include "Log.h"
-#include "Net.h"
 #include "CLHelper.h"
 #include "TensorMath.h"
 
diff --git a/src/net/DatasetInputLayer.cpp b/src/net/DatasetInputLayer.cpp
index 5d837ce..b87a49b 100644
--- a/src/net/DatasetInputLayer.cpp
+++ b/src/net/DatasetInputLayer.cpp
@@ -114,6 +114,7 @@ bool DatasetInputLayer::CreateOutputs (const std::vector< CombinedTensor* >& inp
 bool DatasetInputLayer::Connect (const std::vector< CombinedTensor* >& inputs,
                                  const std::vector< CombinedTensor* >& outputs,
                                  const NetStatus* net) {
+  UNREFERENCED_PARAMETER(net);
   // TODO validate
   CombinedTensor* data_output = outputs[0];
   CombinedTensor* label_output = outputs[1];
diff --git a/src/net/ErrorLayer.cpp b/src/net/ErrorLayer.cpp
index d2aabe0..f48a54b 100644
--- a/src/net/ErrorLayer.cpp
+++ b/src/net/ErrorLayer.cpp
@@ -23,6 +23,7 @@ ErrorLayer::ErrorLayer(const datum loss_weight)
 
 bool ErrorLayer::CreateOutputs ( const std::vector< CombinedTensor* >& inputs,
                                  std::vector< CombinedTensor* >& outputs ) {
+  UNREFERENCED_PARAMETER(outputs);
   // Validate input node count
   if ( inputs.size() != 3 ) {
     LOGERROR << "Need exactly 3 inputs to calculate loss function!";
@@ -61,6 +62,7 @@ bool ErrorLayer::CreateOutputs ( const std::vector< CombinedTensor* >& inputs,
 bool ErrorLayer::Connect ( const std::vector< CombinedTensor* >& inputs,
                            const std::vector< CombinedTensor* >& outputs,
                            const NetStatus* net ) {
+  UNREFERENCED_PARAMETER(net);
   // Needs exactly three inputs to calculate the difference
   if ( inputs.size() != 3 )
     return false;
diff --git a/src/net/GradientAccumulationLayer.cpp b/src/net/GradientAccumulationLayer.cpp
index aea87b7..51e89a8 100644
--- a/src/net/GradientAccumulationLayer.cpp
+++ b/src/net/GradientAccumulationLayer.cpp
@@ -40,6 +40,7 @@ bool GradientAccumulationLayer::CreateOutputs (const std::vector< CombinedTensor
 bool GradientAccumulationLayer::Connect (const std::vector< CombinedTensor* >& inputs,
                                          const std::vector< CombinedTensor* >& outputs,
                                          const NetStatus* status ) {
+  UNREFERENCED_PARAMETER(status);
   if(inputs.size() != 1) {
     LOGERROR << "Needs one input!";
     return false;
@@ -96,4 +97,4 @@ void GradientAccumulationLayer::BackPropagate() {
   }
 }
 
-}
\ No newline at end of file
+}
diff --git a/src/net/InputLayer.cpp b/src/net/InputLayer.cpp
index 725bbe2..7fb70c6 100644
--- a/src/net/InputLayer.cpp
+++ b/src/net/InputLayer.cpp
@@ -79,6 +79,7 @@ InputLayer::InputLayer ( Tensor& data, Tensor& label, Tensor& helper,
 bool InputLayer::Connect ( const std::vector< CombinedTensor* >& inputs,
                            const std::vector< CombinedTensor* >& outputs,
                            const NetStatus* net ) {
+  UNREFERENCED_PARAMETER(net);
   // Check if inputs were accidentally supplied
   if ( inputs.size() != 0 ) {
     LOGERROR << "Input layer cannot have inputs!";
diff --git a/src/net/LocalResponseNormalizationLayer.cpp b/src/net/LocalResponseNormalizationLayer.cpp
index 4412194..6c904dc 100644
--- a/src/net/LocalResponseNormalizationLayer.cpp
+++ b/src/net/LocalResponseNormalizationLayer.cpp
@@ -53,6 +53,7 @@ bool LocalResponseNormalizationLayer::
 bool LocalResponseNormalizationLayer::
 Connect(const CombinedTensor* input, CombinedTensor* output) {
+  UNREFERENCED_PARAMETER(output);
   // TODO Validate dimensions
   bool valid = true;
 
@@ -188,4 +189,4 @@ void LocalResponseNormalizationLayer::BackPropagate() {
   }
 }
 
-}
\ No newline at end of file
+}
diff --git a/src/net/Net.cpp b/src/net/Net.cpp
deleted file mode 100644
index 036b948..0000000
--- a/src/net/Net.cpp
+++ /dev/null
@@ -1,318 +0,0 @@
-/*
- * This file is part of the CN24 semantic segmentation software,
- * copyright (C) 2015 Clemens-Alexander Brust (ikosa dot de at gmail dot com).
- *
- * For licensing information, see the LICENSE file included with this project.
- */
-#include "Log.h"
-#include "Init.h"
-
-#include "TensorViewer.h"
-
-#include
-
-#include "Net.h"
-
-namespace Conv {
-
-int Net::AddLayer (Layer* layer, const std::vector< Connection >& connections) {
-  // Check for null pointer
-  if (layer == nullptr) {
-    FATAL ("Null pointer supplied");
-    return -1;
-  }
-
-  // Determine Layer's id
-  int layer_id = layers_.size();
-
-  // Add the layer
-  layers_.push_back (layer);
-
-  // Get inputs
-  std::vector<CombinedTensor*> inputs;
-  for (unsigned int i = 0; i < connections.size(); i++) {
-    Connection connection = connections[i];
-    CombinedTensor* buffer = buffers_[connection.net][connection.output];
-    inputs.push_back (buffer);
-    LOGDEBUG << "Layer " << layer_id << " input: layer " << connection.net <<
-      ", output " << connection.output;
-    if (i == 0 && connection.output == 0) {
-      // Tell layer below
-      Layer* below = layers_[connection.net];
-      weight_connections_.push_back ( {below, layer});
-    }
-  }
-  // These names are bad. inputs_ contains the input buffers for all the layers
-  // and inputs contains the input buffers for the currently added layer.
-  inputs_.push_back (inputs);
-
-  // Ask the layer to create an output buffer
-  std::vector<CombinedTensor*> outputs;
-  bool result = layer->CreateOutputs (inputs, outputs);
-  if (!result) {
-    FATAL ("Layer will not create output buffer!");
-    return -1;
-  }
-
-  for (unsigned int i = 0; i < outputs.size(); i++) {
-    CombinedTensor* output = outputs[i];
-    LOGDEBUG << "Layer " << layer_id << " output " << i << ": " <<
-      output->data;
-  }
-
-  // Connect the layer
-  bool connection_result = layer->Connect (inputs, outputs, this);
-  if (!connection_result) {
-    FATAL ("Layer failed to connect!");
-    return -1;
-  }
-
-  // Save outputs
-  buffers_.push_back (outputs);
-
-  LOGDEBUG << "Layer " << layer_id << " added.";
-
-#ifdef BUILD_OPENCL
-  if (layer->IsOpenCLAware()) {
-    LOGDEBUG << "Layer " << layer_id << " is OpenCL aware";
-  } else {
-    LOGWARN << "Layer " << layer_id << " is NOT OpenCL aware";
-  }
-#endif
-
-  // Check if layer supports training
-  if (dynamic_cast<TrainingLayer*> (layer) != NULL) {
-    // If it does, save the pointer
-    if (training_layer_ == nullptr) {
-      LOGDEBUG << "Layer " << layer_id << " added as training layer.";
-      training_layer_ = dynamic_cast<TrainingLayer*> (layer);
-    } else {
-      FATAL ("Cannot add another training layer!");
-      return -1;
-    }
-  }
-
-  // Check if layer is a binary stat layer
-  if (dynamic_cast<BinaryStatLayer*> (layer) != NULL) {
-    // If it is, save the pointer
-    if (binary_stat_layer_ == nullptr) {
-      LOGDEBUG << "Layer " << layer_id << " added as binary stat layer.";
-      binary_stat_layer_ = dynamic_cast<BinaryStatLayer*> (layer);
-    } else {
-      FATAL ("Cannot add another binary stat layer!");
-      return -1;
-    }
-  }
-
-  // Check if layer is a confusion matrix layer
-  if (dynamic_cast<ConfusionMatrixLayer*> (layer) != NULL) {
-    // If it is, save the pointer
-    if (confusion_matrix_layer_ == nullptr) {
-      LOGDEBUG << "Layer " << layer_id << " added as confusion matrix layer.";
-      confusion_matrix_layer_ = dynamic_cast<ConfusionMatrixLayer*> (layer);
-    } else {
-      FATAL ("Cannot add another confusion matrix layer!");
-      return -1;
-    }
-  }
-
-  // Check if layer is loss function layer
-  if (dynamic_cast<LossFunctionLayer*> (layer) != NULL) {
-    // If it is, save the pointer
-    if (lossfunction_layer_ == nullptr) {
-      LOGDEBUG << "Layer " << layer_id << " added as loss function layer.";
-      lossfunction_layer_ = dynamic_cast<LossFunctionLayer*> (layer);
-    } else {
-      FATAL ("Cannot add another loss function layer!");
-      return -1;
-    }
-  }
-
-
-  // Check if layer is a stat layer
-  if (dynamic_cast<StatLayer*> (layer) != NULL) {
-    // If it is, add to vector
-    StatLayer* stat_layer = dynamic_cast<StatLayer*> (layer);
-    stat_layers_.push_back (stat_layer);
-
-    LOGDEBUG << "Layer " << layer_id << " added as stat layer.";
-  }
-
-  // Return the layer number
-  return layer_id;
-}
-
-int Net::AddLayer (Layer* layer, const int input_layer) {
-  return AddLayer (layer, {Connection (input_layer) });
-}
-
-
-void Net::InitializeWeights() {
-  for (int l = weight_connections_.size() - 1; l > 0; l--) {
-    std::pair<Layer*, Layer*> p = weight_connections_[l];
-    p.first->OnLayerConnect({ p.second });
-  }
-}
-
-
-
-void Net::FeedForward() {
-#ifdef LAYERTIME
-  if (forward_durations_ == nullptr) {
-    forward_durations_ = new std::chrono::duration<double>[layers_.size()];
-    backward_durations_ = new std::chrono::duration<double>[layers_.size()];
-
-    for (unsigned int l = 0; l < layers_.size(); l++) {
-      forward_durations_[l] = std::chrono::duration<double>::zero();
-      backward_durations_[l] = std::chrono::duration<double>::zero();
-    }
-  }
-#endif
-  for (unsigned int l = 0; l < layers_.size(); l++) {
-    Layer* layer = layers_[l];
-
-#ifdef LAYERTIME
-    auto t_begin = std::chrono::system_clock::now();
-#endif
-
-#ifdef BUILD_OPENCL
-    if (!layer->IsOpenCLAware()) {
-      for (unsigned int i = 0; i < inputs_[l].size(); i++) {
-        inputs_[l][i]->data.MoveToCPU();
-        inputs_[l][i]->delta.MoveToCPU();
-      }
-
-      for (unsigned int i = 0; i < buffers_[l].size(); i++) {
-        buffers_[l][i]->data.MoveToCPU();
-        buffers_[l][i]->delta.MoveToCPU();
-      }
-    }
-#endif
-    Tensor* output0 = nullptr;
-    if(buffers_[l].size() > 0)
-      output0 = &(buffers_[l][0]->data);
-    layer->FeedForward();
-
-#ifdef LAYERVIEW
-    if(output0 != nullptr && layer_view_enabled_) {
-      std::stringstream sstr;
-      sstr << "Tensor Viewer: Layer " << l;
-#ifdef BUILD_OPENCL
-      output0->MoveToCPU();
-#endif
-      System::viewer->show(output0, sstr.str());
-    }
-#endif
-
-    output0 = nullptr;
-
-#ifdef LAYERTIME
-    auto t_end = std::chrono::system_clock::now();
-    forward_durations_[l] += t_end - t_begin;
-#endif
-  }
-}
-
-void Net::FeedForward (const unsigned int last) {
-  for (unsigned int l = 0; l <= last; l++) {
-    Layer* layer = layers_[l];
-#ifdef BUILD_OPENCL
-    if (!layer->IsOpenCLAware()) {
-      for (unsigned int i = 0; i < inputs_[l].size(); i++) {
-        inputs_[l][i]->data.MoveToCPU();
-        inputs_[l][i]->delta.MoveToCPU();
-      }
-
-      for (unsigned int i = 0; i < buffers_[l].size(); i++) {
-        buffers_[l][i]->data.MoveToCPU();
-        buffers_[l][i]->delta.MoveToCPU();
-      }
-    }
-#endif
-    layer->FeedForward();
-  }
-}
-
-
-void Net::BackPropagate() {
-  for (int l = (layers_.size() - 1); l >= 0; l--) {
-    Layer* layer = layers_[l];
-
-#ifdef LAYERTIME
-    auto t_begin = std::chrono::system_clock::now();
-#endif
-
-#ifdef BUILD_OPENCL
-    if (!layer->IsOpenCLAware()) {
-      for (unsigned int i = 0; i < inputs_[l].size(); i++) {
-        inputs_[l][i]->data.MoveToCPU();
-        inputs_[l][i]->delta.MoveToCPU();
-      }
-
-      for (unsigned int i = 0; i < buffers_[l].size(); i++) {
-        buffers_[l][i]->data.MoveToCPU();
-        buffers_[l][i]->delta.MoveToCPU();
-      }
-    }
-#endif
-    layer->BackPropagate();
-
-#ifdef LAYERTIME
-    auto t_end = std::chrono::system_clock::now();
-    backward_durations_[l] += t_end - t_begin;
-#endif
-
-  }
-}
-
-void Net::GetParameters (std::vector< CombinedTensor* >& parameters) {
-  for (unsigned int l = 0; l < layers_.size(); l++) {
-    Layer* layer = layers_[l];
-    for (unsigned int p = 0; p < layer->parameters().size(); p++) {
-      parameters.push_back (layer->parameters() [p]);
-    }
-  }
-}
-
-void Net::SerializeParameters (std::ostream& output) {
-  for (unsigned int l = 0; l < layers_.size(); l++) {
-    Layer* layer = layers_[l];
-    for (unsigned int p = 0; p < layer->parameters().size(); p++) {
-      layer->parameters() [p]->data.Serialize (output);
-    }
-  }
-}
-
-void Net::DeserializeParameters (std::istream& input, unsigned int last_layer) {
-  if (last_layer == 0 || last_layer >= layers_.size())
-    last_layer = layers_.size() - 1;
-  for (unsigned int l = 0; l <= last_layer; l++) {
-    Layer* layer = layers_[l];
-    for (unsigned int p = 0; p < layer->parameters().size(); p++) {
-      if (!input.good() || input.eof())
-        break;
-      layer->parameters() [p]->data.Deserialize (input);
-      LOGINFO << "Loaded parameters for layer " << l << " parameter set " << p << ": " << layer->parameters()[p]->data;
-      input.peek();
-    }
-  }
-}
-
-void Net::PrintAndResetLayerTime(datum samples) {
-#ifdef LAYERTIME
-  std::cout << std::endl << "LAYERTIME (" << samples << ")" << std::endl;
-  datum tps_sum = 0.0;
-  for(unsigned int l = 0; l < layers_.size(); l++) {
-    std::cout << "forward " << l << "," << std::fixed << std::setprecision(9) << 1000000.0 * forward_durations_[l].count() / samples << "\n";
-    std::cout << "backwrd " << l << "," << std::fixed << std::setprecision(9) << 1000000.0 * backward_durations_[l].count() / samples << "\n";
-    tps_sum += 1000000.0 * forward_durations_[l].count() / samples;
-    tps_sum += 1000000.0 * backward_durations_[l].count() / samples;
-    forward_durations_[l] = std::chrono::duration<double>::zero();
-    backward_durations_[l] = std::chrono::duration<double>::zero();
-  }
-
-  std::cout << "Total tps in net: " << tps_sum << " us" << std::endl;
-#endif
-}
-
-}
diff --git a/src/net/SumLayer.cpp b/src/net/SumLayer.cpp
index 3d0b7fb..17ec73c 100644
--- a/src/net/SumLayer.cpp
+++ b/src/net/SumLayer.cpp
@@ -61,6 +61,7 @@ bool SumLayer::CreateOutputs (const std::vector< CombinedTensor* >& inputs,
 bool SumLayer::Connect (const std::vector< CombinedTensor* >& inputs,
                         const std::vector< CombinedTensor* >& outputs,
                         const NetStatus* status ) {
+  UNREFERENCED_PARAMETER(status);
   if(inputs.size() != 2) {
     LOGERROR << "Needs two inputs!";
     return false;
@@ -112,4 +113,4 @@ void SumLayer::BackPropagate() {
   }
 }
 
-}
\ No newline at end of file
+}
diff --git a/src/net/Trainer.cpp b/src/net/Trainer.cpp
index f5ee335..ec6d6e5 100644
--- a/src/net/Trainer.cpp
+++ b/src/net/Trainer.cpp
@@ -9,8 +9,9 @@
 #include
 
 #include "Log.h"
-#include "Net.h"
+#include "NetGraph.h"
 #include "StatLayer.h"
+#include "LossFunctionLayer.h"
 #include "CLHelper.h"
 #include "StatAggregator.h"
 #include "Init.h"
@@ -288,7 +289,6 @@ void Trainer::Test() {
   System::stat_aggregator->Update(stat_fps_->stat_id, (double)(first_training_layer_->GetBatchSize()) * (double)iterations);
 
   for (unsigned int n = 0; n < graph_.GetLossNodes().size(); n++) {
-    LossFunctionLayer* lossfunction_layer = dynamic_cast<LossFunctionLayer*>(graph_.GetLossNodes()[n]->layer);
     LOGINFO << "Testing (Epoch " << epoch_ << ", node " << n << ") " << graph_.GetLossNodes()[n]->layer->GetLayerDescription() << " lps: " << loss_sums[n] / (datum)(iterations * sample_count_);
   }
@@ -403,7 +403,6 @@ void Trainer::Epoch() {
 
   // Display training epoch_error
   for (unsigned int n = 0; n < graph_.GetLossNodes().size(); n++) {
-    LossFunctionLayer* lossfunction_layer = dynamic_cast<LossFunctionLayer*>(graph_.GetLossNodes()[n]->layer);
     LOGINFO << "Training (Epoch " << epoch_ << ", node " << n << ") " << graph_.GetLossNodes()[n]->layer->GetLayerDescription() << " lps: " << loss_sums[n] / (datum)(iterations * sample_count_ * settings_.sbatchsize * first_training_layer_->GetLossSamplingProbability());
   }
diff --git a/src/util/GradientTester.cpp b/src/util/GradientTester.cpp
index df576d2..0e8700f 100644
--- a/src/util/GradientTester.cpp
+++ b/src/util/GradientTester.cpp
@@ -6,7 +6,6 @@
  */
 
 #include "Log.h"
-#include "Net.h"
 #include "GradientTester.h"
 
 namespace Conv {
@@ -110,4 +109,4 @@
 graph.FeedForward();
 }
 
-}
\ No newline at end of file
+}
diff --git a/src/util/Init.cpp b/src/util/Init.cpp
index c5a460b..171500a 100644
--- a/src/util/Init.cpp
+++ b/src/util/Init.cpp
@@ -87,7 +87,7 @@ void System::Init(int requested_log_level) {
   }
   else
    log_level = requested_log_level;
 
-  LOGINFO << "CN24 v2.0.0 at " STRING_SHA1;
+  LOGINFO << "CN24 v2.0.1 at " STRING_SHA1;
   LOGINFO << "Copyright (C) 2015 Clemens-Alexander Brust";
   LOGINFO << "For licensing information, see the LICENSE"
           << " file included with this project.";
diff --git a/src/util/JPGUtil.cpp b/src/util/JPGUtil.cpp
index e7224bf..9c4cd99 100644
--- a/src/util/JPGUtil.cpp
+++ b/src/util/JPGUtil.cpp
@@ -122,6 +122,7 @@ bool JPGUtil::WriteToFile ( const std::string& file, Tensor& tensor ) {
 
 bool JPGUtil::CheckSignature (std::istream& stream) {
+  UNREFERENCED_PARAMETER(stream);
   return true;
 }
 
diff --git a/src/util/TensorStreamDataset.cpp b/src/util/TensorStreamDataset.cpp
index 377e9c4..a0745b0 100644
--- a/src/util/TensorStreamDataset.cpp
+++ b/src/util/TensorStreamDataset.cpp
@@ -38,8 +38,7 @@ TensorStreamDataset::TensorStreamDataset (
   std::vector< std::string > class_names,
   std::vector<unsigned int> class_colors,
   std::vector<datum> class_weights,
-  dataset_localized_error_function error_function,
-  int training_fd, int testing_fd ) :
+  dataset_localized_error_function error_function) :
   training_stream_(training_stream), testing_stream_(testing_stream),
   classes_ (classes), class_names_ (class_names), class_colors_ (class_colors),
   class_weights_(class_weights),
@@ -73,7 +72,6 @@ TensorStreamDataset::TensorStreamDataset (
   tensors_ = (tensor_count_testing_ + tensor_count_training_) / 2;
 
   // Read tensors
-  unsigned int e = 0;
 
   max_width_ = 0;
   max_height_ = 0;
@@ -295,9 +293,6 @@ TensorStreamDataset* TensorStreamDataset::CreateFromConfiguration (std::istream&
   dataset_localized_error_function error_function = DefaultLocalizedErrorFunction;
   std::string training_file;
   std::string testing_file;
-  int training_fd = 0;
-  int testing_fd = 0;
-  bool no_mmap = false;
 
   TensorStream* training_stream = new FloatTensorStream();
   TensorStream* testing_stream = new FloatTensorStream();
@@ -309,11 +304,6 @@ TensorStreamDataset* TensorStreamDataset::CreateFromConfiguration (std::istream&
     std::string line;
     std::getline (file, line);
 
-    if (StartsWithIdentifier (line, "nommap")) {
-      LOGDEBUG << "Dataset requested to not be memory mapped.";
-      no_mmap = true;
-    }
-
     if (StartsWithIdentifier (line, "classes")) {
       ParseCountIfPossible (line, "classes", classes);
 
@@ -394,7 +384,7 @@ TensorStreamDataset* TensorStreamDataset::CreateFromConfiguration (std::istream&
   }
 
   return new TensorStreamDataset (training_stream, testing_stream, classes,
-    class_names, class_colors, class_weights, error_function, training_fd, testing_fd);
+    class_names, class_colors, class_weights, error_function);
 }
 
 }
diff --git a/src/util/TensorStreamPatchDataset.cpp b/src/util/TensorStreamPatchDataset.cpp
index e01f7e5..5a5e11e 100644
--- a/src/util/TensorStreamPatchDataset.cpp
+++ b/src/util/TensorStreamPatchDataset.cpp
@@ -26,6 +26,10 @@ namespace Conv {
 
 datum DefaultLocalizedErrorFunction (unsigned int x, unsigned int y, unsigned int w, unsigned int h) {
+  UNREFERENCED_PARAMETER(x);
+  UNREFERENCED_PARAMETER(y);
+  UNREFERENCED_PARAMETER(w);
+  UNREFERENCED_PARAMETER(h);
   return 1;
 }
 
 TensorStreamPatchDataset::TensorStreamPatchDataset(std::istream& training_stream,
@@ -110,8 +114,6 @@ TensorStreamPatchDataset::TensorStreamPatchDataset(std::istream& training_stream
   }
 
   // Read tensors
-  unsigned int e = 0;
-
   if((tensor_count_training_ + tensor_count_testing_) > 0) {
     LOGINFO << "Deserializing " << (tensor_count_training_ + tensor_count_testing_) / 2 << " Tensors..." << std::endl << std::flush;
   }
diff --git a/src/util/TensorViewer.cpp b/src/util/TensorViewer.cpp
index a16f446..e91dc09 100644
--- a/src/util/TensorViewer.cpp
+++ b/src/util/TensorViewer.cpp
@@ -86,6 +86,9 @@ void TensorViewer::show ( Tensor* tensor, const std::string& title, bool autoclo
 
   t1.join();
 #else
+  UNREFERENCED_PARAMETER(autoclose);
+  UNREFERENCED_PARAMETER(map);
+  UNREFERENCED_PARAMETER(sample);
   LOGWARN << "Cannot show Tensor: " << tensor << ", " << title;
 #endif
 }
diff --git a/tools/networkGraph.cpp b/tools/networkGraph.cpp
deleted file mode 100644
index 3a2a389..0000000
--- a/tools/networkGraph.cpp
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * This file is part of the CN24 semantic segmentation software,
- * copyright (C) 2015 Clemens-Alexander Brust (ikosa dot de at gmail dot com).
- *
- * For licensing information, see the LICENSE file included with this project.
- */
-/**
- * @file networkGraph.cpp
- * @brief Writes a graphviz file displaying the network's architecture to standard output.
- *
- * @author Clemens-Alexander Brust(ikosa dot de at gmail dot com)
- */
-
-#define NO_LOG_AT_ALL
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-#include
-#include
-
-int main (int argc, char* argv[]) {
-  if (argc < 3) {
-    LOGERROR << "USAGE: " << argv[0] << " <dataset config file> <net config file>";
-    LOGEND;
-    return -1;
-  }
-
-  std::string net_config_fname (argv[2]);
-  std::string dataset_config_fname (argv[1]);
-
-  std::ostringstream ss;
-
-  Conv::System::Init();
-
-  // Open network and dataset configuration files
-  std::ifstream net_config_file (net_config_fname, std::ios::in);
-  std::ifstream dataset_config_file (dataset_config_fname, std::ios::in);
-
-  if (!net_config_file.good()) {
-    FATAL ("Cannot open net configuration file!");
-  }
-
-  net_config_fname = net_config_fname.substr (net_config_fname.rfind ("/") + 1);
-
-  if (!dataset_config_file.good()) {
-    FATAL ("Cannot open dataset configuration file!");
-  }
-
-  dataset_config_fname = dataset_config_fname.substr (net_config_fname.rfind ("/") + 1);
-
-  // Parse network configuration file
-  Conv::ConfigurableFactory* factory = new Conv::ConfigurableFactory (net_config_file, 8347734, true);
-  factory->InitOptimalSettings();
-
-  // Extract important settings from parsed configuration
-  Conv::TrainerSettings settings = factory->optimal_settings();
-  settings.pbatchsize = 1;
-  unsigned int BATCHSIZE = settings.pbatchsize;
-  LOGDEBUG << "Optimal settings: " << settings;
-
-  // Load dataset
-  Conv::Dataset* dataset = nullptr;
-  if (factory->method() == Conv::PATCH) {
-    dataset = Conv::TensorStreamPatchDataset::CreateFromConfiguration(dataset_config_file, false, Conv::LOAD_BOTH, factory->patchsizex(), factory->patchsizey());
-  }
-  else if (factory->method() == Conv::FCN) {
-    dataset = Conv::TensorStreamDataset::CreateFromConfiguration(dataset_config_file, false, Conv::LOAD_BOTH);
-  }
-
-  unsigned int CLASSES = dataset->GetClasses();
-
-  // Assemble net
-  Conv::Net net;
-  int data_layer_id = 0;
-
-  Conv::DatasetInputLayer* data_layer = nullptr;
-
-  data_layer = new Conv::DatasetInputLayer (*dataset, BATCHSIZE, 1.0, 983923);
-  data_layer_id = net.AddLayer (data_layer);
-
-  int output_layer_id =
-    factory->AddLayers (net, Conv::Connection (data_layer_id), CLASSES, true, ss);
-
-  LOGDEBUG << "Output layer id: " << output_layer_id;
-
-  Conv::NetGraphNode* data_node = new Conv::NetGraphNode(data_layer);
-  data_node->is_input = true;
-
-  Conv::NetGraph graph;
-  graph.AddNode(data_node);
-  bool completeness = factory->AddLayers(graph, Conv::NetGraphConnection(data_node, 0), CLASSES, true);
-
-  graph.Initialize();
-
-  LOGINFO << "Complete: " << completeness;
-
-  if(!completeness)
-    FATAL("Graph completeness test failed!");
-
-  LOGINFO << "DONE!";
-  LOGEND;
-
-  std::cout << "\ndigraph G {\n";
-  graph.PrintGraph(std::cout);
-  std::cout << "}\n";
-  //std::cout << "\nGraph output:\ndigraph G {\n" << ss.str() << "\n}\n";
-  return 0;
-}
\ No newline at end of file
diff --git a/tools/runBenchmark.cpp b/tools/runBenchmark.cpp
index 63d9912..0820f17 100644
--- a/tools/runBenchmark.cpp
+++ b/tools/runBenchmark.cpp
@@ -4,6 +4,7 @@
  *
  * For licensing information, see the LICENSE file included with this project.
  */
+
 /**
  * @file runBenchmark.cpp
  *
@@ -15,6 +16,7 @@
 #include
 #include
 #include
+#include <random>
 
 #include
 
@@ -164,6 +166,13 @@ int main (int argc, char* argv[]) {
   Conv::Tensor data_tensor(factory->optimal_settings().pbatchsize, width, height, INPUTMAPS);
   data_tensor.Clear();
 
+  // Generate random contents
+  std::mt19937 rand(1337);
+  std::uniform_real_distribution<Conv::datum> dist (0.0, 1.0);
+  for(unsigned int e = 0; e < data_tensor.elements(); e++) {
+    (data_tensor.data_ptr())[e] = dist(rand);
+  }
+
   // Assemble net
   Conv::NetGraph graph;
   Conv::InputLayer input_layer(data_tensor);
diff --git a/tools/trainNetwork.cpp b/tools/trainNetwork.cpp
index ec2cf5d..a682fc8 100644
--- a/tools/trainNetwork.cpp
+++ b/tools/trainNetwork.cpp
@@ -191,8 +191,6 @@ int main (int argc, char* argv[]) {
     Conv::TensorStreamDataset* testing_dataset = Conv::TensorStreamDataset::CreateFromConfiguration (dataset_config_file, false, Conv::LOAD_TESTING_ONLY);
     testing_graph = new Conv::NetGraph();
 
-    int tdata_layer_id = 0;
-
     Conv::DatasetInputLayer* tdata_layer = nullptr;
     tdata_layer = new Conv::DatasetInputLayer (*testing_dataset, BATCHSIZE, 1.0, 983923);
     Conv::NetGraphNode* tinput_node = new Conv::NetGraphNode(tdata_layer);