Difference between Paste ID: Qye00cgx and fMF6TKfE (the newer revision is shown below).
#include <cmath>
#include <random>
#include <iostream>
#include <cstdio>
#include <cstdlib>
#include <ctime>
#include <clocale>
#include <stdexcept>
using namespace std;

template<typename T>
struct Vector {
private:
    T* elements;
    size_t _size;
public:
    Vector() : elements(nullptr), _size(0) {}
    Vector(const size_t size) {
        this->elements = new T[size]{ T(0) };
        this->_size = size;
    }
    Vector(const Vector<T>& other) {
        this->_size = other._size;
        this->elements = new T[_size];
        for (size_t i = 0; i < _size; ++i)
            this->elements[i] = other.elements[i];
    }
    Vector<T>& operator =(const Vector<T>& other) {
        if (this == &other)
            return *this;
        delete[] this->elements;
        this->_size = other._size;
        this->elements = new T[this->_size];
        for (size_t i = 0; i < this->_size; ++i)
            this->elements[i] = other.elements[i];
        return *this;
    }
    Vector(Vector<T>&& other) : elements(other.elements), _size(other._size) {
        other._size = 0;
        other.elements = nullptr;
    }
    Vector<T>& operator =(Vector<T>&& other) {
        if (this == &other)
            return *this;

        delete[] this->elements;

        this->_size = other._size;
        this->elements = other.elements;
        other._size = 0;
        other.elements = nullptr;

        return *this;
    }
    size_t size() const { return _size; }
    T& operator[](const size_t index) {
        if (index < _size)
            return elements[index];
        throw out_of_range("Vector index out of range");
    }
    ~Vector() {
        delete[] elements;
        this->_size = 0;
    }
};
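
// Illustrative use of Vector (not in the original paste): Vector<int> v(3) holds three
// value-initialized elements, v[0] = 42 writes through the bounds-checked operator[],
// copies deep-copy the buffer and moves steal it, leaving the source empty.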

template<typename T>
struct Matrix {
private:
    Vector<T>* _matrix;
    size_t _rows;
    size_t _cols;
public:
    Matrix() : _matrix(nullptr), _rows(0), _cols(0) {}
    Matrix(const size_t rows, const size_t cols) {
        this->_matrix = new Vector<T>[rows];
        for (size_t i = 0; i < rows; ++i)
            _matrix[i] = Vector<T>(cols);
        this->_rows = rows;
        this->_cols = cols;
    }
    Matrix(const Matrix<T>& other) {
        this->_rows = other._rows;
        this->_cols = other._cols;
        this->_matrix = new Vector<T>[_rows];
        for (size_t i = 0; i < _rows; ++i) {
            _matrix[i] = Vector<T>(_cols);
            for (size_t j = 0; j < _cols; ++j)
                this->_matrix[i][j] = other._matrix[i][j];
        }
    }
    Matrix<T>& operator =(const Matrix<T>& other) {
        if (this == &other)
            return *this;
        delete[] _matrix;
        this->_rows = other._rows;
        this->_cols = other._cols;
        _matrix = new Vector<T>[this->_rows];
        for (size_t i = 0; i < this->_rows; ++i) {
            _matrix[i] = Vector<T>(this->_cols);
            for (size_t j = 0; j < this->_cols; ++j)
                _matrix[i][j] = other._matrix[i][j];
        }
        return *this;
    }
    Matrix(Matrix<T>&& other)
        : _matrix(other._matrix), _rows(other._rows), _cols(other._cols) {
        other._matrix = nullptr;
        other._rows = 0;
        other._cols = 0;
    }
    Matrix<T>& operator =(Matrix<T>&& other) {
        if (&other == this)
            return *this;
        delete[] this->_matrix;
        this->_matrix = other._matrix;
        this->_rows = other._rows;
        this->_cols = other._cols;
        other._matrix = nullptr;
        other._rows = 0;
        other._cols = 0;
        return *this;
    }
    size_t rows() const { return _rows; }
    size_t cols() const { return _cols; }
    Vector<T>& operator[] (const size_t row) {
        if (row < this->_rows)
            return _matrix[row];
        throw out_of_range("Matrix row index out of range");
    }
    ~Matrix() {
        delete[] _matrix;
        this->_rows = 0;
        this->_cols = 0;
    }
};
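
// Matrix<T> is stored as an array of row Vectors: Matrix<T> m(rows, cols) gives a
// rows x cols matrix, and m[i][j] addresses row i, column j through two bounds-checked
// operator[] calls.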

struct NeuralNetwork {
private:

    struct Neuron {
        double_t value;
        double_t error;
    public:
        Neuron(double_t value = 0) : value(value), error(0) {}
        operator double_t& () { return value; }
    };

    struct Weight {
        double_t value;
        double_t delta;
    public:
        Weight(double_t value = 0) : value(value), delta(0) {}
        operator double_t& () { return value; }
    };
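
    // Note: Neuron and Weight both convert implicitly to double_t&, so the arithmetic
    // below reads and writes their .value field through that conversion, while .error
    // and .delta are always accessed explicitly.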

    size_t count_layers;
    Matrix<Weight>* weights;
    Vector<Neuron>* layers;
    double_t E;
    double_t a;

    //Activation function
    double_t(*activationFunc)(const double_t value) = [](const double_t value) {
        return value > 0.5 ? (double_t)1 : 0;
    };
    //Derivative of the activation function
    double_t(*derivativeFunc)(const double_t value) = [](const double_t value) {
        return (double_t)1;
    };
    double_t getRandomWeight() { return (double_t)(rand()) / RAND_MAX - 0.5; }
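
    // Note: the default activation is a hard threshold at 0.5 and its "derivative" is
    // the constant 1, so the weight updates below amount to a perceptron-style delta
    // rule rather than classic sigmoid backpropagation. An illustrative alternative
    // (an assumption, not part of the original paste) would replace the two
    // initializers above with a sigmoid pair:
    //     activationFunc = [](const double_t value) { return 1.0 / (1.0 + exp(-value)); };
    //     derivativeFunc = [](const double_t value) { return value * (1.0 - value); };
    // where derivativeFunc receives the already-activated output, as it does in
    // findHiddenError below.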
public:
    NeuralNetwork(const size_t* dimensions, const size_t size,
        const double_t E = 0.5, const double_t a = 0.5) {
        srand(static_cast<unsigned>(time(NULL)));
        this->count_layers = size;
        this->weights = new Matrix<Weight>[count_layers - 1];
        this->layers = new Vector<Neuron>[count_layers];
        this->E = E;
        this->a = a;

        //Initialize the weights and layers
        for (size_t i = 0; i < count_layers - 2; ++i) {
            weights[i] = Matrix<Weight>(dimensions[i] + 1, dimensions[i + 1] + 1);
            layers[i] = Vector<Neuron>(dimensions[i]);
        }
        weights[size - 2] = Matrix<Weight>(dimensions[size - 2] + 1, dimensions[size - 1]);
        layers[size - 2] = Vector<Neuron>(dimensions[size - 2]);
        layers[size - 1] = Vector<Neuron>(dimensions[size - 1]);

        fillRandomWeights();
    }
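    // Layout: weights[i] connects layer i to layer i + 1 and has dimensions[i] + 1 rows;
    // the extra last row (index layers[i].size()) stores the bias weights into layer
    // i + 1. For the hidden transitions the matrix also gets one spare column, which
    // the passes below never touch.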
    //Fill the weights with random values from [-0.5, 0.5]
    void fillRandomWeights() {
        for (size_t i = 0; i < count_layers - 1; ++i)
            for (size_t j = 0; j < layers[i].size(); ++j) {
                for (size_t k = 0; k < layers[i + 1].size(); ++k)
                    weights[i][j][k] = getRandomWeight();
            }
    }
191+
    //Прямое прохождение
192-
	void feedForward() {
192+
    void feedForward() {
193-
		for (size_t i = 1; i < count_layers; ++i)
193+
        for (size_t i = 1; i < count_layers; ++i)
194-
			for (size_t j = 0; j < layers[i].size(); ++j) {
194+
            for (size_t j = 0; j < layers[i].size(); ++j) {
195-
				double_t summ = weights[i - 1][layers[i - 1].size()][j];
195+
                double_t summ = weights[i - 1][layers[i - 1].size()][j];
196-
				for (size_t k = 0; k < layers[i - 1].size(); ++k)
196+
                for (size_t k = 0; k < layers[i - 1].size(); ++k)
197-
					summ += layers[i - 1][k] * weights[i - 1][k][j];
197+
                    summ += layers[i - 1][k] * weights[i - 1][k][j];
198-
				layers[i][j] = activationFunc(summ);
198+
                layers[i][j] = activationFunc(summ);
199-
			}
199+
            }
200-
	}
200+
    }
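    // For each neuron j of layer i this computes
    //     layers[i][j] = f(bias_j + sum_k layers[i - 1][k] * weights[i - 1][k][j]),
    // where bias_j is the weight stored in the extra row weights[i - 1][layers[i - 1].size()][j].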
    //Compute the errors of the output neurons
    void findOutError(double_t* ideals) {
        size_t li = count_layers - 1;
        for (size_t i = 0; i < layers[li].size(); ++i) {
            layers[li][i].error = ideals[i] - layers[li][i];/*(ideals[i] - layers[li][i]) * derivativeFunc(layers[li][i]);*/
            //cout << layers[li][i].error << endl;
        }
    }
    //Compute the errors of the hidden neurons
    void findHiddenError() {
        for (size_t i = count_layers - 2; i >= 1; --i)
            for (size_t j = 0; j < layers[i].size(); ++j) {
                double_t error = 0;
                for (size_t k = 0; k < layers[i + 1].size(); ++k)
                    error += layers[i + 1][k].error * weights[i][j][k];
                layers[i][j].error = derivativeFunc(layers[i][j]) * error;
            }
    }
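    // Output error: delta_i = ideal_i - out_i (the commented-out variant would also
    // multiply by the derivative). Hidden error: delta_j = f'(out_j) * sum_k delta_k * w_jk,
    // propagated backwards one layer at a time.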
    //Backward pass
    void backWards() {
        for (size_t i = 0; i < count_layers - 1; ++i) {
            for (size_t j = 0; j < layers[i].size(); ++j) {
                for (size_t k = 0; k < layers[i + 1].size(); ++k) {
                    weights[i][j][k].delta = E * layers[i][j] * layers[i + 1][k].error + a * weights[i][j][k].delta;
                    weights[i][j][k] += weights[i][j][k].delta;
                }
            }
            //Adjust the weights connected to the bias neurons
            for (size_t k = 0; k < layers[i + 1].size(); ++k) {
                weights[i][layers[i].size()][k].delta = E * layers[i + 1][k].error + a * weights[i][layers[i].size()][k].delta;
                weights[i][layers[i].size()][k] += weights[i][layers[i].size()][k].delta;
            }
        }
    }
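    // Delta rule with momentum: delta_w = E * input * error_out + a * previous_delta_w,
    // then w += delta_w. E acts as the learning rate and a as the momentum coefficient;
    // for the bias weights the input is implicitly 1.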
    //Set the input data
    void setInputs(double_t* inputs) {
        for (size_t i = 0; i < layers[0].size(); ++i)
            layers[0][i] = inputs[i];
    }
    //Compute the error for one training sample
    double_t getTrainError(double_t* ideals) {
        size_t li = count_layers - 1;
        double_t error = 0;
        //cout << "Neural network outputs: [";
        for (size_t i = 0; i < layers[li].size(); ++i) {
            error += (ideals[i] - layers[li][i]) * (ideals[i] - layers[li][i]);
            //cout << i << " = " << layers[li][i] << " ";
        }
        //cout << "]" << endl;
        return error / layers[li].size();
    }
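    // This is the mean squared error over the output layer for the current sample.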
    //Training routine
    void study(double_t** inputs, double_t** ideals, size_t count) {
        double_t error = 0;
        size_t epoch = 0;
        do {
            error = 0;
            for (size_t stepTrain = 0; stepTrain < count; ++stepTrain) {
                /*cout << "Ideal answers: " << inputs[stepTrain][0] << " ";
                if (inputs[stepTrain][2] == 0) cout << "& ";
                else cout << (inputs[stepTrain][2] == 0.5 ? "| " : "^ ");
                cout << inputs[stepTrain][1] << " = [0 = " << ideals[stepTrain][0] << ", 1 = " << ideals[stepTrain][1] << "]\n";*/
                setInputs(inputs[stepTrain]);
                feedForward();
                findOutError(ideals[stepTrain]);
                findHiddenError();
                backWards();
                double_t test = getTrainError(ideals[stepTrain]);
                error += test;
                cout << "Total error for this sample: " << test << endl;
            }
            error /= count;
            ++epoch;
            cout << epoch << "  " << error << endl;
        } while (error >= 0.05);
    }
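    // Each epoch runs the forward pass, error computation and backward pass once per
    // sample, and epochs repeat until the average per-sample error drops below 0.05.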
    //Get the output values
    Vector<double_t> getOutputs(double_t* inputs) {
        setInputs(inputs);
        feedForward();
        Vector<double_t> res(layers[count_layers - 1].size());
        for (size_t i = 0; i < res.size(); ++i)
            res[i] = layers[count_layers - 1][i];
        return res;
    }
    ~NeuralNetwork() {
        delete[] weights;
        delete[] layers;
    }
};

double_t normalize_data(double_t val, double_t min, double_t max) {
    return (val - min) / (max - min);
}
double_t denormalize_data(double_t val, double_t min, double_t max) {
    return min + val * (max - min);
}
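// For example, normalize_data(2, 0, 2) == 1 and denormalize_data(1, 0, 2) == 2.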

int main() {
    setlocale(LC_ALL, "Russian");
    getchar();
    //Layer sizes
    size_t* dimensions = new size_t[3]{ 3, 2, 2 };
    //Training set
    double_t** trainSet = new double_t * [4] {
        new double_t[3]{ 0, 0, 2 },
        new double_t[3]{ 0, 1, 2 },
        new double_t[3]{ 1, 0, 2 },
        new double_t[3]{ 1, 1, 2 }
    };
    //Normalize the data
    for (size_t i = 0; i < 4; ++i)
        trainSet[i][2] = normalize_data(trainSet[i][2], 0, 2);
    //Target (ideal) values
    double_t** ideals = new double_t * [4] {
        new double_t[2]{ 1, 0 }, new double_t[2]{ 0, 1 },
        new double_t[2]{ 0, 1 }, new double_t[2]{ 1, 0 }
    };
    size_t size_train_set = 4;
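
    // Each sample is (a, b, op): judging by the commented-out debug output in study(),
    // the third input selects the operation (0 = AND, 0.5 = OR, 1 = XOR). Here it is
    // always 2, normalized above to 1, so this set trains XOR with one-hot targets
    // { result == 0, result == 1 }. The network below uses topology {3, 2, 2},
    // learning rate E = 0.4 and momentum a = 0.4.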

    NeuralNetwork n(dimensions, 3, 0.4, 0.4);
    n.study(trainSet, ideals, size_train_set);
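
    // Illustrative follow-up (not in the original paste): query the trained network
    // for one pattern via getOutputs, e.g.
    //     Vector<double_t> out = n.getOutputs(trainSet[0]);
    //     cout << out[0] << " " << out[1] << endl;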

    delete[] dimensions;
    for (size_t i = 0; i < size_train_set; ++i) {
        delete[] trainSet[i];
        delete[] ideals[i];
    }
    delete[] trainSet;
    delete[] ideals;
}