Commit d577987f by Ismael

changed indentation

parent ea69a847
......@@ -21,21 +21,21 @@
// Internal function to calculate the different scalability forms
// Internal function to calculate the different scalability forms.
// Given a problem size n, returns the value of the complexity curve at n
// (e.g. n, n^2, n*log2(n)); the caller scales this by a fitted coefficient.
// o1 (and any unrecognized complexity) maps to the constant curve 1.
double FittingCurve(double n, benchmark::BigO complexity) {
  switch (complexity) {
    case benchmark::oN:
      return n;
    case benchmark::oNSquared:
      return pow(n, 2);
    case benchmark::oNCubed:
      return pow(n, 3);
    case benchmark::oLogN:
      return log2(n);
    case benchmark::oNLogN:
      return n * log2(n);
    case benchmark::o1:
    default:
      return 1;
  }
}
// Internal function to find the coefficient for the high-order term in the running time, by minimizing the sum of squares of relative error.
......@@ -45,44 +45,44 @@ double FittingCurve(double n, benchmark::BigO complexity) {
// For a deeper explanation on the algorithm logic, look the README file at http://github.com/ismaelJimenez/Minimal-Cpp-Least-Squared-Fit
// Fits time = coef * FittingCurve(n, complexity) by ordinary least squares
// for one concrete complexity (oAuto is not allowed here — the caller must
// resolve it first via MinimalLeastSq).
//   n          - problem sizes of the benchmark runs
//   time       - measured running time for each size in n
//   complexity - the fitting curve to use (must not be benchmark::oAuto)
// Returns a LeastSq holding the fitted coefficient, the complexity used, and
// the RMS error normalized by the mean observed time (so fits for different
// curves are comparable).
// For a deeper explanation on the algorithm logic, look the README file at
// http://github.com/ismaelJimenez/Minimal-Cpp-Least-Squared-Fit
LeastSq CalculateLeastSq(const std::vector<int>& n, const std::vector<double>& time, const benchmark::BigO complexity) {
  CHECK_NE(complexity, benchmark::oAuto);

  double sigma_gn = 0;
  double sigma_gn_squared = 0;
  double sigma_time = 0;
  double sigma_time_gn = 0;

  // Calculate least square fitting parameter
  for (size_t i = 0; i < n.size(); ++i) {
    double gn_i = FittingCurve(n[i], complexity);
    sigma_gn += gn_i;
    sigma_gn_squared += gn_i * gn_i;
    sigma_time += time[i];
    sigma_time_gn += time[i] * gn_i;
  }

  LeastSq result;
  result.complexity = complexity;

  // Calculate complexity.
  // o1 is treated as a special case: the best constant fit is simply the
  // mean of the observed times.
  if (complexity != benchmark::o1)
    result.coef = sigma_time_gn / sigma_gn_squared;
  else
    result.coef = sigma_time / n.size();

  // Calculate RMS
  double rms = 0;
  for (size_t i = 0; i < n.size(); ++i) {
    double fit = result.coef * FittingCurve(n[i], complexity);
    double diff = time[i] - fit;
    rms += diff * diff;  // cheaper and exact vs pow(diff, 2)
  }

  double mean = sigma_time / n.size();
  result.rms = sqrt(rms / n.size()) / mean; // Normalized RMS by the mean of the observed values

  return result;
}
// Find the coefficient for the high-order term in the running time, by minimizing the sum of squares of relative error.
......@@ -92,24 +92,24 @@ LeastSq CalculateLeastSq(const std::vector<int>& n, const std::vector<double>& t
// the best fitting curve.
// Find the coefficient for the high-order term in the running time, by
// minimizing the sum of squares of relative error.
//   n          - problem sizes of the benchmark runs (same length as time)
//   time       - measured running time for each size in n
//   complexity - a concrete curve to fit, or benchmark::oAuto to try every
//                supported curve and return the one with the lowest
//                normalized RMS error. benchmark::oNone is not allowed.
// Requires at least two data points; asserted via CHECK_GE below.
LeastSq MinimalLeastSq(const std::vector<int>& n, const std::vector<double>& time, const benchmark::BigO complexity) {
  CHECK_EQ(n.size(), time.size());
  CHECK_GE(n.size(), 2); // Do not compute fitting curve if fewer than two benchmark runs are given
  CHECK_NE(complexity, benchmark::oNone);

  if(complexity == benchmark::oAuto) {
    std::vector<benchmark::BigO> fit_curves = { benchmark::oLogN, benchmark::oN, benchmark::oNLogN, benchmark::oNSquared, benchmark::oNCubed };

    LeastSq best_fit = CalculateLeastSq(n, time, benchmark::o1); // Take o1 as default best fitting curve

    // Compute all possible fitting curves and stick to the best one
    for (const auto& fit : fit_curves) {
      LeastSq current_fit = CalculateLeastSq(n, time, fit);
      if (current_fit.rms < best_fit.rms)
        best_fit = current_fit;
    }

    return best_fit;
  }
  else
    return CalculateLeastSq(n, time, complexity);
}
\ No newline at end of file
......@@ -30,14 +30,14 @@
// best fitting curve detected.
// Result of a least-squares fit of benchmark timings against a complexity
// curve: the fitted high-order coefficient, the normalized RMS error of the
// fit, and the complexity curve that was used (the best one detected when
// fitting with oAuto).
struct LeastSq {
  LeastSq() :
    coef(0),
    rms(0),
    complexity(benchmark::oNone) {}

  double coef;             // fitted coefficient of the high-order term
  double rms;              // RMS error, normalized by the mean observed time
  benchmark::BigO complexity;  // curve this fit corresponds to
};
// Find the coefficient for the high-order term in the running time, by minimizing the sum of squares of relative error.
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment