Mirror of https://github.com/GoldenCheetah/GoldenCheetah.git
Revert "Code Cleanup: RideFileCache remove mallocs"
This reverts commit 6fdc55528f.
The performance decrease is considerable: with mallocs, computing all
the caches across my personal ride collection takes 5:19, while the
QVector equivalent took 22:40. We need to look at how QVector is used
across the GC codebase.
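For anyone wanting to reproduce the comparison, a sketch along the following lines illustrates the effect. This is a hypothetical microbenchmark, not the one behind the timings above; the element count and workload are assumptions. The likely cost is that Qt's non-const QVector::operator[] goes through an implicit-sharing (detach) check on every access, while the malloc'd buffer is plain pointer arithmetic:

    // Hypothetical microbenchmark (illustrative only): a prefix-sum pass
    // like integrate_series(), once over a malloc'd buffer, once over a
    // QVector indexed with non-const operator[].
    #include <QVector>
    #include <QElapsedTimer>
    #include <cstdio>
    #include <cstdlib>

    typedef double data_t;

    int main()
    {
        const int n = 20000000; // assumed size, big enough to measure

        QElapsedTimer timer;

        // malloc version: one flat allocation, raw pointer arithmetic
        timer.start();
        data_t *raw = (data_t *)malloc(sizeof(data_t) * (n + 1));
        raw[0] = 0;
        for (int i = 0; i < n; i++)
            raw[i + 1] = raw[i] + 1.0;
        qint64 rawMs = timer.elapsed();
        free(raw);

        // QVector version: resize() once, then non-const operator[] in
        // the loop, which re-checks whether a detach is needed on every
        // access
        timer.restart();
        QVector<data_t> vec;
        vec.resize(n + 1);
        vec[0] = 0;
        for (int i = 0; i < n; i++)
            vec[i + 1] = vec[i] + 1.0;
        qint64 vecMs = timer.elapsed();

        printf("malloc: %lld ms, QVector: %lld ms\n",
               (long long)rawMs, (long long)vecMs);
        return 0;
    }

A common mitigation, if QVector is revisited later, is to hoist a raw pointer out of the hot loop via QVector::data(), which keeps the container but restores pointer-speed access.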
@@ -489,11 +489,11 @@ void RideFileCache::RideFileCache::compute()
 
 */
 
-void
+data_t *
 MeanMaxComputer::integrate_series(cpintdata &data)
 {
     // would be better to do pure QT and use QVector -- but no memory leak
-    integrated.resize(data.points.size()+1);
+    data_t *integrated= (data_t *)malloc(sizeof(data_t)*(data.points.size()+1));
     int i;
     data_t acc=0;
 
@@ -503,11 +503,11 @@ MeanMaxComputer::integrate_series(cpintdata &data)
     }
     integrated[i]=acc;
 
-    return;
+    return integrated;
 }
 
 data_t
-MeanMaxComputer::partial_max_mean(int start, int end, int length, int *offset)
+MeanMaxComputer::partial_max_mean(data_t *dataseries_i, int start, int end, int length, int *offset)
 {
     int i=0;
     data_t candidate=0;
@@ -515,7 +515,7 @@ MeanMaxComputer::partial_max_mean(int start, int end, int length, int *offset)
     int best_i=0;
 
     for (i=start; i<(1+end-length); i++) {
-        data_t test_energy=integrated[length+i]-integrated[i];
+        data_t test_energy=dataseries_i[length+i]-dataseries_i[i];
         if (test_energy>candidate) {
             candidate=test_energy;
             best_i=i;
@@ -528,7 +528,7 @@ MeanMaxComputer::partial_max_mean(int start, int end, int length, int *offset)
 
 
 data_t
-MeanMaxComputer::divided_max_mean(int datalength, int length, int *offset)
+MeanMaxComputer::divided_max_mean(data_t *dataseries_i, int datalength, int length, int *offset)
 {
     int shift=length;
 
@@ -549,12 +549,12 @@ MeanMaxComputer::divided_max_mean(int datalength, int length, int *offset)
 
     for (start=0; start+window_length<=datalength; start+=shift) {
         end=start+window_length;
-        energy=integrated[end]-integrated[start];
+        energy=dataseries_i[end]-dataseries_i[start];
 
         if (energy < candidate) {
             continue;
         }
-        data_t window_mm=partial_max_mean(start, end, length, &this_offset);
+        data_t window_mm=partial_max_mean(dataseries_i, start, end, length, &this_offset);
 
         if (window_mm>candidate) {
             candidate=window_mm;
@@ -568,11 +568,11 @@ MeanMaxComputer::divided_max_mean(int datalength, int length, int *offset)
     if (end<datalength) {
         start=datalength-window_length;
         end=datalength;
-        energy=integrated[end]-integrated[start];
+        energy=dataseries_i[end]-dataseries_i[start];
 
         if (energy >= candidate) {
 
-            data_t window_mm=partial_max_mean(start, end, length, &this_offset);
+            data_t window_mm=partial_max_mean(dataseries_i, start, end, length, &this_offset);
 
             if (window_mm>candidate) {
                 candidate=window_mm;
@@ -760,12 +760,12 @@ MeanMaxComputer::run()
     // the bests go in here...
     QVector <double> ride_bests(total_secs + 1);
 
-    integrate_series(data);
+    data_t *dataseries_i = integrate_series(data);
 
     for (int i=1; i<data.points.size(); i++) {
 
         int offset;
-        data_t c=divided_max_mean(data.points.size(),i,&offset);
+        data_t c=divided_max_mean(dataseries_i,data.points.size(),i,&offset);
 
         // snaffle it away
         int sec = i*ride->recIntSecs();
@@ -778,6 +778,7 @@ MeanMaxComputer::run()
             ride_bests[sec] = val;
         }
     }
+    free(dataseries_i);
 
     //
     // FILL IN THE GAPS AND FILL TARGET ARRAY
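For context on the functions being patched: integrate_series() builds a running (prefix) sum of the samples, so the total over any window [start, end) is a single subtraction, integrated[end] - integrated[start]; divided_max_mean() then scans windows of a given length with a coarse stride and calls partial_max_mean() to refine only the promising regions. A minimal standalone sketch of the prefix-sum idea, with simplified names and made-up data rather than the GC implementation:

    // Standalone sketch of the "integrated series" trick used above;
    // simplified names, not the GoldenCheetah code.
    #include <cstdio>
    #include <cstdlib>

    typedef double data_t;

    // Prefix sums: integrated[k] holds the sum of the first k samples,
    // so the total over any window is one subtraction instead of a loop.
    static data_t *integrate(const data_t *samples, int n)
    {
        data_t *integrated = (data_t *)malloc(sizeof(data_t) * (n + 1));
        integrated[0] = 0;
        for (int i = 0; i < n; i++)
            integrated[i + 1] = integrated[i] + samples[i];
        return integrated;
    }

    // Best mean over any window of `length` samples: scan every start
    // point with O(1) window sums -- O(n) per length, not O(n*length).
    static data_t max_mean(const data_t *integrated, int n, int length)
    {
        data_t best = 0;
        for (int i = 0; i + length <= n; i++) {
            data_t windowSum = integrated[i + length] - integrated[i];
            if (windowSum > best) best = windowSum;
        }
        return best / length;
    }

    int main()
    {
        data_t watts[] = { 100, 250, 300, 280, 120, 90 };
        int n = sizeof(watts) / sizeof(watts[0]);
        data_t *integrated = integrate(watts, n);
        printf("best 3-sample mean power: %.1f\n",
               max_mean(integrated, n, 3));
        free(integrated); // caller owns the buffer, as in the reverted code
        return 0;
    }

In the reverted code the same integrated buffer is reused for every window length and freed once at the end of MeanMaxComputer::run(), which is why integrate_series() returns the raw pointer instead of keeping a member QVector.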