@@ -51,27 +51,6 @@ static void mi_stat_update(mi_stat_count_t* stat, int64_t amount) {
   }
 }
 
-// Adjust stats to compensate; for example before committing a range,
-// first adjust downwards with parts that were already committed so
-// we avoid double counting.
-static void mi_stat_adjust(mi_stat_count_t* stat, int64_t amount) {
-  if (amount == 0) return;
-  if mi_unlikely(mi_is_in_main(stat))
-  {
-    // adjust atomically
-    mi_atomic_addi64_relaxed(&stat->current, amount);
-    mi_atomic_addi64_relaxed(&stat->allocated, amount);
-    mi_atomic_addi64_relaxed(&stat->freed, amount);
-  }
-  else {
-    // don't affect the peak
-    stat->current += amount;
-    // add to both
-    stat->allocated += amount;
-    stat->freed += amount;
-  }
-}
-
 void _mi_stat_counter_increase(mi_stat_counter_t* stat, size_t amount) {
   if (mi_is_in_main(stat)) {
     mi_atomic_addi64_relaxed(&stat->count, 1);
@@ -91,13 +70,7 @@ void _mi_stat_decrease(mi_stat_count_t* stat, size_t amount) {
   mi_stat_update(stat, -((int64_t)amount));
 }
 
-void _mi_stat_adjust_increase(mi_stat_count_t* stat, size_t amount) {
-  mi_stat_adjust(stat, (int64_t)amount);
-}
 
-void _mi_stat_adjust_decrease(mi_stat_count_t* stat, size_t amount) {
-  mi_stat_adjust(stat, -((int64_t)amount));
-}
 
 // must be thread safe as it is called from stats_merge
 static void mi_stat_add(mi_stat_count_t* stat, const mi_stat_count_t* src, int64_t unit) {
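For reference, the comment on the removed mi_stat_adjust describes its intended use: before counting a newly committed range, first adjust the committed statistic downward by the portion that was already committed, so the subsequent increase does not double count. A minimal sketch of that pattern as it applied before this commit (the caller, its arguments, and the include path are assumptions for illustration; only _mi_stat_adjust_decrease and _mi_stat_increase come from the code above, and both are gone after this change):

#include "mimalloc/internal.h"  // assumed internal header; the path differs across mimalloc versions

// Hypothetical caller showing the adjust-before-commit pattern from the
// removed comment; committed_stat would be something like &stats->committed.
static void example_count_commit(mi_stat_count_t* committed_stat,
                                 size_t range_size, size_t already_committed) {
  // The whole range is counted as committed below, so first subtract the
  // part that was already committed to avoid counting it twice.
  _mi_stat_adjust_decrease(committed_stat, already_committed);
  _mi_stat_increase(committed_stat, range_size);
}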