@@ -55,8 +55,8 @@ namespace {
 
 // Futility margin
 Value futility_margin(Depth d, bool noTtCutNode, bool improving, bool oppWorsening) {
-    Value futilityMult       = 121 - 43 * noTtCutNode;
-    Value improvingDeduction = 3 * improving * futilityMult / 2;
+    Value futilityMult       = 122 - 46 * noTtCutNode;
+    Value improvingDeduction = 57 * improving * futilityMult / 32;
     Value worseningDeduction = (331 + 45 * improving) * oppWorsening * futilityMult / 1024;
 
     return futilityMult * d - improvingDeduction - worseningDeduction;
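For a sense of scale: the improving deduction grows from 1.5x the multiplier (3/2) to roughly 1.78x (57/32). A standalone restatement of the retuned margin, with Value treated as a plain int purely for illustration (not part of the patch):

#include <iostream>

// Retuned futility margin, restated with Value as int.
int futility_margin(int d, bool noTtCutNode, bool improving, bool oppWorsening) {
    int futilityMult       = 122 - 46 * noTtCutNode;
    int improvingDeduction = 57 * improving * futilityMult / 32;
    int worseningDeduction = (331 + 45 * improving) * oppWorsening * futilityMult / 1024;
    return futilityMult * d - improvingDeduction - worseningDeduction;
}

int main() {
    std::cout << futility_margin(5, false, false, false) << '\n';  // 122 * 5 = 610
    std::cout << futility_margin(5, false, true, false) << '\n';   // 610 - 57 * 122 / 32 = 393
}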
@@ -69,15 +69,15 @@ constexpr int futility_move_count(bool improving, Depth depth) {
 // Add correctionHistory value to raw staticEval and guarantee evaluation does not hit the tablebase range
 Value to_corrected_static_eval(Value v, const Worker& w, const Position& pos) {
     auto cv = w.correctionHistory[pos.side_to_move()][pawn_structure_index<Correction>(pos)];
-    v += cv * std::abs(cv) / 10759;
+    v += cv * std::abs(cv) / 11450;
     return std::clamp(v, VALUE_TB_LOSS_IN_MAX_PLY + 1, VALUE_TB_WIN_IN_MAX_PLY - 1);
 }
 
 // History and stats update bonus, based on depth
 int stat_bonus(Depth d) { return std::min(249 * d - 327, 1192); }
 
 // History and stats update malus, based on depth
-int stat_malus(Depth d) { return std::min(516 * d - 299, 1432); }
+int stat_malus(Depth d) { return std::min(516 * d - 299, 1254); }
 
 // Add a small random component to draw evaluations to avoid 3-fold blindness
 Value value_draw(size_t nodes) { return VALUE_DRAW - 1 + Value(nodes & 0x2); }
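A minimal sketch of the retuned correction term, using placeholder values for Stockfish's tablebase bounds: the adjustment is quadratic in the correction-history value cv but keeps its sign, and the result is clamped so it can never be mistaken for a tablebase score. Note also that the malus cap drops from 1432 to 1254 while the bonus cap (1192) is untouched.

#include <algorithm>
#include <cstdlib>

// Illustrative placeholders; the real constants are defined elsewhere in Stockfish.
constexpr int VALUE_TB_WIN_IN_MAX_PLY  = 31000;
constexpr int VALUE_TB_LOSS_IN_MAX_PLY = -VALUE_TB_WIN_IN_MAX_PLY;

// Quadratic, sign-preserving correction with the new divisor 11450,
// clamped to stay strictly inside the tablebase range.
int corrected_static_eval(int v, int cv) {
    v += cv * std::abs(cv) / 11450;
    return std::clamp(v, VALUE_TB_LOSS_IN_MAX_PLY + 1, VALUE_TB_WIN_IN_MAX_PLY - 1);
}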
@@ -301,12 +301,12 @@ void Search::Worker::iterative_deepening() {
 
             // Reset aspiration window starting size
             Value avg = rootMoves[pvIdx].averageScore;
-            delta     = 9 + avg * avg / 12804;
+            delta     = 9 + avg * avg / 12800;
             alpha     = std::max(avg - delta, -VALUE_INFINITE);
             beta      = std::min(avg + delta, VALUE_INFINITE);
 
             // Adjust optimism based on root move's averageScore (~4 Elo)
-            optimism[us]  = 131 * avg / (std::abs(avg) + 90);
+            optimism[us]  = 130 * avg / (std::abs(avg) + 90);
             optimism[~us] = -optimism[us];
 
             // Start with a small aspiration window and, in the case of a fail
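A standalone sketch of the retuned aspiration-window setup, with VALUE_INFINITE as a placeholder: the half-width delta grows quadratically with the running average score, and optimism saturates toward +/-130 as |avg| grows.

#include <algorithm>
#include <cstdlib>

constexpr int VALUE_INFINITE = 32001;  // placeholder for Stockfish's constant

struct Window {
    int alpha, beta, optimism;
};

// Quadratic window half-width (divisor 12800) and saturating optimism (scale 130).
Window aspiration_window(int avg) {
    int delta = 9 + avg * avg / 12800;
    return {std::max(avg - delta, -VALUE_INFINITE),
            std::min(avg + delta, VALUE_INFINITE),
            130 * avg / (std::abs(avg) + 90)};
}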
@@ -500,7 +500,7 @@ void Search::Worker::clear() {
                     h->fill(-71);
 
     for (size_t i = 1; i < reductions.size(); ++i)
-        reductions[i] = int((19.02 + std::log(size_t(options["Threads"])) / 2) * std::log(i));
+        reductions[i] = int((19.80 + std::log(size_t(options["Threads"])) / 2) * std::log(i));
 }
 
 
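A sketch of the retuned reduction-table fill (base factor 19.80 instead of 19.02), assuming a single thread: each entry scales with log(i), plus a small additive term that grows with the thread count.

#include <cmath>
#include <cstddef>
#include <vector>

// Fill the log-based reduction table as in Worker::clear(); threads assumed = 1 here.
std::vector<int> make_reductions(std::size_t size, std::size_t threads = 1) {
    std::vector<int> reductions(size, 0);
    for (std::size_t i = 1; i < size; ++i)
        reductions[i] = int((19.80 + std::log(double(threads)) / 2) * std::log(double(i)));
    return reductions;
}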
@@ -732,12 +732,12 @@ Value Search::Worker::search(
     // Use static evaluation difference to improve quiet move ordering (~9 Elo)
     if (((ss - 1)->currentMove).is_ok() && !(ss - 1)->inCheck && !priorCapture)
     {
-        int bonus = std::clamp(-14 * int((ss - 1)->staticEval + ss->staticEval), -1621, 1237);
+        int bonus = std::clamp(-14 * int((ss - 1)->staticEval + ss->staticEval), -1621, 1238);
         bonus = bonus > 0 ? 2 * bonus : bonus / 2;
         thisThread->mainHistory[~us][((ss - 1)->currentMove).from_to()] << bonus;
         if (type_of(pos.piece_on(prevSq)) != PAWN && ((ss - 1)->currentMove).type_of() != PROMOTION)
             thisThread->pawnHistory[pawn_structure_index(pos)][pos.piece_on(prevSq)][prevSq]
-              << bonus / 4;
+              << bonus / 2;
     }
 
     // Set up the improving flag, which is true if current static evaluation is
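A standalone restatement of the retuned ordering bonus: the sum of the previous and current static evals is scaled by -14 and clamped to [-1621, 1238]; positive bonuses are doubled, negative ones halved, and the pawn-history table now receives bonus / 2 rather than bonus / 4.

#include <algorithm>

// Retuned quiet-ordering bonus: the clamp ceiling is now 1238.
int ordering_bonus(int prevStaticEval, int currStaticEval) {
    int bonus = std::clamp(-14 * (prevStaticEval + currStaticEval), -1621, 1238);
    return bonus > 0 ? 2 * bonus : bonus / 2;
}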
@@ -828,7 +828,7 @@ Value Search::Worker::search(
     // Step 11. ProbCut (~10 Elo)
     // If we have a good enough capture (or queen promotion) and a reduced search returns a value
     // much above beta, we can (almost) safely prune the previous move.
-    probCutBeta = beta + 164 - 62 * improving;
+    probCutBeta = beta + 168 - 64 * improving;
     if (
       !PvNode && depth > 3
       && std::abs(beta) < VALUE_TB_WIN_IN_MAX_PLY
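In effect the ProbCut threshold moves slightly further from beta; a quick restatement, not part of the patch:

// Retuned ProbCut threshold: 168 above beta at non-improving nodes,
// 168 - 64 = 104 above beta when improving (previously 164 and 102).
int probcut_beta(int beta, bool improving) { return beta + 168 - 64 * improving; }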
@@ -1139,7 +1139,7 @@ Value Search::Worker::search(
                       + (*contHist[3])[movedPiece][move.to_sq()] - 4587;
 
         // Decrease/increase reduction for moves with a good/bad history (~8 Elo)
-        r -= ss->statScore / 12372;
+        r -= ss->statScore / 14956;
 
         // Step 17. Late moves reduction / extension (LMR, ~117 Elo)
         if (depth >= 2 && moveCount > 1 + rootNode)
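With the divisor raised from 12372 to 14956, statScore has to swing further before it changes the reduction by a full unit; a minimal restatement:

// History-based reduction adjustment with the new divisor: a statScore of
// roughly +/-14956 is now needed to shift the reduction by one unit.
int history_reduction_delta(int statScore) { return -(statScore / 14956); }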
@@ -1627,7 +1627,7 @@ Value Search::Worker::qsearch(Position& pos, Stack* ss, Value alpha, Value beta,
 
 Depth Search::Worker::reduction(bool i, Depth d, int mn, int delta) {
     int reductionScale = reductions[d] * reductions[mn];
-    return (reductionScale + 1091 - delta * 759 / rootDelta) / 1024 + (!i && reductionScale > 952);
+    return (reductionScale + 1091 - delta * 759 / rootDelta) / 1024 + (!i && reductionScale > 950);
 }
 
 namespace {
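A standalone sketch of Worker::reduction with the retuned threshold: the extra non-improving reduction now triggers once the scaled table product exceeds 950 instead of 952, i.e. marginally more often. The reductions table and rootDelta are passed in explicitly here because they are members of Worker in the real code.

#include <vector>

// reduction() restated as a free function: table-driven base, an adjustment
// proportional to delta / rootDelta, plus one extra unit at non-improving
// nodes once reductionScale exceeds the new 950 threshold.
int reduction(const std::vector<int>& reductions, bool improving, int depth,
              int moveNumber, int delta, int rootDelta) {
    int reductionScale = reductions[depth] * reductions[moveNumber];
    return (reductionScale + 1091 - delta * 759 / rootDelta) / 1024
         + (!improving && reductionScale > 950);
}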