VVLTC search tune
This patch is the result of two tuning stages:
1. ~32k games at 60+0.6 th8:
   https://tests.stockfishchess.org/tests/view/662d9dea6115ff6764c7f817
2. ~193k games at 80+0.8 th6, based on PR #5211:
   https://tests.stockfishchess.org/tests/view/663587e273559a8aa857ca00.
   Extensive VVLTC tuning and testing both before and after #5211
   indicate that the introduction of the new extensions positively
   affected the search tune results.

Passed VVLTC 70+0.7 th7 1st sprt: https://tests.stockfishchess.org/tests/view/6636c6f04b68b70d85801409
LLR: 2.94 (-2.94,2.94) <0.00,2.00>
Total: 18566 W: 4864 L: 4620 D: 9082
Ptnml(0-2): 0, 1608, 5827, 1844, 4

Passed VVLTC 70+0.7 th7 2nd sprt: https://tests.stockfishchess.org/tests/view/6636d4b84b68b70d85802ab7
LLR: 2.94 (-2.94,2.94) <0.50,2.50>
Total: 43142 W: 11141 L: 10838 D: 21163
Ptnml(0-2): 4, 3915, 13427, 4224, 1

Passed VVLTC 70+0.7 3rd sprt: https://tests.stockfishchess.org/tests/view/66376b4f9819650825aa230b
LLR: 2.94 (-2.94,2.94) <0.50,2.50>
Total: 40322 W: 10374 L: 10076 D: 19872
Ptnml(0-2): 1, 3660, 12544, 3952, 4

The first two sprts were run against the passed #5211. The third sprt was
run against the latest master.
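
For context on the fishtest lines above: W/L/D are total game results, Ptnml(0-2) are game-pair counts used by the pentanomial SPRT, and LLR is the sequential log-likelihood ratio (the test passes when it reaches the upper stopping bound of (-2.94,2.94)) for the Elo hypotheses given in angle brackets. As a rough illustration only, not part of the patch and ignoring the pentanomial pairing, the W/L/D totals of the first sprt can be converted into a naive logistic Elo estimate like this:

    // Illustration only: naive Elo estimate from the 1st sprt's W/L/D totals.
    // Fishtest's actual SPRT uses the Ptnml(0-2) pair counts and a generalized
    // LLR, which this sketch deliberately ignores.
    #include <cmath>
    #include <cstdio>

    int main() {
        const double W = 4864, L = 4620, D = 9082;          // totals from the 1st sprt
        const double score = (W + 0.5 * D) / (W + L + D);   // points per game
        const double elo = -400.0 * std::log10(1.0 / score - 1.0);  // logistic Elo model
        std::printf("score = %.4f, ~%.2f Elo\n", score, elo);       // about +4.6 Elo here
        return 0;
    }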

closes #5216

Bench: 2180675
XInTheDark authored and Disservin committed May 5, 2024
1 parent 61f12a4 commit 070e564
Showing 1 changed file with 40 additions and 40 deletions.
80 changes: 40 additions & 40 deletions src/search.cpp
@@ -59,9 +59,9 @@ static constexpr double EvalLevel[10] = {0.981, 0.956, 0.895, 0.949, 0.913,

// Futility margin
Value futility_margin(Depth d, bool noTtCutNode, bool improving, bool oppWorsening) {
- Value futilityMult = 118 - 45 * noTtCutNode;
- Value improvingDeduction = 52 * improving * futilityMult / 32;
- Value worseningDeduction = (316 + 48 * improving) * oppWorsening * futilityMult / 1024;
+ Value futilityMult = 126 - 46 * noTtCutNode;
+ Value improvingDeduction = 58 * improving * futilityMult / 32;
+ Value worseningDeduction = (323 + 52 * improving) * oppWorsening * futilityMult / 1024;

return futilityMult * d - improvingDeduction - worseningDeduction;
}
@@ -73,15 +73,15 @@ constexpr int futility_move_count(bool improving, Depth depth) {
// Add correctionHistory value to raw staticEval and guarantee evaluation does not hit the tablebase range
Value to_corrected_static_eval(Value v, const Worker& w, const Position& pos) {
auto cv = w.correctionHistory[pos.side_to_move()][pawn_structure_index<Correction>(pos)];
- v += cv * std::abs(cv) / 9260;
+ v += cv * std::abs(cv) / 7350;
return std::clamp(v, VALUE_TB_LOSS_IN_MAX_PLY + 1, VALUE_TB_WIN_IN_MAX_PLY - 1);
}

// History and stats update bonus, based on depth
- int stat_bonus(Depth d) { return std::clamp(214 * d - 318, 16, 1304); }
+ int stat_bonus(Depth d) { return std::clamp(208 * d - 297, 16, 1406); }

// History and stats update malus, based on depth
- int stat_malus(Depth d) { return (d < 4 ? 572 * d - 284 : 1355); }
+ int stat_malus(Depth d) { return (d < 4 ? 520 * d - 312 : 1479); }

// Add a small random component to draw evaluations to avoid 3-fold blindness
Value value_draw(size_t nodes) { return VALUE_DRAW - 1 + Value(nodes & 0x2); }
@@ -310,12 +310,12 @@ void Search::Worker::iterative_deepening() {

// Reset aspiration window starting size
Value avg = rootMoves[pvIdx].averageScore;
- delta = 10 + avg * avg / 11480;
+ delta = 10 + avg * avg / 9530;
alpha = std::max(avg - delta, -VALUE_INFINITE);
beta = std::min(avg + delta, VALUE_INFINITE);

// Adjust optimism based on root move's averageScore (~4 Elo)
- optimism[us] = 122 * avg / (std::abs(avg) + 92);
+ optimism[us] = 119 * avg / (std::abs(avg) + 88);
optimism[~us] = -optimism[us];

// Start with a small aspiration window and, in the case of a fail
@@ -502,10 +502,10 @@ void Search::Worker::clear() {
for (StatsType c : {NoCaptures, Captures})
for (auto& to : continuationHistory[inCheck][c])
for (auto& h : to)
- h->fill(-65);
+ h->fill(-60);

for (size_t i = 1; i < reductions.size(); ++i)
- reductions[i] = int((20.14 + std::log(size_t(options["Threads"])) / 2) * std::log(i));
+ reductions[i] = int((18.93 + std::log(size_t(options["Threads"])) / 2) * std::log(i));

refreshTable.clear(networks);
}
@@ -738,7 +738,7 @@ Value Search::Worker::search(
// Use static evaluation difference to improve quiet move ordering (~9 Elo)
if (((ss - 1)->currentMove).is_ok() && !(ss - 1)->inCheck && !priorCapture)
{
- int bonus = std::clamp(-14 * int((ss - 1)->staticEval + ss->staticEval), -1644, 1384);
+ int bonus = std::clamp(-13 * int((ss - 1)->staticEval + ss->staticEval), -1796, 1526);
bonus = bonus > 0 ? 2 * bonus : bonus / 2;
thisThread->mainHistory[~us][((ss - 1)->currentMove).from_to()] << bonus;
if (type_of(pos.piece_on(prevSq)) != PAWN && ((ss - 1)->currentMove).type_of() != PROMOTION)
@@ -761,7 +761,7 @@ Value Search::Worker::search(
// If eval is really low check with qsearch if it can exceed alpha, if it can't,
// return a fail low.
// Adjust razor margin according to cutoffCnt. (~1 Elo)
- if (eval < alpha - 471 - (275 - 148 * ((ss + 1)->cutoffCnt > 3)) * depth * depth)
+ if (eval < alpha - 433 - (302 - 141 * ((ss + 1)->cutoffCnt > 3)) * depth * depth)
{
value = qsearch<NonPV>(pos, ss, alpha - 1, alpha);
if (value < alpha)
@@ -770,23 +770,23 @@

// Step 8. Futility pruning: child node (~40 Elo)
// The depth condition is important for mate finding.
- if (!ss->ttPv && depth < 12
+ if (!ss->ttPv && depth < 11
&& eval - futility_margin(depth, cutNode && !ss->ttHit, improving, opponentWorsening)
- - (ss - 1)->statScore / 286
+ - (ss - 1)->statScore / 254
>= beta
&& eval >= beta && eval < VALUE_TB_WIN_IN_MAX_PLY && (!ttMove || ttCapture))
return beta > VALUE_TB_LOSS_IN_MAX_PLY ? (eval + beta) / 2 : eval;

// Step 9. Null move search with verification search (~35 Elo)
- if (!PvNode && (ss - 1)->currentMove != Move::null() && (ss - 1)->statScore < 18001
- && eval >= beta && ss->staticEval >= beta - 21 * depth + 312 && !excludedMove
+ if (!PvNode && (ss - 1)->currentMove != Move::null() && (ss - 1)->statScore < 16993
+ && eval >= beta && ss->staticEval >= beta - 19 * depth + 326 && !excludedMove
&& pos.non_pawn_material(us) && ss->ply >= thisThread->nmpMinPly
&& beta > VALUE_TB_LOSS_IN_MAX_PLY)
{
assert(eval - beta >= 0);

// Null move dynamic reduction based on depth and eval
- Depth R = std::min(int(eval - beta) / 152, 6) + depth / 3 + 4;
+ Depth R = std::min(int(eval - beta) / 134, 6) + depth / 3 + 4;

ss->currentMove = Move::null();
ss->continuationHistory = &thisThread->continuationHistory[0][0][NO_PIECE][0];
@@ -834,7 +834,7 @@ Value Search::Worker::search(
// Step 11. ProbCut (~10 Elo)
// If we have a good enough capture (or queen promotion) and a reduced search returns a value
// much above beta, we can (almost) safely prune the previous move.
- probCutBeta = beta + 169 - 63 * improving;
+ probCutBeta = beta + 159 - 66 * improving;
if (
!PvNode && depth > 3
&& std::abs(beta) < VALUE_TB_WIN_IN_MAX_PLY
@@ -890,7 +890,7 @@ Value Search::Worker::search(
moves_loop: // When in check, search starts here

// Step 12. A small Probcut idea, when we are in check (~4 Elo)
- probCutBeta = beta + 452;
+ probCutBeta = beta + 420;
if (ss->inCheck && !PvNode && ttCapture && (tte->bound() & BOUND_LOWER)
&& tte->depth() >= depth - 4 && ttValue >= probCutBeta
&& std::abs(ttValue) < VALUE_TB_WIN_IN_MAX_PLY && std::abs(beta) < VALUE_TB_WIN_IN_MAX_PLY)
@@ -975,15 +975,15 @@ Value Search::Worker::search(
// Futility pruning for captures (~2 Elo)
if (!givesCheck && lmrDepth < 7 && !ss->inCheck)
{
- Value futilityValue = ss->staticEval + 285 + 277 * lmrDepth
+ Value futilityValue = ss->staticEval + 295 + 280 * lmrDepth
+ PieceValue[capturedPiece] + captHist / 7;
if (futilityValue <= alpha)
continue;
}

// SEE based pruning for captures and checks (~11 Elo)
- int seeHist = std::clamp(captHist / 32, -199 * depth, 199 * depth);
- if (!pos.see_ge(move, -203 * depth - seeHist))
+ int seeHist = std::clamp(captHist / 32, -197 * depth, 196 * depth);
+ if (!pos.see_ge(move, -186 * depth - seeHist))
continue;
}
else
@@ -995,18 +995,18 @@
+ thisThread->pawnHistory[pawn_structure_index(pos)][movedPiece][move.to_sq()];

// Continuation history based pruning (~2 Elo)
- if (lmrDepth < 6 && history < -4173 * depth)
+ if (lmrDepth < 6 && history < -4081 * depth)
continue;

history += 2 * thisThread->mainHistory[us][move.from_to()];

- lmrDepth += history / 5285;
+ lmrDepth += history / 4768;

Value futilityValue =
- ss->staticEval + (bestValue < ss->staticEval - 54 ? 128 : 57) + 131 * lmrDepth;
+ ss->staticEval + (bestValue < ss->staticEval - 52 ? 134 : 54) + 142 * lmrDepth;

// Futility pruning: parent node (~13 Elo)
- if (!ss->inCheck && lmrDepth < 14 && futilityValue <= alpha)
+ if (!ss->inCheck && lmrDepth < 13 && futilityValue <= alpha)
{
if (bestValue <= futilityValue && std::abs(bestValue) < VALUE_TB_WIN_IN_MAX_PLY
&& futilityValue < VALUE_TB_WIN_IN_MAX_PLY)
@@ -1017,7 +1017,7 @@
lmrDepth = std::max(lmrDepth, 0);

// Prune moves with negative SEE (~4 Elo)
- if (!pos.see_ge(move, -27 * lmrDepth * lmrDepth))
+ if (!pos.see_ge(move, -28 * lmrDepth * lmrDepth))
continue;
}
}
@@ -1037,11 +1037,11 @@ Value Search::Worker::search(
// so changing them requires tests at these types of time controls.
// Recursive singular search is avoided.
if (!rootNode && move == ttMove && !excludedMove
- && depth >= 4 - (thisThread->completedDepth > 33) + ss->ttPv
+ && depth >= 4 - (thisThread->completedDepth > 32) + ss->ttPv
&& std::abs(ttValue) < VALUE_TB_WIN_IN_MAX_PLY && (tte->bound() & BOUND_LOWER)
&& tte->depth() >= depth - 3)
{
- Value singularBeta = ttValue - (65 + 59 * (ss->ttPv && !PvNode)) * depth / 63;
+ Value singularBeta = ttValue - (65 + 52 * (ss->ttPv && !PvNode)) * depth / 63;
Depth singularDepth = newDepth / 2;

ss->excludedMove = move;
@@ -1099,7 +1099,7 @@ Value Search::Worker::search(
else if (PvNode && move == ttMove && move.to_sq() == prevSq
&& thisThread->captureHistory[movedPiece][move.to_sq()]
[type_of(pos.piece_on(move.to_sq()))]
- > 3807)
+ > 4016)
extension = 1;
}

@@ -1151,10 +1151,10 @@ Value Search::Worker::search(
ss->statScore = 2 * thisThread->mainHistory[us][move.from_to()]
+ (*contHist[0])[movedPiece][move.to_sq()]
+ (*contHist[1])[movedPiece][move.to_sq()]
- + (*contHist[3])[movedPiece][move.to_sq()] - 5024;
+ + (*contHist[3])[movedPiece][move.to_sq()] - 5078;

// Decrease/increase reduction for moves with a good/bad history (~8 Elo)
- r -= ss->statScore / 13182;
+ r -= ss->statScore / 12076;

// Step 17. Late moves reduction / extension (LMR, ~117 Elo)
if (depth >= 2 && moveCount > 1 + rootNode)
@@ -1173,7 +1173,7 @@
{
// Adjust full-depth search based on LMR results - if the result
// was good enough search deeper, if it was bad enough search shallower.
- const bool doDeeperSearch = value > (bestValue + 42 + 2 * newDepth); // (~1 Elo)
+ const bool doDeeperSearch = value > (bestValue + 40 + 2 * newDepth); // (~1 Elo)
const bool doShallowerSearch = value < bestValue + newDepth; // (~2 Elo)

newDepth += doDeeperSearch - doShallowerSearch;
@@ -1291,7 +1291,7 @@ Value Search::Worker::search(
else
{
// Reduce other moves if we have found at least one score improvement (~2 Elo)
- if (depth > 2 && depth < 12 && beta < 13546 && value > -13478)
+ if (depth > 2 && depth < 13 && beta < 15868 && value > -14630)
depth -= 2;

assert(depth > 0);
Expand Down Expand Up @@ -1334,8 +1334,8 @@ Value Search::Worker::search(
// Bonus for prior countermove that caused the fail low
else if (!priorCapture && prevSq != SQ_NONE)
{
- int bonus = (depth > 5) + (PvNode || cutNode) + ((ss - 1)->statScore < -14761)
- + ((ss - 1)->moveCount > 11) + (!ss->inCheck && bestValue <= ss->staticEval - 142)
+ int bonus = (depth > 5) + (PvNode || cutNode) + ((ss - 1)->statScore < -14455)
+ + ((ss - 1)->moveCount > 10) + (!ss->inCheck && bestValue <= ss->staticEval - 130)
+ (!(ss - 1)->inCheck && bestValue <= -(ss - 1)->staticEval - 77);
update_continuation_histories(ss - 1, pos.piece_on(prevSq), prevSq,
stat_bonus(depth) * bonus);
@@ -1495,7 +1495,7 @@ Value Search::Worker::qsearch(Position& pos, Stack* ss, Value alpha, Value beta,
if (bestValue > alpha)
alpha = bestValue;

- futilityBase = ss->staticEval + 250;
+ futilityBase = ss->staticEval + 270;
}

const PieceToHistory* contHist[] = {(ss - 1)->continuationHistory,
@@ -1575,7 +1575,7 @@ Value Search::Worker::qsearch(Position& pos, Stack* ss, Value alpha, Value beta,
continue;

// Do not search moves with bad enough SEE values (~5 Elo)
- if (!pos.see_ge(move, -79))
+ if (!pos.see_ge(move, -69))
continue;
}

@@ -1643,7 +1643,7 @@ Value Search::Worker::qsearch(Position& pos, Stack* ss, Value alpha, Value beta,

Depth Search::Worker::reduction(bool i, Depth d, int mn, int delta) {
int reductionScale = reductions[d] * reductions[mn];
- return (reductionScale + 1150 - delta * 832 / rootDelta) / 1024 + (!i && reductionScale > 1025);
+ return (reductionScale + 1318 - delta * 760 / rootDelta) / 1024 + (!i && reductionScale > 1066);
}

TimePoint Search::Worker::elapsed() const {
@@ -1736,7 +1736,7 @@ void update_all_stats(const Position& pos,

if (!pos.capture_stage(bestMove))
{
- int bestMoveBonus = bestValue > beta + 185 ? quietMoveBonus // larger bonus
+ int bestMoveBonus = bestValue > beta + 165 ? quietMoveBonus // larger bonus
: stat_bonus(depth); // smaller bonus

update_quiet_stats(pos, ss, workerThread, bestMove, bestMoveBonus);
