positionContext -> position

This commit is contained in:
CalciteM Team 2019-09-15 18:14:53 +08:00
parent df23c9dc5f
commit 24b732ca08
3 changed files with 31 additions and 31 deletions

View File

@@ -21,7 +21,7 @@
#include "evaluate.h"
value_t Evaluation::getValue(Game &dummyGame, Position *positionContext, MillGameAi_ab::Node *node)
value_t Evaluation::getValue(Game &dummyGame, Position *position, MillGameAi_ab::Node *node)
{
// 初始评估值为0对先手有利则增大对后手有利则减小
value_t value = VALUE_ZERO;
@@ -31,31 +31,31 @@ value_t Evaluation::getValue(Game &dummyGame, Position *positionContext, MillGam
int nPiecesNeedRemove = 0;
#ifdef DEBUG_AB_TREE
node->phase = positionContext->phase;
node->action = positionContext->action;
node->phase = position->phase;
node->action = position->action;
node->evaluated = true;
#endif
switch (positionContext->phase) {
switch (position->phase) {
case PHASE_NOTSTARTED:
break;
case PHASE_PLACING:
// 按手中的棋子计分不要break;
nPiecesInHandDiff = positionContext->nPiecesInHand[1] - positionContext->nPiecesInHand[2];
nPiecesInHandDiff = position->nPiecesInHand[1] - position->nPiecesInHand[2];
value += nPiecesInHandDiff * VALUE_EACH_PIECE_INHAND;
#ifdef DEBUG_AB_TREE
node->nPiecesInHandDiff = nPiecesInHandDiff;
#endif
// 按场上棋子计分
nPiecesOnBoardDiff = positionContext->nPiecesOnBoard[1] - positionContext->nPiecesOnBoard[2];
nPiecesOnBoardDiff = position->nPiecesOnBoard[1] - position->nPiecesOnBoard[2];
value += nPiecesOnBoardDiff * VALUE_EACH_PIECE_ONBOARD;
#ifdef DEBUG_AB_TREE
node->nPiecesOnBoardDiff = nPiecesOnBoardDiff;
#endif
switch (positionContext->action) {
switch (position->action) {
// 选子和落子使用相同的评价方法
case ACTION_CHOOSE:
case ACTION_PLACE:
@@ -63,8 +63,8 @@ value_t Evaluation::getValue(Game &dummyGame, Position *positionContext, MillGam
// 如果形成去子状态每有一个可去的子算100分
case ACTION_CAPTURE:
nPiecesNeedRemove = (positionContext->turn == PLAYER_1) ?
positionContext->nPiecesNeedRemove : -(positionContext->nPiecesNeedRemove);
nPiecesNeedRemove = (position->turn == PLAYER_1) ?
position->nPiecesNeedRemove : -(position->nPiecesNeedRemove);
value += nPiecesNeedRemove * VALUE_EACH_PIECE_NEEDREMOVE;
#ifdef DEBUG_AB_TREE
node->nPiecesNeedRemove = nPiecesNeedRemove;
@@ -78,15 +78,15 @@ value_t Evaluation::getValue(Game &dummyGame, Position *positionContext, MillGam
case PHASE_MOVING:
// 按场上棋子计分
value = positionContext->nPiecesOnBoard[1] * VALUE_EACH_PIECE_ONBOARD -
positionContext->nPiecesOnBoard[2] * VALUE_EACH_PIECE_ONBOARD;
value = position->nPiecesOnBoard[1] * VALUE_EACH_PIECE_ONBOARD -
position->nPiecesOnBoard[2] * VALUE_EACH_PIECE_ONBOARD;
#ifdef EVALUATE_MOBILITY
// 按棋子活动能力计分
value += dummyGame.getMobilityDiff(positionContext->turn, dummyGame.currentRule, positionContext->nPiecesInHand[1], positionContext->nPiecesInHand[2], false) * 10;
value += dummyGame.getMobilityDiff(position->turn, dummyGame.currentRule, position->nPiecesInHand[1], position->nPiecesInHand[2], false) * 10;
#endif /* EVALUATE_MOBILITY */
switch (positionContext->action) {
switch (position->action) {
// 选子和落子使用相同的评价方法
case ACTION_CHOOSE:
case ACTION_PLACE:
@@ -94,8 +94,8 @@ value_t Evaluation::getValue(Game &dummyGame, Position *positionContext, MillGam
// 如果形成去子状态每有一个可去的子算128分
case ACTION_CAPTURE:
nPiecesNeedRemove = (positionContext->turn == PLAYER_1) ?
positionContext->nPiecesNeedRemove : -(positionContext->nPiecesNeedRemove);
nPiecesNeedRemove = (position->turn == PLAYER_1) ?
position->nPiecesNeedRemove : -(position->nPiecesNeedRemove);
value += nPiecesNeedRemove * VALUE_EACH_PIECE_NEEDREMOVE_2;
#ifdef DEBUG_AB_TREE
node->nPiecesNeedRemove = nPiecesNeedRemove;
@@ -110,7 +110,7 @@ value_t Evaluation::getValue(Game &dummyGame, Position *positionContext, MillGam
// 终局评价最简单
case PHASE_GAMEOVER:
// 布局阶段闷棋判断
if (positionContext->nPiecesOnBoard[1] + positionContext->nPiecesOnBoard[2] >=
if (position->nPiecesOnBoard[1] + position->nPiecesOnBoard[2] >=
Board::N_SEATS * Board::N_RINGS) {
if (dummyGame.getRule()->isStartingPlayerLoseWhenBoardFull) {
value -= VALUE_WIN;
@@ -120,18 +120,18 @@ value_t Evaluation::getValue(Game &dummyGame, Position *positionContext, MillGam
}
// 走棋阶段被闷判断
if (positionContext->action == ACTION_CHOOSE &&
dummyGame.context.board.isAllSurrounded(positionContext->turn, dummyGame.currentRule, positionContext->nPiecesOnBoard, positionContext->turn) &&
if (position->action == ACTION_CHOOSE &&
dummyGame.context.board.isAllSurrounded(position->turn, dummyGame.currentRule, position->nPiecesOnBoard, position->turn) &&
dummyGame.getRule()->isLoseWhenNoWay) {
// 规则要求被“闷”判负,则对手获胜
value_t delta = positionContext->turn == PLAYER_1 ? -VALUE_WIN : VALUE_WIN;
value_t delta = position->turn == PLAYER_1 ? -VALUE_WIN : VALUE_WIN;
value += delta;
}
// 剩余棋子个数判断
if (positionContext->nPiecesOnBoard[1] < dummyGame.getRule()->nPiecesAtLeast) {
if (position->nPiecesOnBoard[1] < dummyGame.getRule()->nPiecesAtLeast) {
value -= VALUE_WIN;
} else if (positionContext->nPiecesOnBoard[2] < dummyGame.getRule()->nPiecesAtLeast) {
} else if (position->nPiecesOnBoard[2] < dummyGame.getRule()->nPiecesAtLeast) {
value += VALUE_WIN;
}

View File

@@ -276,7 +276,7 @@ void MillGameAi_ab::setGame(const Game &game)
this->game_ = game;
dummyGame = game;
positionContext = &(dummyGame.context);
position = &(dummyGame.context);
requiredQuit = false;
deleteTree(rootNode);
#ifdef MEMORY_POOL
@@ -425,7 +425,7 @@ value_t MillGameAi_ab::alphaBetaPruning(depth_t depth, value_t alpha, value_t be
#if 0
// TODO: 有必要针对深度微调 value?
if (positionContext->turn == PLAYER_1)
if (position->turn == PLAYER_1)
node->value += hashValue.depth - depth;
else
node->value -= hashValue.depth - depth;
@@ -440,7 +440,7 @@ value_t MillGameAi_ab::alphaBetaPruning(depth_t depth, value_t alpha, value_t be
#ifdef DEBUG_AB_TREE
node->depth = depth;
node->root = rootNode;
// node->player = positionContext->turn;
// node->player = position->turn;
// 初始化
node->isLeaf = false;
node->isTimeout = false;
@@ -452,9 +452,9 @@ value_t MillGameAi_ab::alphaBetaPruning(depth_t depth, value_t alpha, value_t be
#endif // DEBUG_AB_TREE
// 搜索到叶子节点(决胜局面) // TODO: 对哈希进行特殊处理
if (positionContext->phase == PHASE_GAMEOVER) {
if (position->phase == PHASE_GAMEOVER) {
// 局面评估
node->value = Evaluation::getValue(dummyGame, positionContext, node);
node->value = Evaluation::getValue(dummyGame, position, node);
evaluatedNodeCount++;
// 为争取速胜value 值 +- 深度
@@ -479,11 +479,11 @@ value_t MillGameAi_ab::alphaBetaPruning(depth_t depth, value_t alpha, value_t be
// 搜索到第0层或需要退出
if (!depth || requiredQuit) {
// 局面评估
node->value = Evaluation::getValue(dummyGame, positionContext, node);
node->value = Evaluation::getValue(dummyGame, position, node);
evaluatedNodeCount++;
// 为争取速胜value 值 +- 深度 (有必要?)
value_t delta = value_t(positionContext->turn == PLAYER_1 ? depth : -depth);
value_t delta = value_t(position->turn == PLAYER_1 ? depth : -depth);
node->value += delta;
#ifdef DEBUG_AB_TREE
@@ -494,8 +494,8 @@ value_t MillGameAi_ab::alphaBetaPruning(depth_t depth, value_t alpha, value_t be
#ifdef BOOK_LEARNING
// 检索开局库
if (positionContext->phase == GAME_PLACING && findBookHash(hash, hashValue)) {
if (positionContext->turn == ???) {
if (position->phase == GAME_PLACING && findBookHash(hash, hashValue)) {
if (position->turn == ???) {
// TODO:
node->value += 1;
}

View File

@@ -187,7 +187,7 @@ private:
// 演算用的模型
Game dummyGame;
Position *positionContext {};
Position *position {};
// hash 计算时,各种转换用的模型
Game dummyGameShift;