diff --git a/include/config.h b/include/config.h
index 2715420c..afa24495 100644
--- a/include/config.h
+++ b/include/config.h
@@ -74,7 +74,7 @@
//#define RAPID_GAME
-//#define BOOK_LEARNING
+#define ENDGAME_LEARNING
#define THREEFOLD_REPETITION
diff --git a/millgame.pro b/millgame.pro
index 793473ab..776665f6 100644
--- a/millgame.pro
+++ b/millgame.pro
@@ -22,6 +22,7 @@ INCLUDEPATH += src/game
INCLUDEPATH += src/ui/qt
SOURCES += \
+ src/ai/endgame.cpp \
src/ai/evaluate.cpp \
src/ai/movegen.cpp \
src/ai/tt.cpp \
@@ -47,6 +48,7 @@ HEADERS += \
include/config.h \
include/version.h \
include/version.h.template \
+ src/ai/endgame.h \
src/ai/evaluate.h \
src/ai/movegen.h \
src/ai/tt.h \
@@ -59,7 +61,6 @@ HEADERS += \
src/base/stackalloc.h \
src/base/thread.h \
src/ai/search.h \
- src/ai/zobrist.h \
src/base/zobrist.h \
src/game/board.h \
src/game/player.h \
diff --git a/millgame.vcxproj b/millgame.vcxproj
index 6a03eba0..454502ba 100644
--- a/millgame.vcxproj
+++ b/millgame.vcxproj
@@ -441,6 +441,7 @@
+    <ClCompile Include="src\ai\endgame.cpp" />
@@ -692,6 +693,7 @@
+    <ClInclude Include="src\ai\endgame.h" />
diff --git a/millgame.vcxproj.filters b/millgame.vcxproj.filters
index b02d46ef..51f9fd0a 100644
--- a/millgame.vcxproj.filters
+++ b/millgame.vcxproj.filters
@@ -117,6 +117,9 @@
base
+    <ClCompile Include="src\ai\endgame.cpp">
+      <Filter>ai</Filter>
+    </ClCompile>
@@ -335,6 +338,9 @@
base
+    <ClInclude Include="src\ai\endgame.h">
+      <Filter>ai</Filter>
+    </ClInclude>
diff --git a/src/ai/endgame.cpp b/src/ai/endgame.cpp
new file mode 100644
index 00000000..8e1d5989
--- /dev/null
+++ b/src/ai/endgame.cpp
@@ -0,0 +1,27 @@
+/*****************************************************************************
+ * Copyright (C) 2018-2019 MillGame authors
+ *
+ * Authors: liuweilhy
+ * Calcitem
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA.
+ *****************************************************************************/
+
+#include "endgame.h"
+
+#ifdef ENDGAME_LEARNING
+static constexpr int endgameHashsize = 0x1000000; // 16M
+HashMap<hash_t, Endgame> endgameHashMap(endgameHashsize);
+#endif // ENDGAME_LEARNING
diff --git a/src/ai/endgame.h b/src/ai/endgame.h
new file mode 100644
index 00000000..be09c8fd
--- /dev/null
+++ b/src/ai/endgame.h
@@ -0,0 +1,56 @@
+/*****************************************************************************
+ * Copyright (C) 2018-2019 MillGame authors
+ *
+ * Authors: liuweilhy
+ * Calcitem
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA.
+ *****************************************************************************/
+
+#ifndef ENDGAME_H
+#define ENDGAME_H
+
+#include "config.h"
+
+#ifdef ENDGAME_LEARNING
+
+#include <cstdint>
+
+#include "types.h"
+#include "hashmap.h"
+
+using namespace std;
+using namespace CTSL;
+
+enum endgame_t : uint8_t
+{
+ ENDGAME_NONE,
+ ENDGAME_PLAYER_1_WIN,
+ ENDGAME_PLAYER_2_WIN,
+ ENDGAME_DRAW,
+};
+
+//#pragma pack (push, 1)
+struct Endgame
+{
+ endgame_t type;
+};
+//#pragma pack(pop)
+
+extern HashMap<hash_t, Endgame> endgameHashMap;
+
+#endif // ENDGAME_LEARNING
+
+#endif // ENDGAME_H
diff --git a/src/ai/evaluate.h b/src/ai/evaluate.h
index 4487f2b5..ac9e9736 100644
--- a/src/ai/evaluate.h
+++ b/src/ai/evaluate.h
@@ -40,49 +40,49 @@ public:
#ifdef EVALUATE_ENABLE
#ifdef EVALUATE_MATERIAL
- static value_t evaluateMaterial(MillGameAi_ab::Node *node)
+ static value_t evaluateMaterial(AIAlgorithm::Node *node)
{
return 0;
}
#endif
#ifdef EVALUATE_SPACE
- static value_t evaluateSpace(MillGameAi_ab::Node *node)
+ static value_t evaluateSpace(AIAlgorithm::Node *node)
{
return 0;
}
#endif
#ifdef EVALUATE_MOBILITY
- static value_t evaluateMobility(MillGameAi_ab::Node *node)
+ static value_t evaluateMobility(AIAlgorithm::Node *node)
{
return 0;
}
#endif
#ifdef EVALUATE_TEMPO
- static value_t evaluateTempo(MillGameAi_ab::Node *node)
+ static value_t evaluateTempo(AIAlgorithm::Node *node)
{
return 0;
}
#endif
#ifdef EVALUATE_THREAT
- static value_t evaluateThreat(MillGameAi_ab::Node *node)
+ static value_t evaluateThreat(AIAlgorithm::Node *node)
{
return 0;
}
#endif
#ifdef EVALUATE_SHAPE
- static value_t evaluateShape(MillGameAi_ab::Node *node)
+ static value_t evaluateShape(AIAlgorithm::Node *node)
{
return 0;
}
#endif
#ifdef EVALUATE_MOTIF
- static value_t MillGameAi_ab::evaluateMotif(MillGameAi_ab::Node *node)
+ static value_t AIAlgorithm::evaluateMotif(AIAlgorithm::Node *node)
{
return 0;
}
diff --git a/src/ai/search.cpp b/src/ai/search.cpp
index f89a6fdf..8c6b4731 100644
--- a/src/ai/search.cpp
+++ b/src/ai/search.cpp
@@ -29,16 +29,11 @@
#include "movegen.h"
#include "hashmap.h"
#include "tt.h"
+#include "endgame.h"
#include "types.h"
using namespace CTSL;
-#ifdef BOOK_LEARNING
-static constexpr int bookHashsize = 0x1000000; // 16M
-HashMap<hash_t, HashValue> bookHashMap(bookHashsize);
-vector<hash_t> openingBook;
-#endif // BOOK_LEARNING
-
// 用于检测重复局面 (Position)
vector<hash_t> history;
@@ -79,11 +74,19 @@ depth_t AIAlgorithm::changeDepth(depth_t origDepth)
12, 12, 13, 14, /* 20 ~ 23 */
};
+#ifdef ENDGAME_LEARNING
+ const depth_t movingDiffDepthTable[] = {
+ 0, 0, 0, /* 0 ~ 2 */
+ 0, 0, 0, 0, 0, /* 3 ~ 7 */
+ 0, 0, 0, 0, 0 /* 8 ~ 12 */
+ };
+#else
const depth_t movingDiffDepthTable[] = {
0, 0, 0, /* 0 ~ 2 */
11, 10, 9, 8, 7, /* 3 ~ 7 */
6, 5, 4, 3, 2 /* 8 ~ 12 */
};
+#endif /* ENDGAME_LEARNING */
if ((tempGame.position.phase) & (PHASE_PLACING)) {
d = placingDepthTable[tempGame.getPiecesInHandCount(1)];
@@ -288,11 +291,11 @@ void AIAlgorithm::setGame(const Game &game)
TranspositionTable::clear();
#endif // TRANSPOSITION_TABLE_ENABLE
-#ifdef BOOK_LEARNING
- // TODO: 规则改变时清空学习表
- //clearBookHashMap();
- //openingBook.clear();
-#endif // BOOK_LEARNING
+#ifdef ENDGAME_LEARNING
+ // TODO: 规则改变时清空残局库
+ //clearEndgameHashMap();
+ //endgameList.clear();
+#endif // ENDGAME_LEARNING
history.clear();
}
@@ -332,19 +335,6 @@ int AIAlgorithm::search(depth_t depth)
auto timeStart = chrono::steady_clock::now();
chrono::steady_clock::time_point timeEnd;
-#ifdef BOOK_LEARNING
- if (position_.getPhase() == GAME_PLACING)
- {
- if (position_.position.nPiecesInHand[1] <= 10) {
- // 开局库只记录摆棋阶段最后的局面
- openingBook.push_back(position_.getHash());
- } else {
- // 暂时在此处清空开局库
- openingBook.clear();
- }
- }
-#endif
-
#ifdef THREEFOLD_REPETITION
static int nRepetition = 0;
@@ -415,18 +405,37 @@ value_t AIAlgorithm::search(depth_t depth, value_t alpha, value_t beta, Node *no
// 子节点的最优着法
move_t bestMove = MOVE_NONE;
-#if ((defined TRANSPOSITION_TABLE_ENABLE) || (defined BOOK_LEARNING))
- // 哈希类型
- enum TranspositionTable::HashType hashf = TranspositionTable::hashfALPHA;
-
+#if defined (TRANSPOSITION_TABLE_ENABLE) || defined(ENDGAME_LEARNING)
// 获取哈希值
hash_t hash = tempGame.getHash();
+#endif
+
+#ifdef ENDGAME_LEARNING
+ // 检索残局库
+ Endgame endgame;
+
+ if (findEndgameHash(hash, endgame)) {
+ switch (endgame.type) {
+ case ENDGAME_PLAYER_1_WIN:
+ node->value = VALUE_WIN; break;
+ case ENDGAME_PLAYER_2_WIN:
+ node->value = -VALUE_WIN; break;
+ default:
+ break;
+ }
+
+ return node->value;
+ }
+#endif /* ENDGAME_LEARNING */
+
+#ifdef TRANSPOSITION_TABLE_ENABLE
+ // 哈希类型
+ enum TranspositionTable::HashType hashf = TranspositionTable::hashfALPHA;
+
#ifdef DEBUG_AB_TREE
node->hash = hash;
#endif
-#endif
-#ifdef TRANSPOSITION_TABLE_ENABLE
TranspositionTable::HashType type = TranspositionTable::hashfEMPTY;
value_t probeVal = TranspositionTable::probeHash(hash, depth, alpha, beta, bestMove, type);
@@ -455,7 +464,7 @@ value_t AIAlgorithm::search(depth_t depth, value_t alpha, value_t beta, Node *no
#endif
return node->value;
-}
+ }
//hashMapMutex.unlock();
#endif /* TRANSPOSITION_TABLE_ENABLE */
@@ -515,16 +524,6 @@ value_t AIAlgorithm::search(depth_t depth, value_t alpha, value_t beta, Node *no
}
#endif
-#ifdef BOOK_LEARNING
- // 检索开局库
- if (position->phase == GAME_PLACING && findBookHash(hash, hashValue)) {
- if (position->turn == ???) {
- // TODO:
- node->value += 1;
- }
- }
-#endif
-
#ifdef TRANSPOSITION_TABLE_ENABLE
// 记录确切的哈希值
TranspositionTable::recordHash(node->value, depth, TranspositionTable::hashfEXACT, hash, MOVE_NONE);
@@ -702,6 +701,13 @@ const char* AIAlgorithm::bestMove()
// 自动认输
if (isMostLose) {
+#ifdef ENDGAME_LEARNING
+ Endgame endgame;
+ endgame.type = game_.position.sideToMove == PLAYER_1 ?
+ ENDGAME_PLAYER_2_WIN : ENDGAME_PLAYER_1_WIN;
+ recordEndgameHash(this->game_.getHash(), endgame);
+#endif /* ENDGAME_LEARNING */
+
sprintf(cmdline, "Player%d give up!", game_.position.sideId);
return cmdline;
}
@@ -760,58 +766,37 @@ const char *AIAlgorithm::moveToCommand(move_t move)
return cmdline;
}
-#ifdef BOOK_LEARNING
-
-bool AIAlgorithm::findBookHash(hash_t hash, HashValue &hashValue)
+#ifdef ENDGAME_LEARNING
+bool AIAlgorithm::findEndgameHash(hash_t hash, Endgame &endgame)
{
- return bookHashMap.find(hash, hashValue);
+ return endgameHashMap.find(hash, endgame);
}
-int AIAlgorithm::recordBookHash(hash_t hash, const HashValue &hashValue)
+int AIAlgorithm::recordEndgameHash(hash_t hash, const Endgame &endgame)
{
//hashMapMutex.lock();
- bookHashMap.insert(hash, hashValue);
+ endgameHashMap.insert(hash, endgame);
//hashMapMutex.unlock();
return 0;
}
-void AIAlgorithm::clearBookHashMap()
+void AIAlgorithm::clearEndgameHashMap()
{
//hashMapMutex.lock();
- bookHashMap.clear();
+ endgameHashMap.clear();
//hashMapMutex.unlock();
}
-void AIAlgorithm::recordOpeningBookToHashMap()
+void AIAlgorithm::recordEndgameHashMapToFile()
{
- HashValue hashValue;
- hash_t hash = 0;
-
- for (auto iter = openingBook.begin(); iter != openingBook.end(); ++iter)
- {
-#if 0
- if (findBookHash(*iter, hashValue))
- {
- }
-#endif
- memset(&hashValue, 0, sizeof(HashValue));
- hash = *iter;
- recordBookHash(hash, hashValue); // 暂时使用直接覆盖策略
- }
-
- openingBook.clear();
+ const QString filename = "endgame.txt";
+ endgameHashMap.dump(filename);
}
-void AIAlgorithm::recordOpeningBookHashMapToFile()
+void AIAlgorithm::loadEndgameFileToHashMap()
{
- const QString bookFileName = "opening-book.txt";
- bookHashMap.dump(bookFileName);
+ const QString filename = "endgame.txt";
+ endgameHashMap.load(filename);
}
-
-void AIAlgorithm::loadOpeningBookFileToHashMap()
-{
- const QString bookFileName = "opening-book.txt";
- bookHashMap.load(bookFileName);
-}
-#endif // BOOK_LEARNING
+#endif // ENDGAME_LEARNING
diff --git a/src/ai/search.h b/src/ai/search.h
index 976d1231..4a3cbb29 100644
--- a/src/ai/search.h
+++ b/src/ai/search.h
@@ -36,7 +36,10 @@
#include
#include "position.h"
+#include "tt.h"
#include "hashmap.h"
+#include "endgame.h"
+#include "types.h"
#ifdef MEMORY_POOL
#include "MemoryPool.h"
@@ -111,7 +114,7 @@ public:
// 返回最佳走法的命令行
const char *bestMove();
-#if ((defined TRANSPOSITION_TABLE_ENABLE) || (defined BOOK_LEARNING))
+#ifdef TRANSPOSITION_TABLE_ENABLE
// 清空哈希表
void clearTranspositionTable();
#endif
@@ -120,14 +123,14 @@ public:
static bool nodeLess(const Node *first, const Node *second);
static bool nodeGreater(const Node *first, const Node *second);
-#ifdef BOOK_LEARNING
- bool findBookHash(hash_t hash, HashValue &hashValue);
- static int recordBookHash(hash_t hash, const HashValue &hashValue);
- void clearBookHashMap();
- static void recordOpeningBookToHashMap();
- static void recordOpeningBookHashMapToFile();
- static void loadOpeningBookFileToHashMap();
-#endif // BOOK_LEARNING
+#ifdef ENDGAME_LEARNING
+ bool findEndgameHash(hash_t hash, Endgame &endgame);
+ static int recordEndgameHash(hash_t hash, const Endgame &endgame);
+ void clearEndgameHashMap();
+ static void recordEndgameHashMapToFile();
+ static void loadEndgameFileToHashMap();
+#endif // ENDGAME_LEARNING
+
public: /* TODO: Move to private or protected */
// 增加新节点
diff --git a/src/game/position.cpp b/src/game/position.cpp
index ae456117..27c8c94d 100644
--- a/src/game/position.cpp
+++ b/src/game/position.cpp
@@ -34,9 +34,9 @@ Game::Game()
// 创建哈希数据
constructHash();
-#ifdef BOOK_LEARNING
- // TODO: 开局库文件被加载了多次
- MillGameAi_ab::loadOpeningBookFileToHashMap();
+#ifdef ENDGAME_LEARNING
+ // TODO: 残局文件被加载了多次
+ AIAlgorithm::loadEndgameFileToHashMap();
#endif
// 默认选择第1号规则,即“打三棋”
@@ -886,9 +886,6 @@ bool Game::win(bool forceDraw)
position.phase = PHASE_GAMEOVER;
sprintf(cmdline, "Player%d win!", o);
cmdlist.emplace_back(string(cmdline));
-#ifdef BOOK_LEARNING
- MillGameAi_ab::recordOpeningBookToHashMap(); // TODO: 目前是对"双方"失败都记录到开局库
-#endif /* BOOK_LEARNING */
return true;
}
@@ -922,9 +919,6 @@ bool Game::win(bool forceDraw)
int winnerId = Player::toId(winner);
sprintf(cmdline, "Player%d no way to go. Player%d win!", position.sideId, winnerId);
cmdlist.emplace_back(string(cmdline));
-#ifdef BOOK_LEARNING
- MillGameAi_ab::recordOpeningBookToHashMap(); // TODO: 目前是对所有的失败记录到开局库
-#endif /* BOOK_LEARNING */
return true;
}
diff --git a/src/ui/qt/gamecontroller.cpp b/src/ui/qt/gamecontroller.cpp
index ff88162a..d20e8111 100644
--- a/src/ui/qt/gamecontroller.cpp
+++ b/src/ui/qt/gamecontroller.cpp
@@ -98,9 +98,9 @@ GameController::~GameController()
delete ai[1];
delete ai[2];
-#ifdef BOOK_LEARNING
- MillGameAi_ab::recordOpeningBookHashMapToFile();
-#endif /* BOOK_LEARNING */
+#ifdef ENDGAME_LEARNING
+ AIAlgorithm::recordEndgameHashMapToFile();
+#endif /* ENDGAME_LEARNING */
}
const QMap GameController::getActions()