From bbaba3b0af9d70d07b72c2c43458282e1acd08a5 Mon Sep 17 00:00:00 2001 From: Mohammad Durrani Date: Sat, 18 May 2024 20:32:01 -0400 Subject: [PATCH] updated html --- docs/index.html | 43 ++++++++++++++++++++++++------------------- final.ipynb | 13 +++---------- 2 files changed, 27 insertions(+), 29 deletions(-) diff --git a/docs/index.html b/docs/index.html index da17fc9..55973bb 100644 --- a/docs/index.html +++ b/docs/index.html @@ -7975,6 +7975,7 @@

4. Data Processing: Cleaning
import re
 
 def setupGame():
+    # Setup the starting board
     gameArr = [["" for i in range(8)] for i in range(8)]
     setupArr = ["R", "N", "B", "Q", "K", "B", "N", "R"]
     for i in range(8):
@@ -7986,8 +7987,11 @@ 

4. Data Processing: Cleaningreturn gameArr def makeMove(gameArr, move, moveNum): + # Each move can either be a regular move from one place to another, + # or it can be a special move with more steps than a simple move piece, specifier, file, rank = breakMoveUp(move) color = "W" if moveNum % 2 == 0 else "B" + # A castle is simply a rearranging of King and Rook if piece == "Castle": colorRank = 0 if color == "W" else 7 gameArr[colorRank][4] = "" @@ -8000,6 +8004,7 @@

4. Data Processing: CleaninggameArr[colorRank][2] = color + "K" gameArr[colorRank][3] = color + "R" gameArr[colorRank][4] = "" + # A promotion is a pawn move to the last rank and then a swap to a different piece elif piece == "Promotion": firstMove = move.split("=")[0] makeMove(gameArr, firstMove, moveNum) @@ -8011,8 +8016,10 @@

4. Data Processing: Cleaningpiece = color + piece col = ord(file) - ord("a") row = int(rank) - 1 + # Need to find the current piece that's being moved curRow,curCol = findPiecePos(piece, specifier, gameArr, col, row, color) - #En Passant Condition + #En Passant Condition: If a piece was captured by a pawn + # and the new location for it had no piece to begin with, then it must be an en passant if "x" in move and gameArr[row][col] == "": if color == "W": # Erasing black piece, so add 1 @@ -8039,6 +8046,11 @@

4. Data Processing: Cleaningif piece == gameArr[i][j] and (col==-1 or j==col) and (row==-1 or i==row): colDistance = abs(endCol-j) rowDistance = abs(endRow-i) + # An original position for a piece is valid when these conditions are satisfied: + # 1. The piece's movement rules are followed ex. N moves 2 squares one way and 1 square the other way + # 2. The piece's path to its destination is unobstructed + # 3. The piece's movement doesn't leave its king in check + #If positions satisfy movement rules of the piece if piece[1] == "N" and colDistance + rowDistance == 3 \ and min(colDistance, rowDistance) == 1 and not kingChecked(gameArr, color, i, j, endRow, endCol): @@ -8120,6 +8132,8 @@

4. Data Processing: Cleaningreturn (i,j) def kingChecked(gameArr, color, startRow, startCol, endRow, endCol): + # Checks if a piece is pinned to its king + # Does this by first assuming the move takes place, and then checking if the king is left attacked startPiece = gameArr[startRow][startCol] gameArr[startRow][startCol] = "" endPiece = gameArr[endRow][endCol] @@ -8162,6 +8176,7 @@

4. Data Processing: Cleaningreturn False def breakMoveUp(move): + # Break up the move into its piece, specifier, to break tie between multiple of the same piece, file, and rank # Get piece files = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'] ranks = ['1', '2', '3', '4', '5', '6', '7', '8'] @@ -8204,6 +8219,7 @@

4. Data Processing: Cleaningreturn piece, specifier, file, rank def playMoves(moves): + # Play through the moves by calling makeMove repeatedly gameArr = setupGame() for i in range(len(moves)): makeMove(gameArr, moves[i], i) @@ -8232,6 +8248,7 @@

4. Data Processing: Cleaningreturn whiteKingSafetyPenalty - blackKingSafetyPenalty def calculateKingSafetyPenalty(minRow, maxRow, minCol, maxCol, color, gameArr): + # Open Files are -3 and Missing Pawns are -1 totalPenalty = 0 for b in range(minCol, maxCol+1): # Missing Pawn Check @@ -8276,6 +8293,7 @@

4. Data Processing: Cleaningreturn whiteMobility - blackMobility def generateMobilityCombos(piece, row, col, gameArr): + # Checking all possible ways each piece can move to determine number of legal squares mobility = 0 if piece == "N": nArr = [(-1,-2), (-1,2), (1,-2), (1,2), (2, 1), (2, -1), (-2,1), (-2,-1)] @@ -8360,6 +8378,7 @@

4. Data Processing: Cleaningreturn whitePoints - blackPoints def calcPoints(dict): + # points = dict["B"] * 3 + dict["N"] * 3 + dict["P"] * 1 + dict["Q"] * 9 + dict["R"] * 5 if dict["B"] == 2: points += 1 @@ -9333,8 +9352,8 @@

Choosing A Model @@ -9841,8 +9860,8 @@

Benchmark

@@ -9912,20 +9931,6 @@

8. References and Additional Res - diff --git a/final.ipynb b/final.ipynb index 79ce317..2719afd 100644 --- a/final.ipynb +++ b/final.ipynb @@ -489,7 +489,7 @@ " # 1. The piece's movement rules are followed ex. N moves 2 squares one way and 1 square the other way\n", " # 2. The piece's path to its destination is unobstructed\n", " # 3. The piece's movement doesn't leave its king in check\n", - " If positions satisfy movement rules of the piece\n", + " #If positions satisfy movement rules of the piece\n", " if piece[1] == \"N\" and colDistance + rowDistance == 3 \\\n", " and min(colDistance, rowDistance) == 1 and not kingChecked(gameArr, color, i, j, endRow, endCol):\n", " \n", @@ -1623,9 +1623,9 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "From the graph above, we can see that the GradientBoostingClassifier and random forest is about equal, and KNeighbors performing the worst. This is also intuitive because gradient boosting and random forest are ensemble methods that combine multiple weak learners (descision trees in both cases) to create a strong learner. On the other hand, K-Nearest Neighbors (KNN) is a simpler algorithm that relies on the proximity of data points to make predictions. It may struggle with high-dimensional data and can be sensitive to the choice of the number of neighbors (k). \n", + "From the graph above, we can see that the GradientBoostingClassifier and random forest are about equal, with KNeighbors performing the worst. This is also intuitive because gradient boosting and random forest are ensemble methods that combine multiple weak learners (decision trees in both cases) to create a strong learner. On the other hand, K-Nearest Neighbors (KNN) is a simpler algorithm that relies on the proximity of data points to make predictions. It may struggle with high-dimensional data and can be sensitive to the choice of the number of neighbors (k). 
\n", "\n", - "Despite their similar performance, we will go with the gradient boosting classifier. This is because it is an iterative algorithm that progressively improves the model by focusing on the misclassified samples from previous iterations. This allows it to effectively handle difficult cases and achieve higher accuracy. This is what leads it to better performance over the random forest classifier. In contrast, KNN's performance heavily depends on the quality and relevance of the selected neighbors. If the neighbors are not representative of the true class distribution or if the features are not well-separated, KNN may struggle to make accurate predictions." + "Given their similar performance, we will go with the gradient boosting classifier. This is because it is an iterative algorithm that progressively improves the model by focusing on the misclassified samples from previous iterations. This allows it to effectively handle difficult cases and achieve higher accuracy. It also has more hyperparameters that can be tuned to improve overall performance. In contrast, KNN's performance heavily depends on the quality and relevance of the selected neighbors. If the neighbors are not representative of the true class distribution or if the features are not well-separated, KNN may struggle to make accurate predictions." ] }, { @@ -2135,13 +2135,6 @@ "\n", "Number of moves for opening: https://www.chessable.com/blog/opening-guide/#:~:text=Discover-,Introduction%20to%20Chess%20Openings,the%20main%20fight%20takes%20place\n" ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] } ], "metadata": {