diff --git a/.gitignore b/.gitignore index 75249f3..3dbb462 100644 --- a/.gitignore +++ b/.gitignore @@ -9,6 +9,7 @@ misc/* *.pyc *.txt *.json +*.db # Spreadsheet wee 3 letter spreadsheet/* diff --git a/Changelog.md b/Changelog.md index 8d805fd..3e8e391 100644 --- a/Changelog.md +++ b/Changelog.md @@ -1,22 +1,33 @@ # Changelog -This document details the changes I made, -and how they impact the performance of the program. +TODO: +- Better configuration (CLI arguments) +- Tokenizers & analysis (w/ transformers and a llama-like model, such as minillama) +- Hybrid Step-Generation Preprocessing +- N-Step-Lookahead Generational Algorithm for savefiles? +Maybe even alpha-beta pruning? -All benchmarks are ran on a personal system with the following specs: -i9-13950HX, -128GB RAM -Note that power settings and the like can change between versions, so results -are not comparable between them. +## Version 1.5.1 -TODO: -- Saving current progress, and resuming from the last point to avoid heavy precompute -- Better configuration (CLI arguments or config file) +- Added command line arguments for `optimize.py`. +- Added command line arguments for `speedrun.py`. +- Added command line arguments for `main.py`. -Future TODO: -- Speedrun auto-optimization (minimum spanning hypergraph) +## Version 1.5 +Added optimizers for speedrunning paths and finding optimal paths in savefiles! + +- Separated the `pair_to_int` and `int_to_pair` functions +along with static variables such as `DEFAULT_STARTING_ITEMS` +and `WORD_COMBINE_CHAR_LIMIT`into `util.py`. +- Added a `OptimizerRecipeList` class for a common interface +between all of the optimizers that will be implemented. +- Added an A* algorithm `optimizers/a_star.py` starting from the result +element with a generation-based admissible heuristic. (Source: BRH0208) +- Added a simple generational algorithm `optimizers/simple_generational.py` +starting from the result element with a generation-based heuristic, +made for fast analysis of savefiles. 
Used on yui's site (Infinite Craft Browser). ## Version 1.4 @@ -73,6 +84,18 @@ Do note that the parity of depth has some impact on the number of states. 8 6566 19.3702 3844983 3.871 209607 9 17045 163.8879 31438265 21.261 1191681 + NEW + DEPTH | SIZE | TIME | STATES | + 1 10 0.030 10 + 2 29 0.031 29 + 3 81 0.036 113 + 4 211 0.045 414 + 5 486 0.095 1642 + 6 1114 0.229 7822 + 7 2682 0.812 39284 + 8 6566 3.871 209607 + 9 17045 21.261 1191681 + ## Version 1.2.2 diff --git a/Readme.md b/Readme.md index b93f39b..d839848 100644 --- a/Readme.md +++ b/Readme.md @@ -8,26 +8,21 @@ Every time a new depth is completed, I will publish the recipes as a release. ## Usage -TODO: Make this more usable! +Requires python 3.11-3.12. -Requires Python 3.9~3.11 (I'm not that familiar with Python versions, but -there's a few different versions installed on each machine I've run this on) - -Run `main.py` to search. - -To edit any settings, edit `init_state` for your starting state if you -have an existing recipe, and `recipe.py` for how recipes are handled. - -If you want to make sure your speedrun route has missing elements, -repeats, or unused elements, run `speedrun.txt`. It must be in the format -of `x + y -> z` every line, except comment lines that start with `#`. +Install the required packages with: +```commandline +pip install -r requirements.txt +``` -There are two types of checks in `speedrun.txt`: static check makes sure -that your speedrun route makes sense (no duplicates, no missing elements, no -unused elements) while dynamic check makes requests to Neal's API to make sure -that your speedrun route has correct crafts. +To run the script, simply run: +```commandline +python main.py +``` -Currently, the script does not accept CLI arguments. +`speedrun.py` can help you check your speedrun recipes for errors. +`optimize.py` can help you optimize your speedruns. +Use `-h` for help with any CLI arguments. 
## Methodology diff --git a/convert.py b/convert.py index 3610c72..30ac1e4 100644 --- a/convert.py +++ b/convert.py @@ -2,6 +2,7 @@ import json import math import os +import sqlite3 from functools import cache from typing import Optional @@ -9,51 +10,7 @@ import bidict import recipe - - -def result_key(a: str, b: str) -> str: - if a > b: - a, b = b, a - return a + "\t" + b - - -def save(dictionary, file_name): - try: - json.dump(dictionary, open(file_name, 'w', encoding='utf-8'), ensure_ascii=False) - except FileNotFoundError: - print(f"Could not write to {file_name}! Trying to create cache folder...", flush=True) - try: - os.mkdir("cache") # TODO: generalize - json.dump(dictionary, open(file_name, 'w', encoding='utf-8'), ensure_ascii=False) - except Exception as e: - print(f"Could not create folder or write to file: {e}", flush=True) - print(dictionary) - except Exception as e: - print(f"Unrecognized Error: {e}", flush=True) - print(dictionary) - - -def best_recipes_to_json(recipe_file: str, output_file: str): - try: - with open(recipe_file, "r") as fin: - lines = fin.readlines() - except (IOError, ValueError): - print("Could not load recipe file", flush=True) - return - - relevant_recipes = {} - for line in lines: - if '->' in line: - output = line.split("->")[1].strip() - inputs = line.split("->")[0].strip() - u, v = inputs.split("+") - if output in relevant_recipes: - if (u.strip(), v.strip()) not in relevant_recipes[output]: - relevant_recipes[output].append((u.strip(), v.strip())) - else: - relevant_recipes[output] = [(u.strip(), v.strip())] - - save(relevant_recipes, output_file) +import util def remove_first_discoveries(savefile: str, new_savefile: str): @@ -119,19 +76,6 @@ def count_recipes(file: str): print(len(recipes)) -def load_analog_hors_json(file_name): - try: - db = json.load(open(file_name, 'r')) - except FileNotFoundError: - return {} - - new_db = {} - for key, value in db.items(): - for u, v in value: - new_db[result_key(u, v)] = key - return 
new_db - - def convert_to_result_first(file_name): with open(file_name, "r") as f: recipes = json.load(f) @@ -309,6 +253,35 @@ def merge_old(file_r: str, file_i: str): recipe_handler.add_recipe(u, v, r) +def merge_sql(file_new: str): + rh = recipe.RecipeHandler(("Water", "Fire", "Wind", "Earth")) + + new_db = sqlite3.connect(file_new) + new_cursor = new_db.cursor() + + # Get everything from the items table + new_cursor.execute("SELECT * FROM items") + for i in new_cursor: + rh.add_item(i[1], i[2], i[3]) + + print("Finished adding all items") + + # Get everything from the recipes table, and convert them to items + new_cursor.execute(""" + SELECT ing1.name, ing2.name, result.name + FROM recipes + JOIN items AS ing1 ON ing1.id = recipes.ingredient1_id + JOIN items AS ing2 ON ing2.id = recipes.ingredient2_id + JOIN items AS result ON result.id = recipes.result_id + """) + num_recipes = 0 + for r in new_cursor: + rh.add_recipe(r[0], r[1], r[2]) + num_recipes += 1 + if num_recipes % 100000 == 0: + print(f"Processed {num_recipes} recipes") + + def get_results_for(results: list[str]): recipe_handler = recipe.RecipeHandler(("Water", "Fire", "Wind", "Earth")) for result in results: @@ -410,7 +383,7 @@ def convert_to_savefile(savefile: str, items_file: str, recipes_file: Optional[s with open(items_file, "r", encoding="utf-8") as f: items = json.load(f) - items_reverse = {v[1]: [v[0], k, v[2]] for k, v in items.items()} + items_reverse: dict[int, list] = {v[1]: [v[0], k, v[2]] for k, v in items.items()} item_count = 0 first_discoveries_count = 0 @@ -429,7 +402,7 @@ def convert_to_savefile(savefile: str, items_file: str, recipes_file: Optional[s # break print(f"Processed {item_count} items") - recipes_limit = 6000000 + recipes_limit = 12000000 if recipes_file: with open(recipes_file, "r", encoding="utf-8") as f: recipes = json.load(f) @@ -449,6 +422,9 @@ def convert_to_savefile(savefile: str, items_file: str, recipes_file: Optional[s v_item = items_reverse[v] result = 
items_reverse[value][1] + if u_item[1] == result or v_item[1] == result: + continue + u_formatted = { "text": u_item[1], "emoji": u_item[0] @@ -513,17 +489,19 @@ def filter_results(result: str) -> bool: return True -def generate_single_best_recipe(output_file: str): +def generate_single_best_recipe(input_file: str, output_file: str): try: - with open("persistent.json", "r", encoding="utf-8") as file: + with open(input_file, "r", encoding="utf-8") as file: last_state_json = json.load(file) best_recipes = last_state_json["BestRecipes"] except FileNotFoundError: best_recipes = {} - MAX_DEPTH = 11 + MAX_DEPTH = 12 recipe_list = [[] for _ in range(MAX_DEPTH + 1)] for key, value in best_recipes.items(): + if len(value[0]) > MAX_DEPTH: + break if filter_results(key): recipe_list[len(value[0])].append((key, value[0])) @@ -531,33 +509,58 @@ def generate_single_best_recipe(output_file: str): print("Total recipes at each depth: ", [sum([len(x) for x in recipe_list[:i + 1]]) for i in range(1, len(recipe_list))]) - # visited = set() - # count: int = 0 - # with open(output_file, "w", encoding="utf-8") as f: - # for i in range(MAX_DEPTH + 1): - # for key, value in recipe_list[i]: - # # if len(key) != 3 or not all([ord('a') <= ord(x) <= ord('z') for x in key.lower()]): - # # continue - # # if key.lower() in visited: - # # continue - # # visited.add(key.lower()) - # value_str = "\n".join([f"{x[0]} + {x[1]} -> {x[2]}" for x in value]) - # f.write(f"{count+1}: {key}:\n{value_str}\n\n") - # count += 1 + visited = set() + count: int = 0 with open(output_file, "w", encoding="utf-8") as f: - for i in range(10): + for i in range(MAX_DEPTH + 1): for key, value in recipe_list[i]: # if len(key) != 3 or not all([ord('a') <= ord(x) <= ord('z') for x in key.lower()]): # continue + # if len(key) != 1 or not key[0].isalpha(): + # continue # if key.lower() in visited: # continue # visited.add(key.lower()) - f.write(f"{key}\n") + value_str = "\n".join([f"{x[0]} + {x[1]} -> {x[2]}" for x in value]) 
+ f.write(f"{count+1}: {key}:\n{value_str}\n\n") + count += 1 + # with open(output_file, "w", encoding="utf-8") as f: + # for i in range(10): + # for key, value in recipe_list[i]: + # # if len(key) != 3 or not all([ord('a') <= ord(x) <= ord('z') for x in key.lower()]): + # # continue + # # if key.lower() in visited: + # # continue + # # visited.add(key.lower()) + # f.write(f"{key}\n") + + +def compare_persistent_files(file1: str, file2: str): + with open(file1, "r", encoding="utf-8") as f: + data1 = json.load(f) + with open(file2, "r", encoding="utf-8") as f: + data2 = json.load(f) + + recipes1 = data1["BestRecipes"] + recipes2 = data2["BestRecipes"] + missing = [] -def generate_json(output_file: str): + for key, value in recipes1.items(): + if key not in recipes2: + missing.append(key) + continue + depth = len(value[0]) + if depth != len(recipes2[key][0]): + print(f"{key}: {depth} / {len(recipes2[key][0])}") + + print("\n".join(missing)) + return + + +def generate_json(input_file: str, output_file: str): try: - with open("persistent.json", "r", encoding="utf-8") as file: + with open(input_file, "r", encoding="utf-8") as file: last_state_json = json.load(file) best_recipes = last_state_json["BestRecipes"] except FileNotFoundError: @@ -567,6 +570,19 @@ def generate_json(output_file: str): json.dump(best_recipes, f, ensure_ascii=False) +def get_decent_recipe(file: str, item_names: list[str]): + with open(file, "r", encoding="utf-8") as f: + data = json.load(f) + for item_name in item_names: + if item_name in data['BestRecipes']: + for recipe in data['BestRecipes'][item_name]: + for a, b, c in recipe: + print(f"{a} + {b} -> {c}") + print("--------------------------") + else: + print(f"{item_name} not found.") + + def parse_pbpbpb_cancer_list(file: str) -> list[str]: with open(file, "r", encoding="utf-8") as f: lines = f.readlines() @@ -582,11 +598,29 @@ def parse_pbpbpb_cancer_list(file: str) -> list[str]: return cancer_list -cancers = ['Swamp Thing', 'Werewolf', 
'Venus Flytrap', 'Flying Fish', 'Giant Venus Flytrap', 'Sharknado', 'Dust Bunny', 'Muddy Wine', 'Steam Engine', 'Dandelion Wine', 'Dust Bowl', 'Steampunk Pirate', 'Dandelion Patch', 'Zombie King', 'Were-tree', 'Rocky Mountains', 'Monster Truck', 'Tornado', 'Dusty Springfield', 'Flat Earth', 'Fire Trap', 'Loch Ness Monster', 'Piranha Plant', 'Giant Dandelion', 'Flying Car', 'Funnel Cake', 'Steam Punk', 'Paper Boat', 'Mountain Dew', 'Pickle Rick', 'Hangover', 'Flying Sushi', 'Muddy Teapot', 'Balsamic Vinegar', 'Steamboat', 'Drunken Dragon', 'Fire Breathing Dragon', 'Flying Cow', 'Swamp Venus', 'Netherite Sword', 'Steam Robot', 'Muddy Sushi', 'Godzilla', 'Dust Storm', 'Poison Ivy', 'Darth Vader', 'Smoky Mountains', 'Chocolate Milk', 'Tsunami', 'Glasser', 'Flying Shark', 'Burning Man', 'Flying Frog', 'Soggy Toast', 'Hot Air Balloon', 'Niagara Falls', 'Wish Upon A Star', 'Mr. Potato Head', 'Swampasaurus', 'Zephyr Train', 'SpongeBob', 'Surf and Turf', 'Surfboard', 'Tea Party', 'Boiling Frog', 'Duck Sauce', 'Dandelion', 'Mecha Dragon', 'Flying Spaghetti Monster', 'Muddy Wind Farm', 'Piggyback', 'Pterodactyl', 'Surfing', 'Birthday Cake', 'Flying Plant', 'Flying Starfish', 'Beef Bourguignon', 'Dandelion Tea', 'Mars Rover', 'Venus Fly Trap', 'Gone With The Wind', 'Thunderbird', 'Flying Pig', 'Big Trouble in Little China', 'Amphibious Car', 'Cheese Wheel', 'Great Wall of China', 'Mudslide', 'Flying Soup', 'Dandelion Soup', 'Kite Surfing', 'Unicorn', 'Sperm Whale', 'Jellyfish', 'Amphicar', 'Chicken Noodle Soup', 'Mermaid', 'Water Rocket', 'Rainbow Trout', 'Lawnmower'] +cancers = ['Swamp Thing', 'Werewolf', 'Venus Flytrap', 'Flying Fish', 'Giant Venus Flytrap', 'Sharknado', 'Dust Bunny', + 'Muddy Wine', 'Steam Engine', 'Dandelion Wine', 'Dust Bowl', 'Steampunk Pirate', 'Dandelion Patch', + 'Zombie King', 'Were-tree', 'Rocky Mountains', 'Monster Truck', 'Tornado', 'Dusty Springfield', 'Flat Earth', + 'Fire Trap', 'Loch Ness Monster', 'Piranha Plant', 'Giant Dandelion', 'Flying 
Car', 'Funnel Cake', + 'Steam Punk', 'Paper Boat', 'Mountain Dew', 'Pickle Rick', 'Hangover', 'Flying Sushi', 'Muddy Teapot', + 'Balsamic Vinegar', 'Steamboat', 'Drunken Dragon', 'Fire Breathing Dragon', 'Flying Cow', 'Swamp Venus', + 'Netherite Sword', 'Steam Robot', 'Muddy Sushi', 'Godzilla', 'Dust Storm', 'Poison Ivy', 'Darth Vader', + 'Smoky Mountains', 'Chocolate Milk', 'Tsunami', 'Glasser', 'Flying Shark', 'Burning Man', 'Flying Frog', + 'Soggy Toast', 'Hot Air Balloon', 'Niagara Falls', 'Wish Upon A Star', 'Mr. Potato Head', 'Swampasaurus', + 'Zephyr Train', 'SpongeBob', 'Surf and Turf', 'Surfboard', 'Tea Party', 'Boiling Frog', 'Duck Sauce', + 'Dandelion', 'Mecha Dragon', 'Flying Spaghetti Monster', 'Muddy Wind Farm', 'Piggyback', 'Pterodactyl', + 'Surfing', 'Birthday Cake', 'Flying Plant', 'Flying Starfish', 'Beef Bourguignon', 'Dandelion Tea', + 'Mars Rover', 'Venus Fly Trap', 'Gone With The Wind', 'Thunderbird', 'Flying Pig', + 'Big Trouble in Little China', 'Amphibious Car', 'Cheese Wheel', 'Great Wall of China', 'Mudslide', + 'Flying Soup', 'Dandelion Soup', 'Kite Surfing', 'Unicorn', 'Sperm Whale', 'Jellyfish', 'Amphicar', + 'Chicken Noodle Soup', 'Mermaid', 'Water Rocket', 'Rainbow Trout', 'Lawnmower'] + + # PBPBPB: I'm just checking if first and last 2 tokens of an A or B are the same as the resulting element C. 
-async def try_cancer_combinations(rh: recipe.RecipeHandler, session: aiohttp.ClientSession, word1: str) -> list[tuple[str, str, str]]: +async def try_cancer_combinations(rh: recipe.RecipeHandler, session: aiohttp.ClientSession, word1: str) -> list[ + tuple[str, str, str]]: results = [] for word2 in cancers: result = await rh.combine(session, word1, word2) @@ -597,7 +631,8 @@ async def try_cancer_combinations(rh: recipe.RecipeHandler, session: aiohttp.Cli return results -async def try_in_a_combinations(rh: recipe.RecipeHandler, session: aiohttp.ClientSession, wordlist: list[str]) -> list[tuple[str, str]]: +async def try_in_a_combinations(rh: recipe.RecipeHandler, session: aiohttp.ClientSession, wordlist: list[str]) -> list[ + tuple[str, str]]: results = [] for word in wordlist: # print(word) @@ -636,63 +671,198 @@ async def main(): # print(f"Found {combined} with {u} ({u2}) + {v} ({v2}) -> {result}") +def merge_savefile(file1: str, file2: str, output_file: str): + with open(file1, "r", encoding="utf-8") as f: + data1 = json.load(f) + with open(file2, "r", encoding="utf-8") as f: + data2 = json.load(f) + + new_data = {"elements": [], "recipes": {}, "darkMode": True} + new_elements: dict[str, dict] = {} + for i in data1["elements"]: + if i["text"] not in new_elements: + new_elements[i["text"]] = i + else: + new_elements[i["text"]]["discovered"] = new_elements[i["text"]]["discovered"] or i["discovered"] + for i in data2["elements"]: + if i["text"] not in new_elements: + new_elements[i["text"]] = i + else: + new_elements[i["text"]]["discovered"] = new_elements[i["text"]]["discovered"] or i["discovered"] + + new_data["elements"] = list(new_elements.values()) + + new_recipes = {} + for key, value in data1["recipes"].items(): + if key not in new_recipes: + new_recipes[key] = value + else: + new_recipes[key].extend(value) + for key, value in data2["recipes"].items(): + if key not in new_recipes: + new_recipes[key] = value + else: + new_recipes[key].extend(value) + + 
new_data["recipes"] = new_recipes + + with open(output_file, "w", encoding="utf-8") as f: + json.dump(new_data, f, ensure_ascii=False) + + +def find_minus_claus(): + rh = recipe.RecipeHandler([]) + items_cur = rh.db.cursor() + items_cur.execute("SELECT * FROM items") + nothing_data = {} + for i in items_cur: + nothing_data[i[2]] = [0, 0] + + recipes_cur = rh.db.cursor() + recipes_cur.execute(""" + SELECT ing1.name, ing2.name, result.name + FROM recipes + JOIN items AS ing1 ON ing1.id = recipes.ingredient1_id + JOIN items AS ing2 ON ing2.id = recipes.ingredient2_id + JOIN items AS result ON result.id = recipes.result_id + """) + recipes = {} + recipes_count = 0 + for r in recipes_cur: + recipes_count += 1 + if recipes_count % 100000 == 0: + print(f"Processed {recipes_count} recipes") + is_nothing = False + if r[2] == "Nothing" or r[2] == "Nothing\t": + is_nothing = True + if r[0] in nothing_data: + nothing_data[r[0]][1] += 1 + if is_nothing: + nothing_data[r[0]][0] += 1 + else: + nothing_data[r[0]] = [0, 1] + if is_nothing: + nothing_data[r[0]][0] += 1 + if r[1] in nothing_data: + nothing_data[r[1]][1] += 1 + if is_nothing: + nothing_data[r[1]][0] += 1 + else: + nothing_data[r[1]] = [0, 1] + if is_nothing: + nothing_data[r[1]][0] += 1 + + # print(nothing_data) + with open("minus_claus_data2.json", "w", encoding="utf-8") as f: + json.dump(nothing_data, f, ensure_ascii=False) + + +def analyze_minus_claus(file: str, persistent_file: str): + with open(file, "r", encoding="utf-8") as f: + data = json.load(f) + + with open(persistent_file, "r", encoding="utf-8") as f: + persistent_data = json.load(f) + items_in_depth_11 = persistent_data["Visited"] + print(len(items_in_depth_11)) + + items: list[tuple[str, int, int]] = [] + for key, value in data.items(): + if value[1] <= 50: + continue + if len(key) > util.WORD_COMBINE_CHAR_LIMIT: + continue + items.append((key, value[0], value[1])) + items.sort(key=lambda x: x[1]/x[2], reverse=True) + for item in items: + if item[1] <= 
item[2] * 0.99: + break + # Check if item is start case + parts = item[0].split(" ") + is_valid = True + for part in parts: + if not part[0].isalpha(): + continue + if not part[0].isupper(): + is_valid = False + break + for char in part[1:]: + if not char.isalpha(): + continue + if char.isupper(): + is_valid = False + break + if not is_valid and item[0] in items_in_depth_11: + # if item[0] in items_in_depth_11: + print(f"{item[0]}: {item[1]} / {item[2]} ( {item[1] / item[2]} )") + + +def make_ingredients_case_insensitive(): + rh = recipe.RecipeHandler([]) + + recipes_cur = rh.db.cursor() + recipes_cur.execute(""" + SELECT ing1.name, ing2.name, result.name + FROM recipes + JOIN items AS ing1 ON ing1.id = recipes.ingredient1_id + JOIN items AS ing2 ON ing2.id = recipes.ingredient2_id + JOIN items AS result ON result.id = recipes.result_id + """) + recipes_count = 0 + for r in recipes_cur: + recipes_count += 1 + if recipes_count % 100000 == 0: + print(f"Processed {recipes_count} recipes") + + ing1, ing2, result = r + # print(ing1, ing2, result) + ing1 = util.to_start_case(ing1) + ing2 = util.to_start_case(ing2) + # print(ing1, ing2, result) + + rh.add_recipe(ing1, ing2, result) + + +def eeeing_binary_to_text(data: str): + print() + print(data) + for char in data.split(" "): + bits = 0 + for c in char: + if c == "E": + bits = (bits << 1) | 1 + elif c == "e": + bits = (bits << 1) + print(chr(bits), end="") + print() + + +def binary_to_eeeing(data: str): + for char in data: + num = ord(char) + for i in range(8): + if num & (1<<(7-i)): + print("E", end="") + else: + print("e", end="") + # num >>= 1 + print(" ", end="") + print() + + if __name__ == '__main__': pass - if os.name == 'nt': - asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy()) - asyncio.run(main()) - # generate_single_best_recipe("best_recipes.txt") - # generate_json("all_best_recipes_depth_11.json") - # add_to_recipe_handler("cache/items.json", "cache/recipes.json") - # 
convert_to_savefile("infinitecraft_large_with_partial_recipes.json", "cache/items.json", "cache/recipes.json") - # cancers = parse_pbpbpb_cancer_list("top_cancer_a.txt") - # print(cancers) - # d = load_save_file("infinitecraft_main_old_save_58k.json") - # print(len(d["elements"])) - # for e in d["elements"]: - # if "mirror" in e["text"].lower(): - # print(e) - # print(d["recipes"]["The Ticket Masters"]) - # print(d["recipes"]["Ticket Master"]) - # print(d["recipes"]["Mirror Master"]) - # remove_first_discoveries("infinitecraft_with_tools.json", "infinitecraft_no_fd.json") - # get_results_for(["Obama"]) - # print(ordered_total(0, 0, 2)) - # alpha_3_tmp("best_recipes_three_letter.txt", "three_letters.txt") - # i = get_items("cache/items.json") - # three_letter = set() - # counter = 0 - # discoveries = 0 - # first_discoveries = 0 - # for key, value in i.items(): - # discoveries += 1 - # if value[2]: - # first_discoveries += 1 - # if key.isalnum() and len(key) == 3 and key.lower() not in three_letter: - # three_letter.add(key.lower()) - # if value[2]: - # print(key, value) - # counter += 1 - # print(discoveries, first_discoveries) - # pass - # merge_old("cache/recipes_o.json", "cache/items_o.json") - # get_recipes_using(["Ab", "AB", "Ac", "AC", "Lord of the Rings", "Lord Of The Rings"]) - # print(ordered_total(0, 0, 9)) # 26248400230 - # print(ordered_total(0, 0, 10)) # 667110736190 - # print(ordered_total(0, 0, 11)) # 18527307559355 - # print(ordered_total(6, 0, 11)) # 3204682702923 - # print(ordered_total(7, 1, 11)) # 1144649542152 - # print(ordered_total(11, 2, 11)) # 146909202170 - # print(ordered_total(16, 3, 11)) # 14715058266 - # print(ordered_total_from_current([-1, -1, -1, -1, 5, 6, 10, 15, 33, 40, 42, 50, 72, 88, 99])) - # print(ordered_total_from_current([-1, -1, -1, -1, 5, 6, 10, 15, 33, 40, 42, 50, 72, 88, 99]) / ordered_total(0, 0, 11)) - # for i in range(15): - # print(i, ordered_total(0, 0, i)) - # other_save = json.load(open("infinitecraft (2).json", 
'r', encoding='utf-8')) - # print(len(other_save["elements"])) - # counter = 0 - # for v in other_save["elements"]: - # if v["discovered"]: - # counter += 1 - # print(counter) - # convert_to_id("cache/recipes.json", "cache/items.json", "cache/recipes_id.json", "cache/items_id.json") + # analyze_minus_claus("Searches/Minus Claus/minus_claus_data2.json", + # "Depth 11/persistent_depth11_pass3.json") + merge_sql("Depth 11/recipes_depth11_pass3.db") + # input() + # eeeing_binary_to_text(input()) # binary_to_eeeing("Yes, I most certainly have decoded the message. How have you been rom?") + # binary_to_eeeing("Nini rom, may Luna bless your dreams tonight") + # make_ingredients_case_insensitive() + # if os.name == 'nt': + # asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy()) + # asyncio.run(main()) + # generate_single_best_recipe("Depth 11/persistent_depth11_pass3.json", "Depth 11/best_recipes_depth_11_pass3.txt") + # compare_persistent_files("Depth 11/persistent_depth11_pass3.json", "Depth 11/persistent_depth11_pass2.json") + # l = [10, 29, 113, 414, 1642, 7823, 39295, 209682] + # print("\n".join([f"{l[i-1]}, {ordered_total(0, 0, i)}" for i in range(1, 9)])) diff --git a/locations.py b/locations.py deleted file mode 100644 index 14fedb2..0000000 --- a/locations.py +++ /dev/null @@ -1,253 +0,0 @@ -# Generating a list of locations for my positioning script -import json - -locations = [] - -# Enquoted alphabets: -quoted_alphabets = [ - ['!', ''], - ['"', '"'], -# ['(', ')'], # Not as useful - ['.', ''], - ['/', '/'], - ['_', ''], - ['~', ''], - ['‘', '’'], - ['“', '”'], -] -# ! < " < # < ' < ( < ) < - < . 
< / < \ < _ < ~ <<< ‘ < “ - -quoted_alphabet_x = 20 -quoted_alphabet_y = 400 -quoted_alphabet_width = 90 -quoted_alphabet_height = 42 -for i, quote_type in enumerate(quoted_alphabets): - prefix = quote_type[0] - suffix = quote_type[1] - x = i * quoted_alphabet_width + quoted_alphabet_x - for j, char in enumerate("abcdefghijklmnopqrstuvwxyz"): - y = j * quoted_alphabet_height + quoted_alphabet_y - locations.append([prefix + char + suffix, x, y]) - -tool_groups = [ - [ - (20, 30), (42, 100), - [ - "Delete First Character", - "Delete First Letter", - "Delete First Word", - "Delete First" - ] - ], - [ - (20, 215), (42, 100), - [ - "Delete Last Character", - "Delete Last Letter", - "Delete Last Word", - "Delete Last" - ] - ], - [ - (320, 30), (42, 100), - [ - "Delete The Quotation Mark", - "Delete The Quotation Marks" - ] - ], - [ - (320, 145), (42, 100), - [ - "Delete The Dot", - "Delete The Dots", - "Delete The Period" - ] - ], - [ - (320, 260), (42, 100), - [ - "Hyphen", - "Hyphenated", - "Delete The Hyphen" - ] - ], - [ - (570, 135), (42, 100), - [ - "The", - "The ~", - "The Plural", - "The One", - "The The", - "Delete The The" - ] - ], - [ - (625, 30), (42, 100), - [ - "Period", - ".com" - ] - ], - [ - (770, 30), (42, 100), - [ - "Possessive", - ["S'", "S's"], - "Apostrophe", - "Apostrophe S", - "Delete The Apostrophe" - ] - ], - [ - (770, 250), (42, 100), - [ - "Past Tense" - ] - ], - [ - (770, 300), (42, 100), - [ - "Not", - "Antonym" - ] - ], - [ - (770, 400), (42, 100), - [ - "Plural", - "The Plural", - "Pluralize", - "Plurals" - ] - ], - [ - (920, 400), (42, 100), - [ - "Unplural", - "Singular" - ] - ], - [ - (770, 575), (42, 100), - [ - "Without Spaces", - "Without Spacing", - "Delete The Space", - "Makeoneword" - ] - ], - [ - (1000, 575), (42, 100), - [ - "With Spaces", - "With Spacing", - "English Sentence", - "Romanization" - ] - ], - [ - (770, 750), (42, 200), - [ - ["Backwards", "Opposite"], - ["Inverse", "Inverted"], - ["Flip", "Swap"], - ["Reverse", 
"Anagram"], - "Repeat", - "Makeoneword" - ] - ], - [ - (770, 1030), (42, 100), - [ - ['"0"', '“0”'], - ['"1"', '“1”'], - ['"2"', '“2”'], - ['"3"', '“3”'], - ['"4"', '“4”'], - ['"5"', '“5”'], - ['"6"', '“6”'], - ['"7"', '“7”'], - ['"8"', '“8”'], - ['"9"', '“9”'] - ] - ], - [ - (970, 1030), (42, 100), - [ - ['"+"', '"."'], - ['"?"', '"!"'], - ['"&"', '"|"'], - ['( )', "(", ")"], - ['_', '-'], - ['"', "'"], - ['&', ','], - ['~', '#'], - "“”" - ] - ], - [ - (1000, 30), (42, 100), - [ - "Hashtag", - "The Hashtag", - "Hashtag The", - "Hashtagify" - ] - ], - [ - (1000, 250), (42, 100), - [ - "Next", - "Next Alphabet", - "Next Episode" - ] - ] -] - -for group in tool_groups: - x, y = group[0] - height, width = group[1] - for i, tool in enumerate(group[2]): - if type(tool) is str: - locations.append([tool, x, y + i * height]) - elif type(tool) is tuple or type(tool) is list: - for j, sub_tool in enumerate(tool): - locations.append([sub_tool, x + j * width, y + i * height]) - -# print(locations) -# with open("locations.json", "w", encoding='utf-8') as fout: -# json.dump({"locations": locations}, fout, ensure_ascii=False, indent=4) - -# Pony locations -pony_locations = [] -pony_ep_width = 400 -pony_ep_height = 42 -# column_count = 84 -with open("pony_episodes.txt", "r") as fin: - episodes = fin.readlines() - for i, episode in enumerate(episodes): - if episode.count("\t") != 5: - continue - season, ep_air, ep_production, overall_air, overall_production, name = episode.split("\t") - - s = int(season) - 1 - e = int(ep_air) - 1 - # x = (s // 2) * pony_ep_width - # y = (e + (s % 2) * 26) * pony_ep_height - x = s * pony_ep_width - y = e * pony_ep_height - pony_locations.append([name.strip().replace(',', '').replace('- ', ''), x, y]) - -print(pony_locations) -with open("pony_locations.json", "w", encoding='utf-8') as fout: - json.dump({"locations": pony_locations}, fout, ensure_ascii=False, indent=4) - - -with open("in_a_results.txt", "r") as fin: - lines = fin.readlines() - cnt = 0 
- for line in lines: - if len(line.split("-> ")[1]) >= 20: - cnt += 1 - print(len(lines), cnt, cnt / len(lines)) \ No newline at end of file diff --git a/main.py b/main.py index df035e9..89de538 100644 --- a/main.py +++ b/main.py @@ -1,3 +1,4 @@ +import argparse import atexit import os import sys @@ -11,13 +12,11 @@ import aiohttp import recipe +from util import int_to_pair, pair_to_int, DEFAULT_STARTING_ITEMS -# import tracemalloc +init_state: tuple[str, ...] = DEFAULT_STARTING_ITEMS - -init_state: tuple[str, ...] = ("Water", "Fire", "Wind", "Earth") - -# For people who want to start with a lot more things +# For people who want to start with a lot more things, which makes using CLIs impractical elements = ["Hydrogen", "Helium", "Lithium", "Beryllium", "Boron", "Carbon", "Nitrogen", "Oxygen", "Fluorine", "Neon", "Sodium", "Magnesium", "Aluminium", "Silicon", "Phosphorus", "Sulfur", "Chlorine", "Argon", "Potassium", "Calcium", "Scandium", "Titanium", "Vanadium", "Chromium", "Manganese", "Iron", "Cobalt", "Nickel", @@ -39,6 +38,9 @@ rearrange_words = ["Anagram", "Reverse", "Opposite", "Scramble", "Rearrange", "Palindrome", "Not"] +speedrun_current_words = ["Lake", "Plant", "Lily", "Volcano", "Island", "Continent", + "America", "USA", "Tea", "Taxes", "Filing", "File "] + letters2 = [] for l1 in letters: for l2 in letters: @@ -46,45 +48,28 @@ # init_state = tuple(list(init_state) + elements + ["Periodic Table",]) # init_state = tuple(list(init_state) + letters + letters2) -recipe_handler = recipe.RecipeHandler(init_state) -depth_limit = 11 -extra_depth = 1 +# init_state = tuple(list(init_state) + letters) +# init_state = tuple(list(init_state) + speedrun_current_words) best_recipes: dict[str, list[list[tuple[str, str, str]]]] = dict() visited = set() best_depths: dict[str, int] = dict() -best_recipes_file: str = "best_recipes.txt" -all_best_recipes_file: str = "all_best_recipes_depth_10_filtered.json" persistent_file: str = "persistent.json" persistent_temporary_file: 
str = "persistent2.json" -case_sensitive: bool = True -allow_starting_elements: bool = False -resume_last_run: bool = True + +recipe_handler: Optional[recipe.RecipeHandler] = recipe.RecipeHandler(init_state) +depth_limit = 10 +extra_depth = 0 +case_sensitive = True +allow_starting_elements = False +resume_last_run = True + last_game_state: Optional['GameState'] = None new_last_game_state: Optional['GameState'] = None -autosave_interval = 500 # Save every 500 new visited elements +autosave_interval = 500 # Save persistent file every 500 new visited elements autosave_counter = 0 -@cache -def int_to_pair(n: int) -> tuple[int, int]: - if n < 0: - return -1, -1 - j = 0 - while n > j: - n -= j + 1 - j += 1 - i = n - return i, j - - -@cache -def pair_to_int(i: int, j: int) -> int: - if j < i: - i, j = j, i - return i + (j * (j + 1)) // 2 - - @cache def limit(n: int) -> int: return n * (n + 1) // 2 @@ -149,19 +134,25 @@ async def child(self, session: aiohttp.ClientSession, i: int) -> Optional['GameS u, v = int_to_pair(i) craft_result = await recipe_handler.combine(session, self.items[u], self.items[v]) - # Invalid crafts, items we already have, or items that can be crafted earlier are ignored. 
- if (craft_result is None or - craft_result == "Nothing" or - (not allow_starting_elements and craft_result in self.items) or - (allow_starting_elements and - (craft_result == self.items[u] or craft_result == self.items[v] or - (craft_result in self.items and self.used[self.items.index(craft_result)] != 0))) or - # Even though we are looking for results in the original list, we still - # Don't want to use the result itself in any craft - craft_result in self.children): + # Invalid crafts, + if craft_result is None or craft_result == "Nothing": return None + # If we don't allow starting elements + if not allow_starting_elements and craft_result in self.items: + return None + + # If we allow starting elements to be crafted, such as searching for optimal periodic table entry points + # We can't craft a used starting element, because that forms a loop. + if allow_starting_elements: + if craft_result == self.items[u] or craft_result == self.items[v]: + return None + if craft_result in self.items and self.used[self.items.index(craft_result)] != 0: + return None + # Make sure we never craft this ever again + if craft_result in self.children: + return None self.children.add(craft_result) # Construct the new state @@ -265,9 +256,6 @@ async def dls(session: aiohttp.ClientSession, state: GameState, depth: int) -> i async def iterative_deepening_dfs(session: aiohttp.ClientSession): - # Clear best recipes file - if not resume_last_run: - open(best_recipes_file, "w").close() curDepth = 1 start_time = time.perf_counter() @@ -299,17 +287,15 @@ async def iterative_deepening_dfs(session: aiohttp.ClientSession): async def main(): # tracemalloc.start() + if resume_last_run: + load_last_state() + headers = recipe.load_json("headers.json")["default"] async with aiohttp.ClientSession() as session: async with session.get("https://neal.fun/infinite-craft/", headers=headers) as resp: print("Status:", resp.status) print("Content-type:", resp.headers['content-type']) - html = await 
resp.text() - # Save the html - # with open("infinite-craft.html", "w", encoding="utf-8") as file: - # file.write(html) - # print("Body:", html[:15], "...") cookies = session.cookie_jar.filter_cookies('https://neal.fun/infinite-craft/') for key, cookie in cookies.items(): print('Key: "%s", Value: "%s"' % (cookie.key, cookie.value)) @@ -336,10 +322,6 @@ def load_last_state(): last_game_state = None -if resume_last_run: - load_last_state() - - @atexit.register def save_last_state(): print("Autosaving progress...") @@ -356,7 +338,28 @@ def save_last_state(): os.replace(persistent_temporary_file, persistent_file) +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument("-s", "--starting-items", nargs="+", default=DEFAULT_STARTING_ITEMS, help="Starting items") + parser.add_argument("-d", "--depth", type=int, default=10, help="Depth limit") + parser.add_argument("-ed", "--extra_depth", type=int, default=0, help="Extra depth for alternate paths") + parser.add_argument("--case-sensitive", action="store_true", help="Case sensitive") + parser.add_argument("--allow-starting-elements", action="store_true", help="Allow starting elements") + parser.add_argument("--resume-last-run", action="store_true", help="Resume last run") + return parser.parse_args() + + if __name__ == "__main__": + # Parse arguments + args = parse_args() + init_state = tuple(args.starting_items) + recipe_handler = recipe.RecipeHandler(init_state) + depth_limit = args.depth + extra_depth = args.extra_depth + case_sensitive = args.case_sensitive + allow_starting_elements = args.allow_starting_elements + resume_last_run = args.resume_last_run + if os.name == 'nt': asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy()) asyncio.run(main()) diff --git a/optimize.py b/optimize.py index a3434e5..d0bc161 100644 --- a/optimize.py +++ b/optimize.py @@ -1,42 +1,196 @@ # Speedrun Optimizer +import os +import asyncio +import argparse + +import aiohttp import recipe import speedrun 
+import util +import optimizers.a_star as a_star +import optimizers.simple_generational as simple_generational +from optimizers.optimizer_interface import OptimizerRecipeList + + +# TODO: 2 types of optimization +# 1. In-place: No new elements, only look at subsets of original that can be crafted +# 2. Limited-depth: Allow new elements up to a certain deviation from the original path +# Algorithms: +# a) iddfs (top-down - low depth ONLY!) +# b) A* (bottom-up, single destination) + +# The actual algorithms are implemented in the `optimizers/` folder +# The interface is implemented in `optimizer_interface.py` + + +async def request_extra_generation(session: aiohttp.ClientSession, rh: recipe.RecipeHandler, current: list[str]): + new_items = set() + for item1 in current: + for item2 in current: + new_item = await rh.combine(session, item1, item2) + if new_item and new_item != "Nothing" and new_item not in current: + new_items.add(new_item) + return new_items + + +def get_local_generation(rh: recipe.RecipeHandler, current: list[str]): + new_items = set() + for item1 in current: + for item2 in current: + new_item = rh.get_local(item1, item2) + if new_item and new_item != "Nothing" and new_item not in current: + new_items.add(new_item) + return new_items + + +async def get_all_recipes(session: aiohttp.ClientSession, rh: recipe.RecipeHandler, items: list[str]): + total_recipe_count = len(items) * (len(items) + 1) // 2 + current_recipe = 0 + # Only store valid recipes + recipes = [] + items_set = set([item.lower() for item in items]) + for u, item1 in enumerate(items): + for item2 in items[u:]: + new_item = await rh.combine(session, item1, item2) + if new_item.lower() in items_set: + recipes.append((item1, item2, new_item)) + current_recipe += 1 + + cur_percentage = int(current_recipe / total_recipe_count * 100) + last_percentage = int((current_recipe - 1) / total_recipe_count * 100) + if cur_percentage != last_percentage: + print(f"Recipe Progress: {cur_percentage}% 
({current_recipe}/{total_recipe_count})") + return recipes + + +async def initialize_optimizer( + session: aiohttp.ClientSession, + rh: recipe.RecipeHandler, + items: list[str], + extra_generations: int = 1, + local_generations: int = 0) -> OptimizerRecipeList: + # Get extra generations + for i in range(extra_generations): + new_items = await request_extra_generation(session, rh, items) + items.extend(new_items) + print(f"Generation {i + 1} complete with {len(new_items)} new items.") + + # Get extra local generations + for i in range(local_generations): + new_items = get_local_generation(rh, items) + items.extend(new_items) + print(f"Local Generation {i + 1} complete with {len(new_items)} new items.") + + # Get all recipes + recipes = await get_all_recipes(session, rh, items) + recipe_list = OptimizerRecipeList(items) + for recipe_data in recipes: + recipe_list.add_recipe_name(recipe_data[2], recipe_data[0], recipe_data[1]) + return recipe_list + + +async def main(*, + file: str = "speedrun.txt", + ignore_case: bool = False, + extra_generations: int = 1, + local_generations: int = 0, + deviation: int = -1, + target: list[str] = None, + local_only: bool = False): + # Parse crafts file + crafts = speedrun.parse_craft_file(file, ignore_case=ignore_case) + craft_results = [crafts[2] for crafts in crafts] + if target is None: + target = [craft_results[-1], ] + max_crafts = len(crafts) + final_items_for_current_recipe = list(util.DEFAULT_STARTING_ITEMS) + craft_results + + # Request and build items cache + headers = recipe.load_json("headers.json")["default"] + with recipe.RecipeHandler(final_items_for_current_recipe, local_only=local_only) as rh: + async with aiohttp.ClientSession() as session: + async with session.get("https://neal.fun/infinite-craft/", headers=headers) as resp: + pass + optimizer_recipes = await initialize_optimizer( + session, + rh, + final_items_for_current_recipe, + extra_generations, + local_generations) + + # Generate generations + 
optimizer_recipes.generate_generations() + + # Initial crafts for deviation checking + initial_crafts = [optimizer_recipes.get_id(item) for item in list(util.DEFAULT_STARTING_ITEMS) + craft_results] + + # Artificial targets, when args just don't cut it because there's too many + # alphabets = [chr(i) for i in range(65, 91)] + # target = [] + # for c in alphabets: + # target.append(c) + # target.append(f".{c}") + # target.append(f"\"{c}\"") + # print(target) + + gen_1_pokemon = ['Lapras', 'Squirtle', 'Charizard', 'Magikarp', 'Magmar', 'Pikachu', 'Pidgey', 'Pidgeotto', 'Pidgeot', 'Gyarados', 'Raichu', 'Kingler', 'Blastoise', 'Charmander', 'Charmeleon', 'Bulbasaur', 'Ivysaur', 'Venusaur', 'Geodude', 'Graveler', 'Golem', 'Dragonite', 'Dragonair', 'Seadra', 'Omastar', 'Omanyte', 'Arcanine', 'Flareon', 'Vaporeon', 'Jolteon', 'Eevee', 'Aerodactyl', 'Moltres', 'Zapdos', 'Articuno', 'Cubone', 'Marowak', 'Oddish', 'Gloom', 'Vileplume', 'Jigglypuff', 'Wigglytuff', 'Grimer', 'Muk', 'Koffing', 'Weezing', 'Golduck', 'Psyduck', 'Weedle', 'Kakuna', 'Beedrill', 'Caterpie', 'Butterfree', 'Mewtwo', 'Mew', 'Hitmonlee', 'Hitmonchan', 'Meowth', 'Persian', 'Slowbro', 'Spearow', 'Fearow', 'Zubat', 'Golbat', 'Seaking', 'Goldeen', 'Sandshrew', 'Sandslash', 'Vulpix', 'Ninetales', 'Growlithe', 'Chansey', 'Snorlax', "Farfetch'd", 'Shellder', 'Cloyster', 'Mr. 
Mime', 'Arbok', 'Scyther', 'Onix', 'Ditto', 'Metapod', 'Dodrio', 'Doduo', 'Kangaskhan', 'Jynx', 'Ekans', 'Wartortle', 'Drowzee', 'Hypno', 'Poliwrath', 'Poliwhirl', 'Poliwag', 'Krabby', 'Nidoking', 'Sneasel', 'Weepinbell', 'Victreebel', 'Bellsprout', 'Raticate', 'Rattata', 'Porygon', 'Tauros', 'Slowpoke', 'Horsea', 'Nidoran', 'Nidorina', 'Nidoqueen', 'Nidorino', 'Magneton', 'Magnemite', 'Starmie', 'Staryu', 'Lickilicky', 'Lickitung', 'Exeggcute', 'Exeggutor', 'Abra', 'Kadabra', 'Alakazam', 'Tentacruel', 'Tentacool', 'Pinsir', 'Clefairy', 'Clefable', 'Paras', 'Parasect', 'Gastly', 'Haunter', 'Gengar', 'Ponyta', 'Rapidash', 'Rhyhorn', 'Rhydon', 'Seel', 'Dewgong', 'Venomoth', 'Venonat', 'Diglett', 'Dugtrio', 'Electrode', 'Voltorb', 'Kabutops', 'Kabuto', 'Tangela', 'Unown', 'Dratini', 'Primeape', 'Machamp', 'Machoke', 'Machop', 'Mankey', 'Electabuzz', 'Missingno'] + target = gen_1_pokemon + + # Run the optimizer + print(f"Optimizing for {target}...") + a_star.optimize(target, optimizer_recipes, max_crafts, initial_crafts, deviation) + + +def parse_arguments(): + parser = argparse.ArgumentParser(description="Speedrun Optimizer") + parser.add_argument("filename", + type=str, + help="The file to read the crafts from") + parser.add_argument("--ignore-case", + dest="ignore_case", + action="store_true", + default=False, + help="Ignore case when parsing the crafts file") + parser.add_argument("-g", "--extra-generations", + dest="extra_generations", + type=int, + default=1, + help="The number of extra generations to generate") + parser.add_argument("-lg", "--local-generations", + dest="local_generations", + type=int, + default=0, + help="The number of local generations to generate") + parser.add_argument("-d", "--deviation", + dest="deviation", + type=int, + default=-1, + help="The maximum deviation from the original path, default off") + parser.add_argument("-t", "--target", + dest="target", + type=str, + nargs="+", + help="The target item to craft") + 
parser.add_argument("-l", "--local", + dest="local", + action="store_true", + default=False, + help="Use local cache instead of Neal's API") + return parser.parse_args() + +if __name__ == '__main__': + args = parse_arguments() -def parse_craft_file(filename: str): - with open(filename, 'r') as file: - crafts_file = file.readlines() - - # Format: ... + ... -> ... - current = {"Earth": 0, - "Fire": 0, - "Water": 0, - "Wind": 0} - craft_count = 0 - crafts: list[tuple[str, str, str]] = [] - for i, craft in enumerate(crafts_file): - # print(craft) - if craft == '\n' or craft[0] == "#": - continue - ingredients, results = craft.split(' -> ') - ing1, ing2 = ingredients.split(' + ') - crafts.append((ing1.strip(), ing2.strip(), results.strip())) - craft_count += 1 - - if ing1.strip() not in current: - print(f"Ingredient {ing1.strip()} not found in line {i + 1}") - else: - current[ing1.strip()] += 1 - - if ing2.strip() not in current: - print(f"Ingredient {ing2.strip()} not found in line {i + 1}") - else: - current[ing2.strip()] += 1 - - if results.strip() in current: - print(f"Result {results.strip()} already exists in line {i + 1}") - - current[results.strip()] = 0 - # print(f'{ing1} + {ing2} -> {results}') - return crafts + if os.name == 'nt': + asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy()) + asyncio.run(main( + file=args.filename, + ignore_case=args.ignore_case, + extra_generations=args.extra_generations, + local_generations=args.local_generations, + deviation=args.deviation, + target=args.target, + local_only=args.local + )) diff --git a/optimizers/__init__.py b/optimizers/__init__.py new file mode 100644 index 0000000..00e95f4 --- /dev/null +++ b/optimizers/__init__.py @@ -0,0 +1,2 @@ +import optimizers.a_star +import optimizers.optimizer_interface diff --git a/optimizers/a_star.py b/optimizers/a_star.py new file mode 100644 index 0000000..69cd4c6 --- /dev/null +++ b/optimizers/a_star.py @@ -0,0 +1,264 @@ +""" +A* Optimizer for single 
destination speedrun, or within a savefile +Requires already having a valid route to the destination - TODO: choose how missing recipes are handled. +Based on BRH0208's A* + +Note: This is not applicable for high-density recipe books or super-high-generation recipes. +because branching factor will apply rather quickly +""" + +from optimizers.optimizer_interface import OptimizerRecipeList, savefile_to_optimizer_recipes +from recipe import RecipeHandler +import heapq + + +class AStarOptimizerState: + craft_count: int + current: set[int] # Crafts to do + crafted: set[int] # Crafts already done + trace: list[tuple[int, int, int]] # The steps to trace back the recipe + heuristic: float # Heuristic value + + def __init__(self, recipe_list: OptimizerRecipeList, craft_count: int, current: set[int], crafted=None, trace=None): + if trace is None: + trace = [] + if crafted is None: + crafted = set() + self.craft_count = craft_count + self.current = current + self.crafted = crafted + self.trace = trace + self.heuristic = self.calc_heuristic(recipe_list) + + def __str__(self): + return f"State with {self.craft_count} crafts and {self.current} remaining. Heuristic is {self.heuristic}" + + def pretty_str(self, recipe_list: OptimizerRecipeList): + todo_crafts = [f"{recipe_list.get_name(x)}" for x in self.current] + done_steps = "\n".join( + [f"{recipe_list.get_name_capitalized(u)} + {recipe_list.get_name_capitalized(v)} -> {recipe_list.get_name_capitalized(result)}" + for u, v, result in self.trace]) + return f"State with {self.craft_count} crafts and {todo_crafts} remaining. Heuristic is {self.heuristic}\n" \ + f"Steps: \n{done_steps}" + + def calc_heuristic_non_admissible(self, recipe_list: OptimizerRecipeList) -> int: + # Non-admissible heuristic: sum of generation for all elements. 
+ return self.craft_count + sum([recipe_list.get_generation_id(x) for x in self.current]) + + def calc_heuristic_simple(self, recipe_list: OptimizerRecipeList) -> int: + # Simple admissible heuristic: maximum generation for all elements. + # Does not take into account repeating generations. + return self.craft_count + max([recipe_list.get_generation_id(x) for x in self.current]) + + def calc_heuristic_complex(self, recipe_list: OptimizerRecipeList) -> float: + # Better admissible heuristic: generation for all elements. + # Takes into account repeating generations. + if len(self.current) == 0: + return self.craft_count + generations = [recipe_list.get_generation_id(i) for i in self.current] + generations.sort() + for i in range(1, len(generations)): + generations[i] = max(generations[i], generations[i - 1] + 1) + return self.craft_count + generations[-1] + # Can possibly add craft_count in an inconsequential way, + # such as - 0.001 * self.craft_count for self.craft_count < 1000 + + calc_heuristic = calc_heuristic_complex + + def get_children(self, u: int) -> set[int]: + # Gets all elements that depends on u + # to check for circular dependencies + dependency_set = {u} + while True: + new_items = set() + for ing1, ing2, result in self.trace[::-1]: + if result in dependency_set: + continue + if ing1 in dependency_set or ing2 in dependency_set: + new_items.add(result) + if len(new_items) == 0: + break + dependency_set.update(new_items) + return dependency_set + + def get_deviations(self, initial_crafts: list[int]) -> int: + # Returns the number of deviations from the initial crafts + return len((self.current | self.crafted).difference(initial_crafts)) + # Note that currently, the initial_crafts are nicely the first items in terms of ID, + # so passing in the initial_crafts is not necessary. However, just in case + # something changes, this will be kept. 
+ + def crafts(self, recipe_list: OptimizerRecipeList) -> list['AStarOptimizerState']: + # Returns the possible crafts from the current state + result = [] + + # Craft highest generation item + item_id = max(self.current, key=lambda x: recipe_list.get_generation_id(x)) + item_children = self.get_children(item_id) + # print(self.pretty_str(recipe_list)) + # print(f"{recipe_list.get_name(item_id)}: {[recipe_list.get_name(x) for x in item_children]}") + + cur_remaining = self.current.copy() + cur_remaining.remove(item_id) + # print(f"Expanding element {recipe_list.get_name_capitalized(item_id)}") + # print(f"{[recipe_list.get_name_capitalized(x) for x in item_children]} are dependent on {recipe_list.get_name_capitalized(item_id)}.") + + for u, v in recipe_list.get_ingredients_id(item_id): + # Check for circular dependencies + if u in item_children or v in item_children: + continue + + # Make new state + new_items = cur_remaining.copy() + new_crafted = self.crafted.copy() + new_crafted.add(item_id) + if recipe_list.get_generation_id(u) != 0 and u not in self.crafted: + new_items.add(u) + if recipe_list.get_generation_id(v) != 0 and v not in self.crafted: + new_items.add(v) + # print(f"Crafting {recipe_list.get_name_capitalized(u)} + {recipe_list.get_name_capitalized(v)} -> {recipe_list.get_name_capitalized(item_id)}") + result.append(AStarOptimizerState(recipe_list, + self.craft_count + 1, + new_items, + new_crafted, + self.trace + [(u, v, item_id)])) + return result + + def is_complete(self) -> bool: + return len(self.current) == 0 + + def __lt__(self, other): + return self.heuristic < other.heuristic + + def __eq__(self, other): + return self.heuristic == other.heuristic + + +def optimize( + targets: list[str], + recipe_list: OptimizerRecipeList, + upper_bound: int, + initial_crafts: list[int] = None, + max_deviations: int = 128) -> list[AStarOptimizerState]: + # Parsing args + check_deviations = False + if initial_crafts and max_deviations >= 0: + check_deviations = 
True + + # Generate generations + recipe_list.generate_generations() + + target_ids = [recipe_list.get_id(target) for target in targets] + for i, target_id in enumerate(target_ids): + if target_id is None: + raise ValueError(f"Target {targets[i]} not found in recipe list!") + + # Initialize the starting state + start = AStarOptimizerState(recipe_list, 0, set(target_ids)) + + # Priority Queue + priority_queue: list[tuple[float, AStarOptimizerState]] = [] + visited: dict[frozenset[int], int] = {} + processed: set[frozenset[int]] = set() + heapq.heappush(priority_queue, (start.heuristic, start)) + + # Stats + processed_states = 0 + min_heuristic = 0 + final_states: list[AStarOptimizerState] = [] + completed = False + completed_steps = 0 + + # Main loop + while len(priority_queue) > 0: + _, current_state = heapq.heappop(priority_queue) + # Stop if completed and if we are no longer optimal + if completed and current_state.heuristic > completed_steps: + break + + # Information + processed_states += 1 + if current_state.heuristic > min_heuristic: + min_heuristic = current_state.heuristic + print(f"Processed: {processed_states}, Queue length: {len(priority_queue)}, Min heuristic in queue: {min_heuristic}") + if processed_states % 10000 == 0: + print(f"Processed: {processed_states}, Queue length: {len(priority_queue)}, Min heuristic in queue: {min_heuristic}") + + # Save all optimal-steps states + if current_state.is_complete(): + print("Found first solution!") + final_states.append(current_state) + completed_steps = current_state.craft_count + upper_bound = completed_steps + completed = True + continue + + # Check if the current state is already processed + if frozenset(current_state.current) in processed: + continue + processed.add(frozenset(current_state.current)) + + # print("Current state: ", current_state.pretty_str(recipe_list)) + # print("Current state:", current_state) + # print("Queue length: ", len(priority_queue)) + # input() + + # print(f"Current: 
{current_state}") + next_state: AStarOptimizerState + for next_state in current_state.crafts(recipe_list): + # print(f"Next: {next_state}") + # Check if next state is already in priority queue? Probably not necessary + + # Check if next states exceeds upper bound (by existing recipe) + if next_state.heuristic > upper_bound: + continue + + # Check if the next state exceeds deviation limit + # print(next_state.current, next_state.crafted, initial_crafts, next_state.get_deviations(initial_crafts)) + deviations = next_state.get_deviations(initial_crafts) + if check_deviations and deviations > max_deviations: + continue + + # Check if the next state is already visited + current_set = frozenset(next_state.current) + if current_set in visited: + if visited[current_set] <= next_state.craft_count: + continue + visited[current_set] = next_state.craft_count + heapq.heappush(priority_queue, (next_state.heuristic, next_state)) + + print(f"Complete! {completed_steps} crafts") + print(f"Found {len(final_states)} optimal recipes.") + + # Post-processing - make sure the ordering is correct + for final_state in final_states: + crafted = {0, 1, 2, 3} + steps_copy = final_state.trace.copy() + new_steps = [] + while len(steps_copy) > 0: + for u, v, result in steps_copy: + # print(u, v, result, u in crafted, v in crafted, result in crafted) + if u in crafted and v in crafted: + crafted.add(result) + # print("Crafted", recipe_list.get_name_capitalized(result)) + steps_copy.remove((u, v, result)) + new_steps.append((u, v, result)) + + for u, v, result in new_steps: + print( + f"{recipe_list.get_name_capitalized(u)} + {recipe_list.get_name_capitalized(v)} -> {recipe_list.get_name_capitalized(result)}") + print("\n---------------------------------------------------\n") + + print("Optimization complete!") + + return final_states + + +def main(): + optimize("Firebird", savefile_to_optimizer_recipes("../yui_optimizer_savefile.json"), 12) + # optimize("1444980", 
savefile_to_optimizer_recipes("../yui_optimizer_savefile.json"), 128) + pass + + +if __name__ == "__main__": + main() diff --git a/optimizers/optimizer_interface.py b/optimizers/optimizer_interface.py new file mode 100644 index 0000000..be3d7d7 --- /dev/null +++ b/optimizers/optimizer_interface.py @@ -0,0 +1,175 @@ +""" +Interfaces for optimizers to use +Provides an in-memory recipe list with O(1) lookup in both directions, +and converting from IDs to names and vice versa. + +Also includes generation of each element's generation +as well as converting from a save file. +""" +from collections import deque +from typing import Optional +from bidict import bidict +import json +from util import int_to_pair, pair_to_int, DEFAULT_STARTING_ITEMS + + +class OptimizerRecipeList: + # Maps item name (lower case) to item id + ids: bidict[str, int] + # Maps ID to item name (capitalized same as items) + id_capitalized: dict[int, str] + # Forward recipe list + # int_to_pair(ingredient1, ingredient2) -> result + fwd: dict[int, int] + # Backward recipe list + # result -> [(ingredient1, ingredient2), (ingredient1, ingredient2)] + bwd: dict[int, list[tuple[int, int]]] + # Generation of each element + # item_id -> generation + gen: Optional[dict[int, int]] + # Whether the generation has been generated, so nothing happens again + gen_generated: bool = False + + def __init__(self, items: list[str]): + self.fwd = {} + self.bwd = {} + self.ids = bidict() + self.id_capitalized = {} + for i, item in enumerate(items): + self.ids[item.lower()] = i + self.id_capitalized[i] = item + self.gen = None + + def __str__(self): + return f"OptimizerRecipeList with {len(self.ids)} items and {len(self.fwd)} recipes" + + def add_item(self, item: str) -> int: + self.ids[item.lower()] = len(self.ids) + self.id_capitalized[len(self.ids) - 1] = item + return len(self.ids) - 1 + + def get_name(self, item_id: int) -> str: + return self.ids.inv[item_id] + + def get_name_capitalized(self, item_id: int) -> str: + 
return self.id_capitalized[item_id] + + def get_id(self, name: str) -> int: + try: + return self.ids[name.lower()] + except KeyError: + # Silently ignore, because borked savefiles yay + # print(f"{name} not found!") + return self.add_item(name) + + def get_generation_id(self, item_id: int) -> Optional[int]: + if self.gen is None: + return None + return self.gen.get(item_id) + + def add_recipe_id(self, result: int, ingredient1: int, ingredient2: int): + # Add to backward + if result not in self.bwd: + self.bwd[result] = [(ingredient1, ingredient2)] + else: + self.bwd[result].append((ingredient1, ingredient2)) + + # Add to forward + self.fwd[pair_to_int(ingredient1, ingredient2)] = result + + def add_recipe_name(self, result: str, ingredient1: str, ingredient2: str): + self.add_recipe_id(self.get_id(result), self.get_id(ingredient1), self.get_id(ingredient2)) + + def get_ingredients_id(self, result: int) -> list[tuple[int, int]]: + try: + return self.bwd.get(result) + except KeyError: + return [] + + def get_result_id(self, ingredient1: int, ingredient2: int) -> int: + try: + return self.fwd[pair_to_int(ingredient1, ingredient2)] + except KeyError: + return -1 + + def generate_generations(self, init_items: list[str] = DEFAULT_STARTING_ITEMS) -> None: + # O(V^2) time complexity + # TODO: Make this cleaner by not using a queue + # Don't generate if it's already generated + if self.gen_generated: + return + self.gen_generated = True + + self.gen: dict[int, int] = {} # The generation of each element + visited: list[int] = [] # Already processed elements + for item in init_items: + self.gen[self.get_id(item)] = 0 + visited.append(self.get_id(item)) + + queue = deque() + + def enqueue(u: int, v: int): + # What the fuck happened? 
+ if u not in self.gen: + raise ValueError(f"Item {u} not in generation list") + if v not in self.gen: + raise ValueError(f"Item {v} not in generation list") + + # New generation is the old generation + 1 + new_generation: int = max(self.gen[u], self.gen[v]) + 1 + # The crafting result of u + v + new_item: int = self.get_result_id(u, v) + + # Only add if the item isn't visited. Generation will always be increasing since it's effectively bfs. + if new_item and new_item >= 0 and new_item not in self.gen: + self.gen[new_item] = new_generation + queue.append(new_item) + + # Initialize based on what items are available + for i, item1 in enumerate(init_items): + for j, item2 in enumerate(init_items[i:]): + enqueue(self.get_id(item1), self.get_id(item2)) + + while len(queue) > 0: + cur = queue.popleft() + visited.append(cur) + for other in visited: + enqueue(cur, other) + + return + + def hybrid_generations(self, num_steps: int = 5, init_items: list[str] = DEFAULT_STARTING_ITEMS) -> None: + # TODO: Hybrid IDDFS for full steps until num_steps, then generate generations + # Likely a better heuristic than simple generations. + # Trading a bit more precompute for faster algorithm execution / better heuristic. + ... 
+ + +def savefile_to_optimizer_recipes(file: str) -> OptimizerRecipeList: + with open(file, "r", encoding='utf-8') as file: + data = json.load(file) + + recipes_raw: dict[str, list[dict]] = data["recipes"] + elements_raw = data["elements"] + + optimizer = OptimizerRecipeList([element['text'] for element in elements_raw]) + + for result, recipe_list in recipes_raw.items(): + for recipe in recipe_list: + optimizer.add_recipe_name(result, recipe[0]['text'], recipe[1]['text']) + + return optimizer + + +def main(): + savefile_name = "../yui_optimizer_savefile.json" + optimizer_recipes = savefile_to_optimizer_recipes(savefile_name) + print(optimizer_recipes) + optimizer_recipes.generate_generations() + for item_id, generation in optimizer_recipes.gen.items(): + print(f"{optimizer_recipes.get_name(item_id)}: {generation}") + print(optimizer_recipes.gen) + + +if __name__ == '__main__': + main() diff --git a/optimizers/simple_generational.py b/optimizers/simple_generational.py new file mode 100644 index 0000000..9cb9f5a --- /dev/null +++ b/optimizers/simple_generational.py @@ -0,0 +1,148 @@ +import util +from optimizers.optimizer_interface import * + + +def get_children(trace: list[tuple[int, int, int]], u: int) -> set[int]: + # Gets all elements that depends on u + # to check for circular dependencies + dependency_set = {u} + while True: + new_items = set() + for ing1, ing2, result in trace[::-1]: + if result in dependency_set: + continue + if ing1 in dependency_set or ing2 in dependency_set: + new_items.add(result) + if len(new_items) == 0: + break + dependency_set.update(new_items) + return dependency_set + + +def optimize( + target: str, + recipe_list: OptimizerRecipeList, + nonexistent_generation: int): + # Generate generations + recipe_list.generate_generations() + + todo: set[int] = {recipe_list.get_id(target)} + done: set[int] = set() + missing: set[int] = set() + trace: list[tuple[int, int, int]] = [] + + while len(todo) > 0: + try: + cur_id = max(todo, key=lambda 
x: recipe_list.get_generation_id(x)) + todo.remove(cur_id) + except TypeError: + cur_id = todo.pop() + + if cur_id in done: + continue + done.add(cur_id) + + if recipe_list.get_generation_id(cur_id) == 0: + continue + + if cur_id not in recipe_list.bwd: + missing.add(cur_id) + continue + + min_recipe: Optional[tuple[int, int]] = None + min_cost: float = float('inf') + + # Circular dependency checking + children = get_children(trace, cur_id) + + for u, v in recipe_list.get_ingredients_id(cur_id): + if u in children or v in children: + continue + + cost_u: int = 0 + cost_v: int = 0 + if u not in done and u not in todo: + if recipe_list.get_generation_id(u) is None: + cost_u = nonexistent_generation + else: + cost_u = recipe_list.get_generation_id(u) + if v not in done and v not in todo: + if recipe_list.get_generation_id(v) is None: + cost_v = nonexistent_generation + else: + cost_v = recipe_list.get_generation_id(v) + + if u == v: + cost_v = 0 + + this_cost = util.pair_to_int(cost_u, cost_v) + + # this_cost = max(cost_u, cost_v) # - 1 / (min(cost_u, cost_v) + 1) + + if this_cost < min_cost: + min_cost = this_cost + min_recipe = (u, v) + + if min_recipe is not None: + trace.append((min_recipe[0], min_recipe[1], cur_id)) + todo.add(min_recipe[0]) + todo.add(min_recipe[1]) + else: + # print(f"Missing {cur_id}: {recipe_list.get_name_capitalized(cur_id)}") + missing.add(cur_id) + + print(f"Steps: {len(trace)} | {len(missing)} Missing: {[recipe_list.get_name_capitalized(x) for x in missing]}") + for u, v, result in trace[::-1]: + print( + f"{recipe_list.get_name_capitalized(u)} + {recipe_list.get_name_capitalized(v)} -> {recipe_list.get_name_capitalized(result)}") + + # Post-processing - make sure the ordering is correct + crafted = {0, 1, 2, 3} | missing + print(crafted) + steps_copy = trace.copy() + new_steps = [] + while len(steps_copy) > 0: + for u, v, result in steps_copy: + # print(u, v, result, u in crafted, v in crafted, result in crafted) + if u in crafted and v 
in crafted: + crafted.add(result) + # print("Crafted", recipe_list.get_name_capitalized(result)) + steps_copy.remove((u, v, result)) + new_steps.append((u, v, result)) + # return + # print(f"Steps left: {len(steps_copy)}") + + for u, v, result in new_steps: + print( + f"{recipe_list.get_name_capitalized(u)} + {recipe_list.get_name_capitalized(v)} -> {recipe_list.get_name_capitalized(result)}") + + +def savefile_to_optimizer_recipes_oopsie(file: str) -> OptimizerRecipeList: + with open(file, "r", encoding='utf-8') as file: + data = json.load(file) + + recipes_raw: dict[str, list[dict]] = data["recipes"] + elements_raw = data["elements"] + + optimizer = OptimizerRecipeList([element['text'] for element in elements_raw]) + + for result, recipe_list in recipes_raw.items(): + for recipe in recipe_list: + # if recipe[0]['text'] in {"Water", "Fire", "Earth", "Wind"} and recipe[1]['text'] in {"Water", "Fire", "Earth", "Wind"}: + # # print(f"Ignored recipe {recipe[0]['text']} + {recipe[1]['text']}") + # continue + optimizer.add_recipe_name(result, recipe[0]['text'], recipe[1]['text']) + + return optimizer + + +def main(): + # optimize("Firebird", savefile_to_optimizer_recipes("../yui_optimizer_savefile.json"), 1000) + # optimize("Lake", savefile_to_optimizer_recipes("../Savefiles/Other People/infinitecraft_14.json"), 1000) + optimize("Chlorosulfuric Acid", savefile_to_optimizer_recipes("../yui_optimizer_missing.json"), 1000) + optimize("Flying Fish", savefile_to_optimizer_recipes("../yui_optimizer_missing.json"), 1000) + pass + + +if __name__ == "__main__": + main() diff --git a/recipe.py b/recipe.py index 8e096b0..9c6c1b5 100644 --- a/recipe.py +++ b/recipe.py @@ -2,6 +2,7 @@ import json import math import os +import random import sys import time import traceback @@ -12,20 +13,8 @@ from bidict import bidict import sqlite3 -WORD_TOKEN_LIMIT = 20 -WORD_COMBINE_CHAR_LIMIT = 30 - - -def pair_to_int(i: int, j: int) -> int: - if j < i: - i, j = j, i - return i + (j * (j + 1)) // 
2 - - -def int_to_pair(n: int) -> tuple[int, int]: - j = math.floor(((8 * n + 1) ** 0.5 - 1) / 2) - i = n - (j * (j + 1)) // 2 - return i, j +import util +from util import int_to_pair, pair_to_int, WORD_COMBINE_CHAR_LIMIT # Insert a recipe into the database @@ -64,28 +53,35 @@ def load_json(file: str) -> dict: class RecipeHandler: db: sqlite3.Connection db_location: str = "cache/recipes.db" + closed: bool = False last_request: float = 0 - request_cooldown: float = 0.5 # 0.5s is safe for this API + request_cooldown: float = 0.5 # 0.5s is safe for this API sleep_time: float = 1.0 sleep_default: float = 1.0 retry_exponent: float = 2.0 - local_only: bool = True - trust_cache_nothing: bool = True # Trust the local cache for "Nothing" results - trust_first_run_nothing: bool = False # Save as "Nothing" in the first run + local_only: bool = False + trust_cache_nothing: bool = True # Trust the local cache for "Nothing" results + trust_first_run_nothing: bool = False # Save as "Nothing" in the first run local_nothing_indication: str = "Nothing\t" # Indication of untrusted "Nothing" in the local cache - nothing_verification: int = 3 # Verify "Nothing" n times with the API - nothing_cooldown: float = 5.0 # Cooldown between "Nothing" verifications - connection_timeout: float = 5.0 # Connection timeout + nothing_verification: int = 3 # Verify "Nothing" n times with the API + nothing_cooldown: float = 5.0 # Cooldown between "Nothing" verifications + connection_timeout: float = 10.0 # Connection timeout + + print_new_recipes: bool = True headers: dict[str, str] = {} - def __init__(self, init_state): + def __init__(self, init_state, **kwargs): + # Key word arguments + for key, value in kwargs.items(): + setattr(self, key, value) + # Load headers self.headers = load_json("headers.json")["api"] self.db = sqlite3.connect(self.db_location) - atexit.register(lambda: (self.db.commit(), self.db.close())) + atexit.register(lambda: (self.close())) # Items table self.db.execute(""" CREATE 
TABLE IF NOT EXISTS items ( @@ -118,14 +114,25 @@ def __init__(self, init_state): # # Nothing is -1, local_nothing_indication is -2 self.add_item_force_id("Nothing", '', False, -1) self.add_item_force_id(self.local_nothing_indication, '', False, -2) - # + # # Get rid of "nothing"s, if we don't trust "nothing"s. - # if not self.trust_cache_nothing: - # temp_set = frozenset(self.recipes_cache.items()) - # for ingredients, result in temp_set: - # if result < 0: - # self.recipes_cache[ingredients] = -2 - # save_json(self.recipes_cache, self.recipes_file) + if not self.trust_cache_nothing: + cur = self.db.cursor() + cur.execute("UPDATE recipes SET result_id = -2 WHERE result_id = -1") + self.db.commit() + + def close(self): + if self.closed: + return + self.db.commit() + self.db.close() + self.closed = True + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.close() def add_item(self, item: str, emoji: str, first_discovery: bool = False): # print(f"Adding: {item} ({emoji})") @@ -154,9 +161,17 @@ def add_item_force_id(self, item: str, emoji: str, first_discovery: bool = False print(e) def add_recipe(self, a: str, b: str, result: str): + a = util.to_start_case(a) + b = util.to_start_case(b) if a > b: a, b = b, a + # Note that only the *INGREDIENT* will be converted to start case element. + # because ingredient case does not matter. + # The results will not, since the case of the resultant item may be significant. 
+ self.add_starting_item(a, "", False) + self.add_starting_item(b, "", False) + # print(f"Adding: {a} + {b} -> {result}") cur = self.db.cursor() cur.execute(insert_recipe, (a, b, result)) @@ -181,7 +196,8 @@ def save_response(self, a: str, b: str, response: dict): except KeyError: new = False - print(f"New Recipe: {a} + {b} -> {result}") + if self.print_new_recipes: + print(f"New Recipe: {a} + {b} -> {result}") if new: print(f"FIRST DISCOVERY: {a} + {b} -> {result}") @@ -197,8 +213,11 @@ def save_response(self, a: str, b: str, response: dict): self.add_recipe(a, b, result) def get_local(self, a: str, b: str) -> Optional[str]: + a = util.to_start_case(a) + b = util.to_start_case(b) if a > b: a, b = b, a + cur = self.db.cursor() cur.execute(query_recipe, (a, b)) result = cur.fetchone() @@ -266,24 +285,14 @@ def get_crafts(self, result: str) -> list[tuple[str, str]]: # return recipes # Adapted from analog_hors on Discord - async def combine(self, session: aiohttp.ClientSession, a: str, b: str) -> str: + async def combine(self, session: aiohttp.ClientSession, a: str, b: str, *, ignore_local: bool = False) -> str: # Query local cache - local_result = self.get_local(a, b) + local_result = None + if not ignore_local: + local_result = self.get_local(a, b) + # print(f"Local result: {a} + {b} -> {local_result}") if local_result and local_result != self.local_nothing_indication: - # TODO: Censoring - temporary, to see how much of a change it has - # print(local_result) - # if ("slave" in local_result.lower() or - # "terroris" in local_result.lower() or - # "hamas" in local_result.lower() or - # local_result.lower() == 'jew' or - # local_result.lower() == "rape" or - # local_result.lower() == "rapist" or - # local_result.lower() == "pedophile" or - # local_result.lower() == "aids" or - # "Bin Laden" in local_result): - # return "Nothing" - return local_result if self.local_only: @@ -300,7 +309,8 @@ async def combine(self, session: aiohttp.ClientSession, a: str, b: str) -> str: 
# Increases time taken on requests but should be worth it. # Also note that this can't be asynchronous due to all the optimizations I made assuming a search order time.sleep(self.nothing_cooldown) - print("Re-requesting Nothing result...", flush=True) + if self.print_new_recipes: + print("Re-requesting Nothing result...", flush=True) r = await self.request_pair(session, a, b) @@ -331,9 +341,13 @@ async def request_pair(self, session: aiohttp.ClientSession, a: str, b: str) -> # print(resp.status) if resp.status == 200: self.sleep_time = self.sleep_default - return await resp.json() + return await resp.json(content_type=None) else: print(f"Request failed with status {resp.status}", file=sys.stderr) + if resp.status == 500: + print(f"Internal Server Error when combining {a} + {b}", file=sys.stderr) + return {"result": "Nothing\t", "emoji": "", "isNew": False} + time.sleep(self.sleep_time) self.sleep_time *= self.retry_exponent print("Retrying...", flush=True) @@ -347,6 +361,18 @@ async def request_pair(self, session: aiohttp.ClientSession, a: str, b: str) -> # Testing code / temporary code +async def random_walk(rh: RecipeHandler, session: aiohttp.ClientSession, steps: int): + current_items = set(util.DEFAULT_STARTING_ITEMS) + for i in range(steps): + a = list(current_items)[random.randint(0, len(current_items) - 1)] + b = list(current_items)[random.randint(0, len(current_items) - 1)] + result = await rh.combine(session, a, b) + if result != "Nothing": + current_items.add(result) + print(f"Step {i+1}: {a} + {b} -> {result}") + print(f"{len(current_items)} items: {current_items}") + + async def main(): pass # letters = ["A", "B", "C", "D", "E", "F", "G", "H", "I", "J", @@ -358,7 +384,16 @@ async def main(): # for l2 in letters: # letters2.append(l1 + l2) # - # r = RecipeHandler([]) + rh = RecipeHandler([]) + headers = load_json("headers.json")["default"] + async with aiohttp.ClientSession() as session: + async with session.get("https://neal.fun/infinite-craft/", 
headers=headers) as resp: + pass + # await random_walk(rh, session, 5000) + await rh.combine(session, "Ash", "Steam Zeus") + # print(rh.get_crafts("20")) + # print(f"Ash + Steam Zeus = {rh.get_local('Ash', 'Steam Zeus')}") + # letter_recipes = {} # for two_letter_combo in letters2: # uses = r.get_uses(two_letter_combo) diff --git a/requirements.txt b/requirements.txt index d80fcba..3cabf2f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1 +1,15 @@ -bidict~=0.23.1p \ No newline at end of file +aiohttp==3.9.3 +aiosignal==1.3.1 +aiosqlite==0.20.0 +attrs==23.2.0 +bidict==0.23.1 +frozenlist==1.4.1 +idna==3.6 +multidict==6.0.5 +psycopg==3.1.18 +psycopg-binary==3.1.18 +pypiwin32==223 +pywin32==306 +typing_extensions==4.10.0 +tzdata==2024.1 +yarl==1.9.4 \ No newline at end of file diff --git a/speedrun.py b/speedrun.py index 7047c1b..4cd9b81 100644 --- a/speedrun.py +++ b/speedrun.py @@ -1,10 +1,16 @@ +import asyncio import json +import os import sys import time import traceback import urllib +from typing import Optional from urllib.parse import quote_plus from urllib.request import Request, urlopen +import argparse + +import aiohttp import recipe @@ -25,88 +31,204 @@ recipe_handler = None -def static_check_script(filename: str): - with open(filename, 'r') as file: +def parse_craft_file(filename: str, forced_delimiter: Optional[str] = None, *, ignore_case: bool = True, strict_order: bool = False) -> list[tuple[str, str, str]]: + with open(filename, 'r', encoding='utf-8') as file: crafts = file.readlines() - # Format: ... + ... -> ... - current = {"Earth": 0, - "Fire": 0, - "Water": 0, - "Wind": 0} + # Format: ... + ... [delimiter] ... 
craft_count = 0 + crafts_parsed: list[tuple[str, str, str]] = [] for i, craft in enumerate(crafts): # print(craft) - if craft == '\n' or craft[0] == "#": + if craft == '\n': continue - ingredients, results = craft.split(' -> ') - ing1, ing2 = ingredients.split(' + ') + craft = craft.split(" //")[0].strip() + + # Automatic delimiter detection + delimiter = " = " + if forced_delimiter: + delimiter = forced_delimiter + else: + if " = " in craft: + pass + elif " -> " in craft: + delimiter = " -> " + else: + print(f"Delimiter not found in line {i + 1}") + continue + + try: + ingredients, results = craft.split(delimiter) + ing1, ing2 = ingredients.split(' + ') + except ValueError: + print(f"Delimiter not found in line {i + 1}: {craft}") + continue + if strict_order: + if ing1 > ing2: + ing1, ing2 = ing2, ing1 + ing1, ing2, results = ing1.strip(), ing2.strip(), results.strip() + if ignore_case: + ing1, ing2, results = ing1.lower(), ing2.lower(), results.lower() + crafts_parsed.append((ing1, ing2, results)) craft_count += 1 - if ing1.strip() not in current: - print(f"Ingredient {ing1.strip()} not found in line {i + 1}") + + return crafts_parsed + + +def compare(original: str, new: str, *args, **kwargs): + crafts = parse_craft_file(original, *args, **kwargs) + crafts2 = parse_craft_file(new, *args, **kwargs) + elements = set([craft[2] for craft in crafts]) + elements2 = set([craft[2] for craft in crafts2]) + + # print(set([str(craft) for craft in crafts])) + # print(set([str(craft) for craft in crafts2])) + + elem_additions = set(elements2).difference(elements) + print(f"Added Elements: {', '.join(elem_additions)}") + elem_removals = set(elements).difference(elements2) + print(f"Removed Elements: {', '.join(elem_removals)}") + + additions = [] + removals = [] + changes = {} + for craft in crafts: + if craft[2] not in elements2: + removals.append(craft) else: - current[ing1.strip()] += 1 + if craft not in crafts2: + changes[craft[2]] = [craft, None] + for craft in crafts2: + 
if craft[2] not in elements: + additions.append(craft) + else: + if craft not in crafts: + changes[craft[2]][1] = craft + + print(f"Added Crafts: {len(additions)}") + for craft in additions: + print(f" {craft[0]} + {craft[1]} -> {craft[2]}") + print(f"Removed Crafts: {len(removals)}") + for craft in removals: + print(f" {craft[0]} + {craft[1]} -> {craft[2]}") + print(f"Changed Crafts: {len(changes)}") + for key, value in changes.items(): + print(f"Original: {value[0][0]} + {value[0][1]} -> {value[0][2]}") + print(f"New: {value[1][0]} + {value[1][1]} -> {value[1][2]}") + print() + return + + +def simple_check_script(filename: str, *args, **kwargs) -> tuple[bool, bool, bool]: + crafts = parse_craft_file(filename, *args, **kwargs) + has_duplicates = False + has_misplaced = False + has_missing = False + + # Format: ... + ... -> ... + current = {"earth": 0, + "fire": 0, + "water": 0, + "wind": 0} + crafted = set() + possible_misplaced = set() + for i, craft in enumerate(crafts): + ing1, ing2, result = craft + ing1, ing2, result = ing1.lower(), ing2.lower(), result.lower() + + if ing1 not in current: + possible_misplaced.add(ing1) + current[ing1] = 1 + else: + current[ing1] += 1 + if ing2.strip() not in current: - print(f"Ingredient {ing2.strip()} not found in line {i + 1}") + possible_misplaced.add(ing2) + current[ing2] = 1 else: - current[ing2.strip()] += 1 - if results.strip() in current: - print(f"Result {results.strip()} already exists in line {i + 1}") + current[ing2] += 1 + + if result in crafted: + print(f"Result {result} already exists in line {i + 1}") + has_duplicates = True + crafted.add(result) + if result not in current: + current[result] = 0 - current[results.strip()] = 0 - # print(f'{ing1} + {ing2} -> {results}') - element_count = 0 - elements_copy = elements.copy() for ingredient, value in current.items(): - if value == 0 and ingredient not in elements_copy: + if value == 0 and ingredient: + # If the ingredient is a result, then it is fine not being 
used. print(f"Ingredient {ingredient} is not used in any recipe") - if ingredient in elements_copy: - element_count += 1 - elements_copy.remove(ingredient) - print("\n".join([str(elements_copy[i * 10:i * 10 + 10]) for i in range(11)])) - print(craft_count) - # print(current) - current_list = list(current.items()) - current_list.sort(key=lambda x: x[1], reverse=True) - # for k, v in current_list: - # if k in elements: - # continue - # print(f"{k}: {v}") - # print(tuple(current.keys())) - print(element_count) - return current - - -def dynamic_check_script(filename: str): + + for element in possible_misplaced: + if element in crafted: + print(f"Element {element} is misplaced.") + has_misplaced = True + else: + print(f"Element {element} is missing.") + has_missing = True + + return has_duplicates, has_misplaced, has_missing + + +def loop_check_script(filename, *args, **kwargs) -> bool: + crafts = parse_craft_file(filename, *args, **kwargs) + cur_elements = {"earth", "fire", "water", "wind"} + new_order = [] + + while len(cur_elements) < len(crafts) + 4: + has_changes = False + for i, craft in enumerate(crafts): + ing1, ing2, result = craft + ing1, ing2, result = ing1.lower(), ing2.lower(), result.lower() + if ing1 in cur_elements and ing2 in cur_elements and result not in cur_elements: + cur_elements.add(result) + new_order.append(craft) + has_changes = True + if not has_changes: + print("There is a loop in the recipe!") + print("Correct ordering, up to the loop:") + for craft in new_order: + print(f" {craft[0]} + {craft[1]} -> {craft[2]}") + return False + + print("Correct ordering:") + for craft in new_order: + print(f" {craft[0]} + {craft[1]} -> {craft[2]}") + return True + + +def static_check_script(filename: str, *args, **kwargs): + result = simple_check_script(filename, *args, **kwargs) + if not result[0] and result[1] and not result[2]: + print("Trying to correct for misplaced elements...") + loop_check_script(filename, *args, **kwargs) + + +async def 
dynamic_check_script(filename: str, *args, **kwargs) -> bool: global recipe_handler if recipe_handler is None: recipe_handler = recipe.RecipeHandler(("Water", "Fire", "Wind", "Earth")) - with open(filename, 'r', encoding='utf-8') as file: - crafts = file.readlines() + crafts = parse_craft_file(filename, *args, **kwargs) # Format: ... + ... -> ... - current = {"Earth": 0, - "Fire": 0, - "Water": 0, - "Wind": 0} - craft_count = 0 has_issues = False - for i, craft in enumerate(crafts): - # print(craft) - if craft == '\n' or craft[0] == "#": - continue - craft = craft.replace("'", '’') - ingredients, results = craft.split(' -> ') - ing1, ing2 = ingredients.split(' + ') - craft_count += 1 - true_result = recipe_handler.combine(ing1.strip(), ing2.strip()) - if true_result != results.strip(): - has_issues = True - print(f"Craft {ing1} + {ing2} -> {results} is not correct. The correct response is {true_result}") + async with aiohttp.ClientSession() as session: + headers = recipe.load_json("headers.json")["default"] + async with session.get("https://neal.fun/infinite-craft/", headers=headers) as resp: + pass + for i, craft in enumerate(crafts): + ing1, ing2, result = craft + true_result = await recipe_handler.combine(session, ing1.strip(), ing2.strip()) + + if true_result != result.strip(): + has_issues = True + print(f"Craft {ing1} + {ing2} -> {result} is not correct. 
The correct response is {true_result}") if not has_issues: print("All recipes are correct!") + return has_issues def count_uses(filename: str): @@ -131,131 +253,28 @@ def count_uses(filename: str): print(current) -def load_best_recipes(filename: str) -> dict[str, list[list[tuple[str, str, str]]]]: - # Loading the all best recipes file for easy element adding - with open(filename, 'r', encoding='utf-8') as file: - lines = file.readlines() - - recipes: dict[str, list[list[tuple[str, str, str]]]] = {} - - current_element = "" - rec_separator = "--" - separator = "-----------------------------------------------" - state = 0 - current_recipe = [] - for line in lines: - try: - line = line[:-1] # Ignore last \n - if state == 0: - # Get current element - current_element = line - state = 1 - continue - if state == 1: - state = 2 - current_recipe = [] - continue - if state == 2: - if line == separator: - state = 0 - if current_element in recipes: - recipes[current_element].append(current_recipe) - else: - recipes[current_element] = [current_recipe] - continue - if line == rec_separator: - state = 1 - if current_element in recipes: - recipes[current_element].append(current_recipe) - else: - recipes[current_element] = [current_recipe] - continue - # Get recipe - elem, w = line.split(" -> ") - u, v = elem.split(" + ", 1) - current_recipe.append((u, v, w)) - except Exception as e: - print(line, state) - - return recipes - - -def add_element(filename: str, element: str, recipes: dict[str, list[list[tuple[str, str, str]]]]): - if element not in recipes: - print(f"Element {element} not found in recipes") - return - - cur_elements = static_check_script(filename) - - best_recipe = [] - best_cost = 1e9 - speedy_recipe = [] - for r in recipes[element]: - cost = len(r) - for u, v, w in r: - if w in cur_elements: - cost -= 1 - else: - speedy_recipe.append((u, v, w)) - # print(cost, r) - if cost < best_cost: - best_cost = cost - best_recipe = speedy_recipe - speedy_recipe = [] - 
print(f"Best recipe for {element} has cost {best_cost}:") - for u, v, w in best_recipe: - print(f"{u} + {v} -> {w}") - - -def combine_element_pairs(): - global recipe_handler - if recipe_handler is None: - recipe_handler = recipe.RecipeHandler() - - results = {} - for i in range(len(elements)): - for j in range(i, len(elements)): - result = recipe_handler.combine(elements[i], elements[j]) - if result != elements[i] and result != elements[j]: - if result in results: - results[result].append((elements[i], elements[j])) - else: - results[result] = [(elements[i], elements[j])] - - # intermediates = list(results.items()) - # print(intermediates) - - unused_elements = elements.copy() - - for k, v in results.items(): - if k in unused_elements: - unused_elements.remove(k) - if k not in elements: - continue - print(f"{k} can be obtained from {len(v)} methods") - for u, w in v: - print(f"{u} + {w} -> {k}") - print() - - for e in unused_elements: - print(f"{e} can't be made in 1 step") +def parse_args(): + parser = argparse.ArgumentParser(description='Speedrun Checker') + parser.add_argument('action', type=str, help='Action to perform', choices=['static_check', 'dynamic_check', 'compare']) + parser.add_argument('file', type=str, help='File to read from') + parser.add_argument('file2', type=str, help='File to compare to. 
Ignored unless using the compare action.', nargs='?', default=None) + parser.add_argument('--ignore_case', action='store_true', help='Ignore case when parsing the file') + parser.add_argument('--strict_order', action='store_true', help='Enforce strict order of ingredients') + return parser.parse_args() if __name__ == '__main__': + pass # combine_element_pairs() - static_check_script('v1.7.12-reduced.txt') - # best_recipes = load_best_recipes('expanded_recipes_depth_10.txt') - # count = 0 - # for key in best_recipes: - # for c in key: - # if c.isalnum(): - # continue - # if c == ' ': - # continue - # print(key) - # break - # print(count) - # dynamic_check_script('periodic_table_speedrun_v1.7.4.txt') - # add_element('periodic_table_speedrun_v1.6.8.txt', - # "C", - # load_best_recipes('expanded_recipes_depth_10.txt')) + args = parse_args() + if args.action == 'static_check': + static_check_script(args.file, ignore_case=args.ignore_case) + elif args.action == 'dynamic_check': + if os.name == 'nt': + asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy()) + asyncio.run(dynamic_check_script(args.file, ignore_case=args.ignore_case)) + elif args.action == 'compare': + if args.file2 is None: + print("No file to compare to!") + sys.exit(1) + compare(args.file, args.file2, ignore_case=args.ignore_case, strict_order=args.strict_order) diff --git a/util.py b/util.py new file mode 100644 index 0000000..de5025a --- /dev/null +++ b/util.py @@ -0,0 +1,29 @@ +import math + +WORD_TOKEN_LIMIT = 20 +WORD_COMBINE_CHAR_LIMIT = 30 +DEFAULT_STARTING_ITEMS = ("Wind", "Fire", "Water", "Earth") + + +def pair_to_int(i: int, j: int) -> int: + if j < i: + i, j = j, i + return i + (j * (j + 1)) // 2 + + +def int_to_pair(n: int) -> tuple[int, int]: + if n < 0: + return -1, -1 + j = math.floor(((8 * n + 1) ** 0.5 - 1) / 2) + i = n - (j * (j + 1)) // 2 + return i, j + + +def to_start_case(s: str) -> str: + new_str = "" + for i in range(len(s)): + if i == 0 or s[i - 1] == " ": + 
new_str += s[i].upper() + else: + new_str += s[i].lower() + return new_str