From e151fa83dfb2934a4125e68fef17ec52c8372e59 Mon Sep 17 00:00:00 2001 From: ExMingYan <2264568487@qq.com> Date: Mon, 28 Apr 2025 16:09:05 +0800 Subject: [PATCH] =?UTF-8?q?=E5=A2=9E=E5=8A=A0Msys2=E4=BD=BF=E7=94=A8mingw6?= =?UTF-8?q?4=E7=BC=96=E8=AF=91=E6=8C=87=E5=8D=97?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- INSTALL_ZH.md | 2 +- dev_scripts/followers/extract_sprites.py | 4 ++-- dev_scripts/followers/follower_emotions.py | 6 +++--- dev_scripts/followers/front_palette.py | 2 +- dev_scripts/followers/palette.py | 2 +- docs/install/windows/MSYS2_ZH.md | 17 ++++++++++++++++ .../consolidate_contest_opponent_filters.py | 6 +++--- .../1.11/convert_battle_frontier_trainers.py | 6 +++--- migration_scripts/1.8/item_ball_refactor.py | 18 ++++++++--------- .../1.9/battle_anim_moves_refactor.py | 10 +++++----- .../1.9/battle_frontier_convert_parties.py | 8 ++++---- migration_scripts/1.9/convert_item_icons.py | 6 +++--- .../1.9/convert_partner_parties.py | 2 +- .../1.9/convert_trainer_parties.py | 2 +- migration_scripts/1.9/egg_move_refactor.py | 8 ++++---- tools/learnset_helpers/teachable.py | 20 +++++++++---------- 16 files changed, 68 insertions(+), 51 deletions(-) create mode 100644 docs/install/windows/MSYS2_ZH.md diff --git a/INSTALL_ZH.md b/INSTALL_ZH.md index 81e3ab94b1..31fc46d9a4 100644 --- a/INSTALL_ZH.md +++ b/INSTALL_ZH.md @@ -22,7 +22,7 @@ [WSL 安装说明](docs/install/windows/WSL_ZH.md) -[Msys2 安装说明](docs/install/windows/MSYS2.md) +[Msys2 安装说明](docs/install/windows/MSYS2_ZH.md) [Cygwin 安装说明](docs/install/windows/CYGWIN.md) diff --git a/dev_scripts/followers/extract_sprites.py b/dev_scripts/followers/extract_sprites.py index 0e12a83a47..acb46809d8 100644 --- a/dev_scripts/followers/extract_sprites.py +++ b/dev_scripts/followers/extract_sprites.py @@ -11,7 +11,7 @@ import png SPRITESHEETS = [('gen1.png', 15, 11, 1)] output_dir = 'sprites' index_to_name = {} -with open('names.txt', 'r') as f: +with open('names.txt', 
'r', encoding="utf-8") as f: for line in f: index, name = line.split(' ')[:2] name = name.strip() @@ -72,7 +72,7 @@ def apply_palette(palette_file, input_file, output_file): # Apply one file's pa inp = png.Reader(input_file) w, h, rows, _ = inp.read() src_palette = tuple(c[:3] for c in inp.palette()) - with open(output_file, 'wb') as f: + with open(output_file, 'wb') as f: new_rows = [] for row in rows: new_rows.append([closest_color(src_palette[c], target_palette) if c else 0 for c in row]) diff --git a/dev_scripts/followers/follower_emotions.py b/dev_scripts/followers/follower_emotions.py index 56ad95c132..201ae60436 100644 --- a/dev_scripts/followers/follower_emotions.py +++ b/dev_scripts/followers/follower_emotions.py @@ -8,7 +8,7 @@ blank_regex = re.compile(r'\(?_+\)?') # Converts a series of message lines to a better format def convert_messages(infile, outfile='emotions.txt'): - with open(infile, 'r') as f_in, open(outfile, 'w') as f_out: + with open(infile, 'r', encoding="utf-8") as f_in, open(outfile, 'w', encoding="utf-8") as f_out: for line in f_in: line = line.rstrip('\n') if line and line[0] == '-': @@ -35,11 +35,11 @@ def prepare_string(s): # Exports up to n messages in C format to outfile def export_messages(infile, outfile, n=None, indent=0, start=0): - with open(infile, 'r') as f_in: + with open(infile, 'r', encoding="utf-8") as f_in: lines = f_in.readlines() if n is not None: lines = lines[:n] - with open(outfile, 'w') as f_out: + with open(outfile, 'w', encoding="utf-8") as f_out: codelines = [' '*indent + f'static const u8 sCondMsg{start+i:02d}[] = _("{prepare_string(s)}");' for i, s in enumerate(lines)] f_out.write('\n'.join(codelines)) print(f'{len(lines)} lines written') diff --git a/dev_scripts/followers/front_palette.py b/dev_scripts/followers/front_palette.py index d0486ba51d..184017e066 100644 --- a/dev_scripts/followers/front_palette.py +++ b/dev_scripts/followers/front_palette.py @@ -39,7 +39,7 @@ def 
apply_palette(palette_file, input_file, output_file): # Apply one file's pa w, h, rows, info = inp.read() src_palette = tuple(c[:3] for c in inp.palette()) new_rows = [[closest_color(src_palette[c][:3], target_palette) if c else 0 for c in row] for row in rows] - with open(output_file, 'wb') as f: + with open(output_file, 'wb') as f: w = png.Writer(width=w, height=h, bitdepth=4, palette=target_palette) w.write(f, new_rows) diff --git a/dev_scripts/followers/palette.py b/dev_scripts/followers/palette.py index a5f926358a..dfd5566d43 100644 --- a/dev_scripts/followers/palette.py +++ b/dev_scripts/followers/palette.py @@ -12,7 +12,7 @@ def extract_palette(path): r.read() root, _ = os.path.splitext(path) out_path = root + '.pal' - with open(out_path, 'w', newline='\r\n') as f: + with open(out_path, 'w', newline='\r\n', encoding="utf-8") as f: f.write(PAL_PRELUDE) colors = r.palette() if len(colors) < 16: diff --git a/docs/install/windows/MSYS2_ZH.md b/docs/install/windows/MSYS2_ZH.md new file mode 100644 index 0000000000..578b64df5f --- /dev/null +++ b/docs/install/windows/MSYS2_ZH.md @@ -0,0 +1,17 @@ +# Msys2编译指南 + +## 安装并配置Msys2 + +1. 在Msys2的[github](https://github.com/msys2/msys2-installer/releases)页面中下载.exe,下载完成后打开并安装 + +2. 安装后进入Msys2安装目录,运行mingw64.exe + +3. 
执行下面的命令以安装需要的依赖 + + ```bash + pacman -S mingw-w64-x86_64-arm-none-eabi-toolchain mingw-w64-x86_64-toolchain mingw-w64-x86_64-libpng mingw-w64-x86_64-python make + ``` + +## 编译 + +使用cd命令进入存放源码的目录,输入make,等待编译完成即可 \ No newline at end of file diff --git a/migration_scripts/1.11/consolidate_contest_opponent_filters.py b/migration_scripts/1.11/consolidate_contest_opponent_filters.py index 4179320cd6..f6495be81d 100644 --- a/migration_scripts/1.11/consolidate_contest_opponent_filters.py +++ b/migration_scripts/1.11/consolidate_contest_opponent_filters.py @@ -8,7 +8,7 @@ if not os.path.exists("Makefile"): # Read contest_opponents.h for file in glob.glob('./src/data/contest_opponents.h'): - with open(file, 'r') as f: + with open(file, 'r', encoding="utf-8") as f: source_content = f.read() # Extract party info from contest_opponents.h @@ -21,7 +21,7 @@ for match in source_pattern.findall(source_content): # Read contest_opponents.h content for file in glob.glob('./src/data/contest_opponents.h'): - with open(file, 'r') as f: + with open(file, 'r', encoding="utf-8") as f: destination_content = f.read() # Modify contest_opponents.h content @@ -40,6 +40,6 @@ modified_content = destination_pattern.sub(add_filter_data, destination_content) # Write the modified content back to contest_opponents.h for file in glob.glob('./src/data/contest_opponents.h'): - with open(file, 'w') as f: + with open(file, 'w', encoding="utf-8") as f: f.write(modified_content) print("contest_opponents.h has been updated") diff --git a/migration_scripts/1.11/convert_battle_frontier_trainers.py b/migration_scripts/1.11/convert_battle_frontier_trainers.py index 0ef9bf3cd8..7c5bf62f0a 100644 --- a/migration_scripts/1.11/convert_battle_frontier_trainers.py +++ b/migration_scripts/1.11/convert_battle_frontier_trainers.py @@ -8,7 +8,7 @@ if not os.path.exists("Makefile"): # Read battle_frontier_trainer_mons.h and extract the party information for file in 
glob.glob('./src/data/battle_frontier/battle_frontier_trainer_mons.h'): - with open(file, 'r') as f: + with open(file, 'r', encoding="utf-8") as f: source_content = f.read() # Extract party info from battle_frontier_trainer_mons.h @@ -21,7 +21,7 @@ for match in source_pattern.findall(source_content): # Read battle_frontier_trainers.h content for file in glob.glob('./src/data/battle_frontier/battle_frontier_trainers.h'): - with open(file, 'r') as f: + with open(file, 'r', encoding="utf-8") as f: destination_content = f.read() # Modify battle_frontier_trainers.h content @@ -39,6 +39,6 @@ modified_content = destination_pattern.sub(add_party_data, destination_content) # Write the modified content back to battle_frontier_trainers.h for file in glob.glob('./src/data/battle_frontier/battle_frontier_trainers.h'): - with open(file, 'w') as f: + with open(file, 'w', encoding="utf-8") as f: f.write(modified_content) print("battle_frontier_trainers.h has been updated") diff --git a/migration_scripts/1.8/item_ball_refactor.py b/migration_scripts/1.8/item_ball_refactor.py index 364a87d42d..a2bb34d37d 100755 --- a/migration_scripts/1.8/item_ball_refactor.py +++ b/migration_scripts/1.8/item_ball_refactor.py @@ -18,13 +18,13 @@ array_pories = [] # make a list of which script corresponds to which item for file in incs_to_check: - with open(file, "r") as f2: + with open(file, "r", encoding="utf-8") as f2: raw = f2.read() array += re.findall("(.*)::\n[ ]*finditem (.*)\n[ ]*end", raw) # since this doesn't catch poryscript-generated inc files, do the same for poryscript for file in pories_to_check: - with open(file, "r") as f2: + with open(file, "r", encoding="utf-8") as f2: raw = f2.read() array_pories += re.findall("script ([\w]*)[ \n]*\{[ \n]*finditem\((.*)\)[ \n]*\}", raw) @@ -38,7 +38,7 @@ for x in array: # apply changes to inc files for map in glob.glob('./data/maps/*/map.json'): - with open(map, "r") as f2: + with open(map, "r", encoding="utf-8") as f2: data = json.load(f2) if 
not 'object_events' in data: continue @@ -46,13 +46,13 @@ for map in glob.glob('./data/maps/*/map.json'): if objevent["script"] in dict: objevent["trainer_sight_or_berry_tree_id"] = dict[objevent["script"]] objevent["script"] = "Common_EventScript_FindItem" - with open(map, "w") as f2: + with open(map, "w", encoding="utf-8") as f2: f2.write(json.dumps(data, indent=2) + "\n") # do another map search to find out which finditem scripts would somehow be still in use still_in_use = [] for map in glob.glob('./data/maps/*/map.json'): - with open(map, "r") as f2: + with open(map, "r", encoding="utf-8") as f2: data = json.load(f2) if not 'object_events' in data: continue @@ -66,20 +66,20 @@ for x in list(dict.keys()): # clean up scripts that are now no longer in use for file in incs_to_check: - with open(file, "r") as f2: + with open(file, "r", encoding="utf-8") as f2: raw = f2.read() for unused in list(dict.keys()): raw = re.sub("%s::\n[ ]*finditem (.*)\n[ ]*end\n*" % unused, "", raw) - with open(file, "w") as f2: + with open(file, "w", encoding="utf-8") as f2: f2.write(raw) # also clean up pory files for file in pories_to_check: - with open(file, "r") as f2: + with open(file, "r", encoding="utf-8") as f2: raw = f2.read() for unused in list(dict.keys()): raw = re.sub("script %s[ \n]*\{[ \n]*finditem\((.*)\)[ \n]*\}[ \n]*" % unused, "", raw) - with open(file, "w") as f2: + with open(file, "w", encoding="utf-8") as f2: f2.write(raw) print("Done!") diff --git a/migration_scripts/1.9/battle_anim_moves_refactor.py b/migration_scripts/1.9/battle_anim_moves_refactor.py index a7adac98ff..6302f628ee 100644 --- a/migration_scripts/1.9/battle_anim_moves_refactor.py +++ b/migration_scripts/1.9/battle_anim_moves_refactor.py @@ -8,7 +8,7 @@ def IsCommaMissing(line: str): return False return True -input_file = open('./src/data/moves_info.h', 'r') +input_file = open('./src/data/moves_info.h', 'r', encoding="utf-8") lines = input_file.readlines() input_file.close() @@ -41,23 +41,23 @@ for 
line in lines: moves_info_lines.append(line) -output_file_mi = open('./src/data/moves_info.h', 'w') +output_file_mi = open('./src/data/moves_info.h', 'w', encoding="utf-8") output_file_mi.writelines(moves_info_lines) output_file_mi.close() -output_file_bas = open('./include/battle_anim_scripts.h', 'w') +output_file_bas = open('./include/battle_anim_scripts.h', 'w', encoding="utf-8") output_file_bas.writelines('#ifndef GUARD_BATTLE_ANIM_SCRIPTS_H\n') output_file_bas.writelines('#define GUARD_BATTLE_ANIM_SCRIPTS_H\n\n') output_file_bas.writelines(battle_anim_lines) output_file_bas.writelines('\n#endif // GUARD_BATTLE_ANIM_SCRIPTS_H\n') output_file_bas.close() -b_anim_scripts_s = open('./data/battle_anim_scripts.s', 'r') +b_anim_scripts_s = open('./data/battle_anim_scripts.s', 'r', encoding="utf-8") lines = b_anim_scripts_s.read() b_anim_scripts_s.close() lines = re.sub(r'(Move_[A-Za-z0-9_]*)([:]+)', r'\1::', lines) -b_anim_scripts_s = open('./data/battle_anim_scripts.s', 'w') +b_anim_scripts_s = open('./data/battle_anim_scripts.s', 'w', encoding="utf-8") b_anim_scripts_s.write(lines) b_anim_scripts_s.close() diff --git a/migration_scripts/1.9/battle_frontier_convert_parties.py b/migration_scripts/1.9/battle_frontier_convert_parties.py index 7a7a000d1b..ac9df6f756 100644 --- a/migration_scripts/1.9/battle_frontier_convert_parties.py +++ b/migration_scripts/1.9/battle_frontier_convert_parties.py @@ -51,12 +51,12 @@ def battle_frontier_mons(data): return data -with open('src/data/battle_frontier/battle_frontier_mons.h', 'r') as file: +with open('src/data/battle_frontier/battle_frontier_mons.h', 'r', encoding="utf-8") as file: data = file.read() -with open('src/data/battle_frontier/battle_frontier_mons.h', 'w') as file: +with open('src/data/battle_frontier/battle_frontier_mons.h', 'w', encoding="utf-8") as file: file.write(battle_frontier_mons(data)) -with open('src/data/battle_frontier/battle_tent.h', 'r') as file: +with open('src/data/battle_frontier/battle_tent.h', 
'r', encoding="utf-8") as file: data = file.read() -with open('src/data/battle_frontier/battle_tent.h', 'w') as file: +with open('src/data/battle_frontier/battle_tent.h', 'w', encoding="utf-8") as file: file.write(battle_frontier_mons(data)) diff --git a/migration_scripts/1.9/convert_item_icons.py b/migration_scripts/1.9/convert_item_icons.py index 8bd45a86ae..55639cb3bb 100644 --- a/migration_scripts/1.9/convert_item_icons.py +++ b/migration_scripts/1.9/convert_item_icons.py @@ -8,7 +8,7 @@ if not os.path.exists("Makefile"): # Read item_icon_table.h and extract the icon and palette information for file in glob.glob('./src/data/item_icon_table.h'): - with open(file, 'r') as f: + with open(file, 'r', encoding="utf-8") as f: icon_table_content = f.read() # Extract item icon and palette data from item_icon_table.h @@ -21,7 +21,7 @@ for match in icon_table_pattern.findall(icon_table_content): # Read items.h content for file in glob.glob('./src/data/items.h'): - with open(file, 'r') as f: + with open(file, 'r', encoding="utf-8") as f: items_content = f.read() # Modify items.h content @@ -40,6 +40,6 @@ modified_items_content = item_pattern.sub(add_icon_data, items_content) # Write the modified content back to items.h for file in glob.glob('./src/data/items.h'): - with open(file, 'w') as f: + with open(file, 'w', encoding="utf-8") as f: f.write(modified_items_content) print("items.h has been updated") diff --git a/migration_scripts/1.9/convert_partner_parties.py b/migration_scripts/1.9/convert_partner_parties.py index e2da91499a..b82309068a 100644 --- a/migration_scripts/1.9/convert_partner_parties.py +++ b/migration_scripts/1.9/convert_partner_parties.py @@ -314,6 +314,6 @@ if __name__ == '__main__': except: print(f"usage: python3 {sys.argv[0]} ") else: - with open(trainers_in_path, "r") as trainers_in_h, open(parties_in_path, "r") as parties_in_h, open(out_path, "w") as out_party: + with open(trainers_in_path, "r", encoding="utf-8") as trainers_in_h, 
open(parties_in_path, "r", encoding="utf-8") as parties_in_h, open(out_path, "w", encoding="utf-8") as out_party: parties = convert_parties(parties_in_path, parties_in_h) trainers = convert_trainers(trainers_in_path, trainers_in_h, parties, out_party) diff --git a/migration_scripts/1.9/convert_trainer_parties.py b/migration_scripts/1.9/convert_trainer_parties.py index 83c9376ae0..f2fa651b17 100644 --- a/migration_scripts/1.9/convert_trainer_parties.py +++ b/migration_scripts/1.9/convert_trainer_parties.py @@ -325,6 +325,6 @@ if __name__ == '__main__': except: print(f"usage: python3 {sys.argv[0]} ") else: - with open(trainers_in_path, "r") as trainers_in_h, open(parties_in_path, "r") as parties_in_h, open(out_path, "w") as out_party: + with open(trainers_in_path, "r", encoding="utf-8") as trainers_in_h, open(parties_in_path, "r", encoding="utf-8") as parties_in_h, open(out_path, "w", encoding="utf-8") as out_party: parties = convert_parties(parties_in_path, parties_in_h) trainers = convert_trainers(trainers_in_path, trainers_in_h, parties, out_party) diff --git a/migration_scripts/1.9/egg_move_refactor.py b/migration_scripts/1.9/egg_move_refactor.py index 64bac84f6e..09a4daef72 100644 --- a/migration_scripts/1.9/egg_move_refactor.py +++ b/migration_scripts/1.9/egg_move_refactor.py @@ -9,7 +9,7 @@ exceptions = [ # the following exceptions are hardcoded to streamline the proces ] # convert egg_moves.h to the new format -with open("src/data/pokemon/egg_moves.h", "r") as f: +with open("src/data/pokemon/egg_moves.h", "r", encoding="utf-8") as f: data = f.read() data = re.sub(r"#define(.|\n)*const u16 gEggMoves\[\] = {", "static const u16 sNoneEggMoveLearnset[] = {\n MOVE_UNAVAILABLE,\n};\n", data) # remove and replace header @@ -29,13 +29,13 @@ data = re.sub(r"\),\n", ",\n MOVE_UNAVAILABLE,\n};\n", data) # add termin data = re.sub(r" MOVE_", " MOVE_", data) # fix indentation -with open("src/data/pokemon/egg_moves.h", "w") as f: +with open("src/data/pokemon/egg_moves.h", 
"w", encoding="utf-8") as f: f.write(data) # update gBaseStats for file in glob.glob('./src/data/pokemon/species_info/gen_*_families.h'): - with open(file, "r") as f: + with open(file, "r", encoding="utf-8") as f: data = f.read() # go through all Pokemon with teachable learnsets that are also in the list, then assign egg moves to them @@ -47,5 +47,5 @@ for file in glob.glob('./src/data/pokemon/species_info/gen_*_families.h'): if len(macrocheck) > 0: data = re.sub(r"\.teachableLearnset = s" + mon + r"TeachableLearnset," + macrocheck[0] + r"\\\\", ".teachableLearnset = s%sTeachableLearnset,%s\\\\\n .eggMoveLearnset = s%sEggMoveLearnset,%s\\\\" % (mon, macrocheck[0], mon, " " * (len(macrocheck[0]) + 4)), data) - with open(file, "w") as f: + with open(file, "w", encoding="utf-8") as f: f.write(data) diff --git a/tools/learnset_helpers/teachable.py b/tools/learnset_helpers/teachable.py index f4d74ceb6a..49628dea48 100644 --- a/tools/learnset_helpers/teachable.py +++ b/tools/learnset_helpers/teachable.py @@ -4,7 +4,7 @@ import json import os # before all else, abort if the config is off -with open("./include/config/pokemon.h", "r") as file: +with open("./include/config/pokemon.h", "r", encoding="utf-8") as file: learnset_config = re.findall(r"#define P_LEARNSET_HELPER_TEACHABLE *([^ ]*)", file.read()) if len(learnset_config) != 1: quit() @@ -25,7 +25,7 @@ if len(incs_to_check) == 0: # disabled if no jsons present quit() for file in incs_to_check: - with open(file, 'r') as f2: + with open(file, 'r', encoding="utf-8") as f2: raw = f2.read() if 'special ChooseMonForMoveTutor' in raw: for x in re.findall(r'setvar VAR_0x8005, (MOVE_.*)', raw): @@ -33,14 +33,14 @@ for file in incs_to_check: tutor_moves.append(x) # scan TMs and HMs -with open("./include/constants/tms_hms.h", 'r') as file: +with open("./include/constants/tms_hms.h", 'r', encoding="utf-8") as file: for x in re.findall(r'F\((.*)\)', file.read()): if not 'MOVE_' + x in tm_moves: tm_moves.append('MOVE_' + x) # look 
up universal moves to exclude them universal_moves = [] -with open("./src/pokemon.c", "r") as file: +with open("./src/pokemon.c", "r", encoding="utf-8") as file: for x in re.findall(r"static const u16 sUniversalMoves\[\] =(.|\n)*?{((.|\n)*?)};", file.read())[0]: x = x.replace("\n", "") for y in x.split(","): @@ -53,7 +53,7 @@ with open("./src/pokemon.c", "r") as file: def construct_compatibility_dict(force_custom_check): dict_out = {} for pth in glob.glob('./tools/learnset_helpers/porymoves_files/*.json'): - f = open(pth, 'r') + f = open(pth, 'r', encoding="utf-8") data = json.load(f) for mon in data.keys(): if not mon in dict_out: @@ -75,7 +75,7 @@ def construct_compatibility_dict(force_custom_check): dict_out[mon].append(move) # if the file was not previously generated, check if there is custom data there that needs to be preserved - with open("./src/data/pokemon/teachable_learnsets.h", 'r') as file: + with open("./src/data/pokemon/teachable_learnsets.h", 'r', encoding="utf-8") as file: raw = file.read() if not "// DO NOT MODIFY THIS FILE!" 
in raw and force_custom_check == True: custom_teachable_compatibilities = {} @@ -103,7 +103,7 @@ def construct_compatibility_dict(force_custom_check): custom_teachable_compatibilities[monname].append(move) # actually store the data in custom.json if os.path.exists("./tools/learnset_helpers/porymoves_files/custom.json"): - f2 = open("./tools/learnset_helpers/porymoves_files/custom.json", "r") + f2 = open("./tools/learnset_helpers/porymoves_files/custom.json", "r", encoding="utf-8") custom_json = json.load(f2) f2.close() else: @@ -115,7 +115,7 @@ def construct_compatibility_dict(force_custom_check): custom_json[x] = {"LevelMoves": [], "PreEvoMoves": [], "TMMoves": [], "EggMoves": [], "TutorMoves": []} for move in custom_teachable_compatibilities[x]: custom_json[x]["TutorMoves"].append(move) - f2 = open("./tools/learnset_helpers/porymoves_files/custom.json", "w") + f2 = open("./tools/learnset_helpers/porymoves_files/custom.json", "w", encoding="utf-8") f2.write(json.dumps(custom_json, indent=2)) f2.close() print("FIRST RUN: Updated custom.json with teachable_learnsets.h's data") @@ -126,7 +126,7 @@ def construct_compatibility_dict(force_custom_check): compatibility_dict = construct_compatibility_dict(True) # actually prepare the file -with open("./src/data/pokemon/teachable_learnsets.h", 'r') as file: +with open("./src/data/pokemon/teachable_learnsets.h", 'r', encoding="utf-8") as file: out = file.read() list_of_mons = re.findall(r'static const u16 s(.*)TeachableLearnset', out) for mon in list_of_mons: @@ -210,5 +210,5 @@ if not "// DO NOT MODIFY THIS FILE!" in out: else: out = re.sub(r"\/\/\n\/\/ DO NOT MODIFY THIS FILE!(.|\n)*\* \/\/\n\n", header, out) -with open("./src/data/pokemon/teachable_learnsets.h", 'w') as file: +with open("./src/data/pokemon/teachable_learnsets.h", 'w', encoding="utf-8", newline="\n") as file: file.write(out)