You can immediately reduce position 17 to a short word list ranging from "able" to "about". For positions 18 to 24, good luck choosing words.

How?

import mnemonic
import secp256k1 as ice
import multiprocessing
import time
import random
import os
from multiprocessing import cpu_count
# Configuration
TARGET_HASH160 = "f6f5431d25bbf7b12e8add9af5e3475c44a0a5b8"  # target hash160 (hex)
FIXED_WORDS = ["abandon"] * 16  # 16 fixed words (mnemonic positions 1-16)
LANGUAGE = 'english'
BATCH_SIZE = 10000  # Print progress every X valid attempts

# Load custom wordlists for positions 17 to 24 (i.e., indices 16 to 23).
# Each file <n>.txt holds one candidate word per line for that position.
CUSTOM_WORDLISTS = []
for i in range(17, 25):  # 17.txt to 24.txt
    filename = f"{i}.txt"
    if not os.path.isfile(filename):
        # Bug fix: the message previously lost its placeholder and never
        # named the missing file.
        raise FileNotFoundError(f"Missing wordlist file: {filename}")
    with open(filename, 'r') as f:
        words = [line.strip() for line in f if line.strip()]
    if not words:
        # Fail loudly here instead of letting random.choice raise an
        # IndexError that the workers would silently swallow.
        raise ValueError(f"Wordlist file is empty: {filename}")
    CUSTOM_WORDLISTS.append(words)

# Shared counters (visible to worker processes under the fork start method).
# 'q' (signed 64-bit) instead of 'i' so long runs cannot overflow the counter.
attempts = multiprocessing.Value('q', 0)        # Total tries
valid_attempts = multiprocessing.Value('q', 0)  # Valid mnemonics only
start_time = time.time()
lock = multiprocessing.Lock()

# Initialize mnemonic object and target hash
mnemo = mnemonic.Mnemonic(LANGUAGE)
target_binary = bytes.fromhex(TARGET_HASH160)
def worker(result_queue):
    """Brute-force worker: endlessly sample candidate mnemonics and test
    them against TARGET_HASH160.

    Puts a (True, phrase, privkey_hex, h160_hex) tuple on result_queue and
    returns when a match is found; otherwise loops forever (the parent
    terminates workers externally).
    """
    while True:
        try:
            # One random word per position 17-24 from its custom list
            random_words = [random.choice(lst) for lst in CUSTOM_WORDLISTS]
            # Build the full 24-word candidate mnemonic
            candidate_words = FIXED_WORDS + random_words
            mnemonic_phrase = ' '.join(candidate_words)

            is_valid = mnemo.check(mnemonic_phrase)

            # Bug fix: update both counters in a single critical section and
            # snapshot the valid count under the lock, so the progress check
            # below does not race concurrent increments by other workers.
            with lock:
                attempts.value += 1
                if is_valid:
                    valid_attempts.value += 1
                    valid_count = valid_attempts.value

            if not is_valid:
                continue  # checksum failed; not a real BIP-39 mnemonic

            # Progress update
            if valid_count % BATCH_SIZE == 0:
                elapsed = time.time() - start_time
                rate = valid_count / max(1, elapsed)
                print(f"[Valid] {valid_count:,} | {rate:,.0f} valid/sec | Current: {' '.join(random_words)}")

            # Convert mnemonic to entropy and use it as the private key.
            # NOTE(review): this uses the raw BIP-39 entropy, not the
            # PBKDF2-derived seed -- presumably intentional for this puzzle;
            # confirm against the target's key-derivation scheme.
            entropy = mnemo.to_entropy(candidate_words)
            private_key_int = int.from_bytes(entropy, 'big')

            # Compute hash160 of the compressed public key
            h160 = ice.privatekey_to_h160(0, True, private_key_int)

            # Match found?
            if h160 == target_binary:
                result_queue.put((
                    True,
                    mnemonic_phrase,
                    private_key_int.to_bytes(32, 'big').hex(),
                    h160.hex(),
                ))
                return
        except Exception:
            # Deliberate best-effort search: skip any candidate that raises
            # rather than killing the worker. NOTE(review): this also hides
            # real bugs -- consider logging the exception during development.
            continue
if __name__ == '__main__':
    print(f"Starting random mnemonic search with {multiprocessing.cpu_count()} workers...")

    # Best-effort: raise process priority (needs privileges for a negative
    # nice) and pin to all cores (needs psutil). Skipped when unavailable.
    try:
        os.nice(-15)
        import psutil
        p = psutil.Process()
        p.cpu_affinity(list(range(cpu_count())))
    except Exception:
        # Bug fix: narrowed from a bare `except:` so KeyboardInterrupt and
        # SystemExit are no longer swallowed here.
        pass

    result_queue = multiprocessing.Queue()
    processes = []

    # Start one worker per CPU core.
    # NOTE(review): the shared Value counters and CUSTOM_WORDLISTS are only
    # inherited under the 'fork' start method (Linux default); on
    # spawn-based platforms the totals below will not reflect the workers.
    for _ in range(multiprocessing.cpu_count()):
        p = multiprocessing.Process(target=worker, args=(result_queue,))
        processes.append(p)
        p.start()

    try:
        # Block until some worker reports a match
        while True:
            success, mnemonic_phrase, privkey_hex, found_hash160 = result_queue.get()
            if success:
                print("\nSUCCESS! Found matching mnemonic:")
                print(f"Full Mnemonic: {mnemonic_phrase}")
                print(f"Private Key: {privkey_hex}")
                print(f"Hash160: {found_hash160}")
                break
    except KeyboardInterrupt:
        print("\n[!] Stopping workers...")

    # Bug fix: terminate and reap the workers on BOTH exit paths (success
    # and Ctrl-C), not only on interrupt, so no orphan processes remain.
    for p in processes:
        p.terminate()
    for p in processes:
        p.join()

    # Final summary from the shared counters
    elapsed = time.time() - start_time
    print(f"\nTotal attempts: {attempts.value:,}")
    print(f"Valid mnemonics tested: {valid_attempts.value:,}")
    print(f"Time elapsed: {elapsed:.2f} seconds")
    print(f"Speed: {valid_attempts.value / max(1, elapsed):,.0f} valid mnemonics/sec")
Make sure you have these files in the same directory:
17.txt, 18.txt, 19.txt, 20.txt, 21.txt, 22.txt, 23.txt and 24.txt
17.txt content:
able
about
above
(17.txt will only work for puzzle 71 with these words)
The fewer words these files contain, the faster the entire script will run.
