Next scheduled rescrape ... never
Version 1
Last scraped
Edited on 25/04/2025, 12:51:50 UTC
if you want a perfectly uniform distribution just replace it by a shuffled array of all numbers from 0-N.
Results will be the same.

Your script is biased in the following ways:

1- It does not use H160.

2- It loads the entire set, which means it does not take into account the computational power saved in operations on the curve, double SHA256, and RIPEMD-160 when searching statistically.

3- It does not apply a viable prefix logic.

However, here is this slightly adjusted script, using short prefixes and skipping the computational power saved in operations on the curve and double-SHA256.

It's not 100% fair for what you want in a search by prefixes since it’s implemented at half-capacity, but at least you’ll be able to verify that it’s not as useless as it might seem.

Code:
// Node core modules: high-resolution timers and hashing primitives.
const { performance } = require("perf_hooks");
const crypto = require("crypto");

// Simulation parameters, read as module-level constants by the search
// functions below.
const TOTAL_SIZE = 100_000;          // size of the simulated key space (keys 0..TOTAL_SIZE-1)
const RANGE_SIZE = 5_000;            // keys per block; blocks are visited in random order
const PREFIX_LENGTH = 3;             // leading hex chars of the target hash to match
const PREFIX_THRESHOLD = 1;          // prefix matches per block before deferring its remainder
const NUMBER_SIMULATIONS = 1000;     // independent benchmark iterations
const SKIP_SIZE = Math.round(4096 * 0.3);  // keys deferred after each prefix match (~1229)

// Print the run configuration before any work starts.
console.log(`
=== Configuration ===
Total numbers: ${TOTAL_SIZE.toLocaleString()}
Block size: ${RANGE_SIZE.toLocaleString()}
Prefix: ${PREFIX_LENGTH} chars (${16 ** PREFIX_LENGTH} comb.)
Skip ${SKIP_SIZE} keys per prefix match
Threshold: ${PREFIX_THRESHOLD} matches/block
Simulations: ${NUMBER_SIMULATIONS}
`);

/**
 * Hashes a value's string form with RIPEMD-160 (stand-in for a full
 * EC-multiply + SHA-256 + RIPEMD-160 pipeline).
 *
 * @param {*} data Value to hash; its `toString()` form is digested.
 * @returns {string} 40-char lowercase hex digest.
 */
function generateH160(data) {
  const hasher = crypto.createHash("ripemd160");
  hasher.update(data.toString());
  return hasher.digest("hex");
}

/**
 * Builds the array [0, 1, ..., n] and shuffles it in place with the
 * Fisher–Yates algorithm (uniform given an unbiased Math.random).
 *
 * @param {number} n Largest value to include (inclusive).
 * @returns {number[]} A random permutation of 0..n.
 */
function shuffledRange(n) {
  const values = [];
  for (let v = 0; v <= n; v++) values.push(v);
  for (let last = values.length - 1; last > 0; last--) {
    const pick = Math.floor(Math.random() * (last + 1));
    const tmp = values[last];
    values[last] = values[pick];
    values[pick] = tmp;
  }
  return values;
}

/**
 * Baseline search: visits the blocks in a random order and scans each block
 * front to back, hashing every key until targetHash is found.
 *
 * @param {number} totalSize  Size of the key space.
 * @param {number} rangeSize  Number of keys per block.
 * @param {string} targetHash Hex H160 digest being searched for.
 * @returns {number} Count of keys hashed up to and including the hit, or the
 *                   total scanned count if the target never matched.
 */
function sequentialRandom(totalSize, rangeSize, targetHash) {
  const blockCount = Math.floor(totalSize / rangeSize);
  let checked = 0;

  for (const block of shuffledRange(blockCount - 1)) {
    const first = block * rangeSize;
    const limit = first + rangeSize;

    let key = first;
    while (key < limit) {
      const digest = generateH160(key);
      checked++;
      if (digest === targetHash) return checked;
      key++;
    }
  }
  return checked;
}

/**
 * Prefix-guided search. Blocks are visited in random order; within a block,
 * keys are hashed sequentially, but each time a hash shares the target's
 * prefix the next SKIP_SIZE keys are deferred rather than checked. Once
 * PREFIX_THRESHOLD prefix matches occur in a block, its remainder is also
 * deferred. All deferred ranges are then scanned in random order.
 *
 * NOTE: PREFIX_THRESHOLD and SKIP_SIZE are read from module-level constants
 * even though prefixLength arrives as a parameter (kept for compatibility).
 *
 * @param {number} totalSize    Size of the key space.
 * @param {number} rangeSize    Number of keys per block.
 * @param {number} prefixLength Leading hex chars of targetHash to compare.
 * @param {string} targetHash   Hex H160 digest being searched for.
 * @returns {number} Count of keys hashed up to and including the hit, or the
 *                   total scanned count if the target never matched.
 */
function preciseSkipSearch(totalSize, rangeSize, prefixLength, targetHash) {
  const numRanges = Math.floor(totalSize / rangeSize);
  const rangeOrder = shuffledRange(numRanges - 1);
  let keyChecks = 0;
  const targetPrefix = targetHash.substring(0, prefixLength);
  // Deferred [start, end] (inclusive) spans revisited after the first pass.
  const skippedRanges = [];

  for (const blockIndex of rangeOrder) {
    const start = blockIndex * rangeSize;
    const end = (blockIndex + 1) * rangeSize;  // exclusive block bound
    let prefixCount = 0;
    let current = start;

    while (current < end && prefixCount < PREFIX_THRESHOLD) {
      const hash = generateH160(current);
      keyChecks++;

      if (hash === targetHash) return keyChecks;

      if (hash.startsWith(targetPrefix)) {
        prefixCount++;
        const skipStart = current + 1;
        const skipEnd = Math.min(current + SKIP_SIZE, end);

        // FIX (off-by-one): the deferred scan below is inclusive, and the
        // while loop resumes at `current = skipEnd`, so the deferred span
        // must stop at skipEnd - 1. The old `end: skipEnd` hashed the key at
        // skipEnd twice (or, when clamped to the block bound, hashed the
        // first key of the NEXT block).
        if (skipStart < skipEnd) {
          skippedRanges.push({ blockIndex, start: skipStart, end: skipEnd - 1 });
        }
        current = skipEnd;
      } else {
        current++;
      }
    }

    if (prefixCount >= PREFIX_THRESHOLD) {
      // FIX (off-by-one): the key at `current` was never hashed (the loop
      // exited before checking it), so the remainder starts AT `current`,
      // not current + 1 — otherwise that key could never be found.
      skippedRanges.push({
        blockIndex,
        start: current,
        end: end - 1
      });
    }
  }

  // Second pass: revisit the deferred spans in random order.
  const shuffledIndices = shuffledRange(skippedRanges.length - 1);
  for (const i of shuffledIndices) {
    const { start, end } = skippedRanges[i];
    for (let num = start; num <= end; num++) {
      const hash = generateH160(num);
      keyChecks++;
      if (hash === targetHash) return keyChecks;
    }
  }

  return keyChecks;
}

/**
 * Benchmark driver: pits sequentialRandom() against preciseSkipSearch()
 * over NUMBER_SIMULATIONS randomly drawn targets and prints aggregate
 * statistics (mean, range, standard deviation, average wall time).
 *
 * Execution order of the two methods alternates each iteration so neither
 * systematically benefits from JIT/cache warm-up.
 *
 * Declared async for call-site compatibility; nothing inside is awaited.
 */
async function compareMethods() {
  console.log("Warm-up...");
  // Prime the hash path (and the JIT) before any timed work.
  for (let i = 0; i < 5; i++) generateH160(i);

  const results = {
    sequential: { checks: [], times: [] },
    precise: { checks: [], times: [] }
  };

  console.log("\nStarting comparison...");
  const startTime = performance.now();

  for (let i = 0; i < NUMBER_SIMULATIONS; i++) {
    // Fresh random target per iteration; both methods hunt the same one.
    const targetNum = Math.floor(Math.random() * TOTAL_SIZE);
    const targetHash = generateH160(targetNum);

    if (i % 2 === 0) {
      const startSeq = performance.now();
      results.sequential.checks.push(sequentialRandom(TOTAL_SIZE, RANGE_SIZE, targetHash));
      results.sequential.times.push(performance.now() - startSeq);

      const startPrecise = performance.now();
      results.precise.checks.push(preciseSkipSearch(TOTAL_SIZE, RANGE_SIZE, PREFIX_LENGTH, targetHash));
      results.precise.times.push(performance.now() - startPrecise);
    } else {
      const startPrecise = performance.now();
      results.precise.checks.push(preciseSkipSearch(TOTAL_SIZE, RANGE_SIZE, PREFIX_LENGTH, targetHash));
      results.precise.times.push(performance.now() - startPrecise);

      const startSeq = performance.now();
      results.sequential.checks.push(sequentialRandom(TOTAL_SIZE, RANGE_SIZE, targetHash));
      results.sequential.times.push(performance.now() - startSeq);
    }

    if ((i + 1) % 10 === 0) console.log(`Test ${i + 1}/${NUMBER_SIMULATIONS}`);
  }

  // Mean, extrema and (population) standard deviation of a numeric sample.
  const calculateStats = (data) => {
    const avg = data.reduce((a, b) => a + b, 0) / data.length;
    const squaredDiffs = data.map(x => Math.pow(x - avg, 2));
    const variance = squaredDiffs.reduce((a, b) => a + b, 0) / data.length;
    const stdDev = Math.sqrt(variance);
    return { avg, min: Math.min(...data), max: Math.max(...data), stdDev };
  };

  const seqStats = calculateStats(results.sequential.checks);
  const preciseStats = calculateStats(results.precise.checks);
  const seqTime = results.sequential.times.reduce((a, b) => a + b, 0) / NUMBER_SIMULATIONS;
  const preciseTime = results.precise.times.reduce((a, b) => a + b, 0) / NUMBER_SIMULATIONS;

  // Positive % = prefix method did better; negative = it did worse.
  const improvement = ((seqStats.avg - preciseStats.avg) / seqStats.avg * 100).toFixed(2);
  const timeImprovement = ((seqTime - preciseTime) / seqTime * 100).toFixed(2);

  // FIX: stdDev was computed by calculateStats but never reported; it is now
  // part of the summary so run-to-run spread is visible.
  console.log(`
=== FINAL RESULTS ===

Random Sequential:
- Average comparisons: ${seqStats.avg.toFixed(0)}
- Std deviation: ${seqStats.stdDev.toFixed(0)}
- Average time: ${(seqTime / 1000).toFixed(3)}s
- Range: [${seqStats.min} - ${seqStats.max}]

Prefixes Search (${SKIP_SIZE} keys skip + threshold ${PREFIX_THRESHOLD}):
- Average comparisons: ${preciseStats.avg.toFixed(0)}
- Std deviation: ${preciseStats.stdDev.toFixed(0)}
- Average time: ${(preciseTime / 1000).toFixed(3)}s
- Range: [${preciseStats.min} - ${preciseStats.max}]

=== CONCLUSION ===
The ${preciseStats.avg < seqStats.avg ? 'PREFIXES' : 'RANDOM+SEQUENTIAL'} method is more efficient:
- ${improvement}% fewer comparisons
- ${timeImprovement}% faster
`);

  console.log(`Total time: ${((performance.now() - startTime)/1000).toFixed(1)} seconds`);
}

compareMethods();

I'm glad we can now expand on our knowledge of math together.
Did you run your own script ? Because I did. And the results are exactly the same - no improvement over the random sequential method.

Code:
=== FINAL RESULTS ===

Random Sequential:
- Average comparisons: 49649
- Average time: 0.061s
- Range: [6 - 99990]

Prefixes Search (1229 keys skip + threshold 1):
- Average comparisons: 49905
- Average time: 0.062s
- Range: [17 - 99974]

=== CONCLUSION ===
The RANDOM+SEQUENTIAL method is more efficient:
- -0.52% fewer comparisons
- -2.23% faster

Total time: 122.9 seconds

1- It does not use H160.
3- It does not apply a viable prefix logic.
Now it does !

2- It loads the entire set, which means it does not take into account the computational power saved in operations on the curve, double SHA256, and RIPEMD-160 when searching statistically.

Should we now improve it to add EC multiply and SHA256 so that we're EXACTLY in the context you describe ?
We'll find again that prefixes have NO impact.

I'm super happy to do so if you're not yet convinced. I have all the time in the world and this will be a very good starting point next time someone comes here and talks about prefixes.
Let me also know if there is anything left that is different from the real situation, so that I do it all in one go and we don't waste time.


I created a script under the worst possible conditions for prefix searching (intentionally). Even so, if you run several tests, you'll see that the prefix search achieves the same frequency of success as yours in this particular case, though you only publish instances where your method wins.

I designed this to strengthen your concept, spark your interest, and start with a mathematically fair foundation.

Aiming to prevent situations like this:

Should we now improve it to add EC multiply and SHA256 so that we're EXACTLY in the context you describe ?

Nah, you'd still be biased:

- it's not running in the 69 range; the immutable set is not the same....
- it's not searching for the correct prefix; I mean, birthday paradox......
- it's not finding the key as fast as it finds it once you have a million prefixes known in advance... wrong proximities calculated....
- wrong parameters; they need to be this or that so the results are better

etc...

You have too much free time Cheesy Cheesy Cheesy

You have no idea. I can do this for as long as I need to Smiley
Now that we have a tangible base to work with, it's only a matter of time until we reach the empirical truth Smiley

How could I take them seriously and make a proposal on equal terms in the pursuit of truth if I receive childish responses?

I don’t have as much time as you do; I work, and I don’t have a PC that can run quick tests.

I don’t want to waste time making scripts that won’t provide anything beyond childish things.

It wouldn’t even be hard for me to accept that I’m wrong if a properly conducted test determines it. In any case, why should failing be bad? The only thing that matters is reaching the truth under correct conditions, right?


Quote
Did you run your own script ?

Code:
=== FINAL RESULTS ===

Random Sequential:
- Average comparisons: 50851
- Average time: 2.813s
- Range: [30 - 99910]

Prefixes Search (1229 keys skip + threshold 1):
- Average comparisons: 48709
- Average time: 2.729s
- Range: [217 - 99910]

=== CONCLUSION ===
The PREFIXES method is more efficient:
- 4.21% fewer comparisons
- 3.00% faster

Total time: 5542.8 seconds
Original archived Re: Bitcoin puzzle transaction ~32 BTC prize to who solves it
Scraped on 18/04/2025, 12:51:44 UTC
if you want a perfectly uniform distribution just replace it by a shuffled array of all numbers from 0-N.
Results will be the same.

Your script is biased in the following ways:

1- It does not use H160.

2- It loads the entire set, which means it does not take into account the computational power saved in operations on the curve, double SHA256, and RIPEMD-160 when searching statistically.

3- It does not apply a viable prefix logic.

However, here is this slightly adjusted script, using short prefixes and skipping the computational power saved in operations on the curve and double-SHA256.

It's not 100% fair for what you want in a search by prefixes since it’s implemented at half-capacity, but at least you’ll be able to verify that it’s not as useless as it might seem.

Code:
// Node core modules: high-resolution timers and hashing primitives.
const { performance } = require("perf_hooks");
const crypto = require("crypto");

// Simulation parameters, read as module-level constants by the search
// functions below.
const TOTAL_SIZE = 100_000;          // size of the simulated key space (keys 0..TOTAL_SIZE-1)
const RANGE_SIZE = 5_000;            // keys per block; blocks are visited in random order
const PREFIX_LENGTH = 3;             // leading hex chars of the target hash to match
const PREFIX_THRESHOLD = 1;          // prefix matches per block before deferring its remainder
const NUMBER_SIMULATIONS = 1000;     // independent benchmark iterations
const SKIP_SIZE = Math.round(4096 * 0.3);  // keys deferred after each prefix match (~1229)

// Print the run configuration before any work starts.
console.log(`
=== Configuration ===
Total numbers: ${TOTAL_SIZE.toLocaleString()}
Block size: ${RANGE_SIZE.toLocaleString()}
Prefix: ${PREFIX_LENGTH} chars (${16 ** PREFIX_LENGTH} comb.)
Skip ${SKIP_SIZE} keys per prefix match
Threshold: ${PREFIX_THRESHOLD} matches/block
Simulations: ${NUMBER_SIMULATIONS}
`);

/**
 * Hashes a value's string form with RIPEMD-160 (stand-in for a full
 * EC-multiply + SHA-256 + RIPEMD-160 pipeline).
 *
 * @param {*} data Value to hash; its `toString()` form is digested.
 * @returns {string} 40-char lowercase hex digest.
 */
function generateH160(data) {
  const hasher = crypto.createHash("ripemd160");
  hasher.update(data.toString());
  return hasher.digest("hex");
}

/**
 * Builds the array [0, 1, ..., n] and shuffles it in place with the
 * Fisher–Yates algorithm (uniform given an unbiased Math.random).
 *
 * @param {number} n Largest value to include (inclusive).
 * @returns {number[]} A random permutation of 0..n.
 */
function shuffledRange(n) {
  const values = [];
  for (let v = 0; v <= n; v++) values.push(v);
  for (let last = values.length - 1; last > 0; last--) {
    const pick = Math.floor(Math.random() * (last + 1));
    const tmp = values[last];
    values[last] = values[pick];
    values[pick] = tmp;
  }
  return values;
}

/**
 * Baseline search: visits the blocks in a random order and scans each block
 * front to back, hashing every key until targetHash is found.
 *
 * @param {number} totalSize  Size of the key space.
 * @param {number} rangeSize  Number of keys per block.
 * @param {string} targetHash Hex H160 digest being searched for.
 * @returns {number} Count of keys hashed up to and including the hit, or the
 *                   total scanned count if the target never matched.
 */
function sequentialRandom(totalSize, rangeSize, targetHash) {
  const blockCount = Math.floor(totalSize / rangeSize);
  let checked = 0;

  for (const block of shuffledRange(blockCount - 1)) {
    const first = block * rangeSize;
    const limit = first + rangeSize;

    let key = first;
    while (key < limit) {
      const digest = generateH160(key);
      checked++;
      if (digest === targetHash) return checked;
      key++;
    }
  }
  return checked;
}

/**
 * Prefix-guided search. Blocks are visited in random order; within a block,
 * keys are hashed sequentially, but each time a hash shares the target's
 * prefix the next SKIP_SIZE keys are deferred rather than checked. Once
 * PREFIX_THRESHOLD prefix matches occur in a block, its remainder is also
 * deferred. All deferred ranges are then scanned in random order.
 *
 * NOTE: PREFIX_THRESHOLD and SKIP_SIZE are read from module-level constants
 * even though prefixLength arrives as a parameter (kept for compatibility).
 *
 * @param {number} totalSize    Size of the key space.
 * @param {number} rangeSize    Number of keys per block.
 * @param {number} prefixLength Leading hex chars of targetHash to compare.
 * @param {string} targetHash   Hex H160 digest being searched for.
 * @returns {number} Count of keys hashed up to and including the hit, or the
 *                   total scanned count if the target never matched.
 */
function preciseSkipSearch(totalSize, rangeSize, prefixLength, targetHash) {
  const numRanges = Math.floor(totalSize / rangeSize);
  const rangeOrder = shuffledRange(numRanges - 1);
  let keyChecks = 0;
  const targetPrefix = targetHash.substring(0, prefixLength);
  // Deferred [start, end] (inclusive) spans revisited after the first pass.
  const skippedRanges = [];

  for (const blockIndex of rangeOrder) {
    const start = blockIndex * rangeSize;
    const end = (blockIndex + 1) * rangeSize;  // exclusive block bound
    let prefixCount = 0;
    let current = start;

    while (current < end && prefixCount < PREFIX_THRESHOLD) {
      const hash = generateH160(current);
      keyChecks++;

      if (hash === targetHash) return keyChecks;

      if (hash.startsWith(targetPrefix)) {
        prefixCount++;
        const skipStart = current + 1;
        const skipEnd = Math.min(current + SKIP_SIZE, end);

        // FIX (off-by-one): the deferred scan below is inclusive, and the
        // while loop resumes at `current = skipEnd`, so the deferred span
        // must stop at skipEnd - 1. The old `end: skipEnd` hashed the key at
        // skipEnd twice (or, when clamped to the block bound, hashed the
        // first key of the NEXT block).
        if (skipStart < skipEnd) {
          skippedRanges.push({ blockIndex, start: skipStart, end: skipEnd - 1 });
        }
        current = skipEnd;
      } else {
        current++;
      }
    }

    if (prefixCount >= PREFIX_THRESHOLD) {
      // FIX (off-by-one): the key at `current` was never hashed (the loop
      // exited before checking it), so the remainder starts AT `current`,
      // not current + 1 — otherwise that key could never be found.
      skippedRanges.push({
        blockIndex,
        start: current,
        end: end - 1
      });
    }
  }

  // Second pass: revisit the deferred spans in random order.
  const shuffledIndices = shuffledRange(skippedRanges.length - 1);
  for (const i of shuffledIndices) {
    const { start, end } = skippedRanges[i];
    for (let num = start; num <= end; num++) {
      const hash = generateH160(num);
      keyChecks++;
      if (hash === targetHash) return keyChecks;
    }
  }

  return keyChecks;
}

/**
 * Benchmark driver: pits sequentialRandom() against preciseSkipSearch()
 * over NUMBER_SIMULATIONS randomly drawn targets and prints aggregate
 * statistics (mean, range, standard deviation, average wall time).
 *
 * Execution order of the two methods alternates each iteration so neither
 * systematically benefits from JIT/cache warm-up.
 *
 * Declared async for call-site compatibility; nothing inside is awaited.
 */
async function compareMethods() {
  console.log("Warm-up...");
  // Prime the hash path (and the JIT) before any timed work.
  for (let i = 0; i < 5; i++) generateH160(i);

  const results = {
    sequential: { checks: [], times: [] },
    precise: { checks: [], times: [] }
  };

  console.log("\nStarting comparison...");
  const startTime = performance.now();

  for (let i = 0; i < NUMBER_SIMULATIONS; i++) {
    // Fresh random target per iteration; both methods hunt the same one.
    const targetNum = Math.floor(Math.random() * TOTAL_SIZE);
    const targetHash = generateH160(targetNum);

    if (i % 2 === 0) {
      const startSeq = performance.now();
      results.sequential.checks.push(sequentialRandom(TOTAL_SIZE, RANGE_SIZE, targetHash));
      results.sequential.times.push(performance.now() - startSeq);

      const startPrecise = performance.now();
      results.precise.checks.push(preciseSkipSearch(TOTAL_SIZE, RANGE_SIZE, PREFIX_LENGTH, targetHash));
      results.precise.times.push(performance.now() - startPrecise);
    } else {
      const startPrecise = performance.now();
      results.precise.checks.push(preciseSkipSearch(TOTAL_SIZE, RANGE_SIZE, PREFIX_LENGTH, targetHash));
      results.precise.times.push(performance.now() - startPrecise);

      const startSeq = performance.now();
      results.sequential.checks.push(sequentialRandom(TOTAL_SIZE, RANGE_SIZE, targetHash));
      results.sequential.times.push(performance.now() - startSeq);
    }

    if ((i + 1) % 10 === 0) console.log(`Test ${i + 1}/${NUMBER_SIMULATIONS}`);
  }

  // Mean, extrema and (population) standard deviation of a numeric sample.
  const calculateStats = (data) => {
    const avg = data.reduce((a, b) => a + b, 0) / data.length;
    const squaredDiffs = data.map(x => Math.pow(x - avg, 2));
    const variance = squaredDiffs.reduce((a, b) => a + b, 0) / data.length;
    const stdDev = Math.sqrt(variance);
    return { avg, min: Math.min(...data), max: Math.max(...data), stdDev };
  };

  const seqStats = calculateStats(results.sequential.checks);
  const preciseStats = calculateStats(results.precise.checks);
  const seqTime = results.sequential.times.reduce((a, b) => a + b, 0) / NUMBER_SIMULATIONS;
  const preciseTime = results.precise.times.reduce((a, b) => a + b, 0) / NUMBER_SIMULATIONS;

  // Positive % = prefix method did better; negative = it did worse.
  const improvement = ((seqStats.avg - preciseStats.avg) / seqStats.avg * 100).toFixed(2);
  const timeImprovement = ((seqTime - preciseTime) / seqTime * 100).toFixed(2);

  // FIX: stdDev was computed by calculateStats but never reported; it is now
  // part of the summary so run-to-run spread is visible.
  console.log(`
=== FINAL RESULTS ===

Random Sequential:
- Average comparisons: ${seqStats.avg.toFixed(0)}
- Std deviation: ${seqStats.stdDev.toFixed(0)}
- Average time: ${(seqTime / 1000).toFixed(3)}s
- Range: [${seqStats.min} - ${seqStats.max}]

Prefixes Search (${SKIP_SIZE} keys skip + threshold ${PREFIX_THRESHOLD}):
- Average comparisons: ${preciseStats.avg.toFixed(0)}
- Std deviation: ${preciseStats.stdDev.toFixed(0)}
- Average time: ${(preciseTime / 1000).toFixed(3)}s
- Range: [${preciseStats.min} - ${preciseStats.max}]

=== CONCLUSION ===
The ${preciseStats.avg < seqStats.avg ? 'PREFIXES' : 'RANDOM+SEQUENTIAL'} method is more efficient:
- ${improvement}% fewer comparisons
- ${timeImprovement}% faster
`);

  console.log(`Total time: ${((performance.now() - startTime)/1000).toFixed(1)} seconds`);
}

compareMethods();

I'm glad we can now expand on our knowledge of math together.
Did you run your own script ? Because I did. And the results are exactly the same - no improvement over the random sequential method.

Code:
=== FINAL RESULTS ===

Random Sequential:
- Average comparisons: 49649
- Average time: 0.061s
- Range: [6 - 99990]

Prefixes Search (1229 keys skip + threshold 1):
- Average comparisons: 49905
- Average time: 0.062s
- Range: [17 - 99974]

=== CONCLUSION ===
The RANDOM+SEQUENTIAL method is more efficient:
- -0.52% fewer comparisons
- -2.23% faster

Total time: 122.9 seconds

1- It does not use H160.
3- It does not apply a viable prefix logic.
Now it does !

2- It loads the entire set, which means it does not take into account the computational power saved in operations on the curve, double SHA256, and RIPEMD-160 when searching statistically.

Should we now improve it to add EC multiply and SHA256 so that we're EXACTLY in the context you describe ?
We'll find again that prefixes have NO impact.

I'm super happy to do so if you're not yet convinced. I have all the time in the world and this will be a very good starting point next time someone comes here and talks about prefixes.
Let me also know if there is anything left that is different from the real situation, so that I do it all in one go and we don't waste time.


I created a script under the worst possible conditions for prefix searching (intentionally). Even so, if you run several tests, you'll see that the prefix search achieves the same frequency of success as yours in this particular case, though you only publish instances where your method wins.

I designed this to strengthen your concept, spark your interest, and start with a mathematically fair foundation.

Aiming to prevent situations like this:

Should we now improve it to add EC multiply and SHA256 so that we're EXACTLY in the context you describe ?

Nah, you'd still be biased:

- it's not running in the 69 range; the immutable set is not the same....
- it's not searching for the correct prefix; I mean, birthday paradox......
- it's not finding the key as fast as it finds it once you have a million prefixes known in advance... wrong proximities calculated....
- wrong parameters; they need to be this or that so the results are better

etc...

You have too much free time Cheesy Cheesy Cheesy

You have no idea. I can do this for as long as I need to Smiley
Now that we have a tangible base to work with, it's only a matter of time until we reach the empirical truth Smiley

How could I take them seriously and make a proposal on equal terms in the pursuit of truth if I receive childish responses?

I don’t have as much time as you do; I work, and I don’t have a PC that can run quick tests.

I don’t want to waste time making scripts that won’t provide anything beyond childish things.

It wouldn’t even be hard for me to accept that I’m wrong if a properly conducted test determines it. In any case, why should failing be bad? The only thing that matters is reaching the truth under correct conditions, right?