HR, as far as I understand it, 20% of the blocks are ASIC, 20% are Scrypt, and 60% are GPU (I do not know whether those are equally distributed).
That's not the issue. The block find distribution is 20/20/20/20/20. About that there is no doubt.
The question is how MultiShield adjusts each algo's diff, and as a function of exactly what. We know that before the most recent hardfork, global network hashrate (that is, the combined total of the 5 algos) was key, and that all the algos' diffs adjusted in response to changes in aggregate hashrate. The question is how much that changed: to what precise degree are the algos currently independent, and to what degree are they still inter-dependent?
Yes block distribution is 20/20/20/20/20. To compare the exact changes in the last hard fork compare GetNextWorkRequiredV3 to GetNextWorkRequiredV4.
Current difficulty adjustment code:
https://github.com/digibyte/digibyte/blob/master/src/main.cpp#L1670

static unsigned int GetNextWorkRequiredV4(const CBlockIndex* pindexLast, const CBlockHeader *pblock, int algo,bool log)
{
// Compute the next compact difficulty target (nBits) for `algo`, given the
// current chain tip `pindexLast` and the header being validated `pblock`.
// When `log` is true, intermediate values are traced via LogPrintf.
unsigned int nProofOfWorkLimit = Params().ProofOfWorkLimit(algo).GetCompact();
// Genesis block
if (pindexLast == NULL)
return nProofOfWorkLimit;
if (TestNet())
{
// Special difficulty rule for testnet:
// If the new block's timestamp is more than 2* 10 minutes
// then allow mining of a min-difficulty block.
if (pblock->nTime > pindexLast->nTime + nTargetSpacing*2)
return nProofOfWorkLimit;
else
{
// Return the last non-special-min-difficulty-rules-block
// (walk back past min-difficulty blocks until a real target or a
// retarget-interval boundary is found).
const CBlockIndex* pindex = pindexLast;
while (pindex->pprev && pindex->nHeight % nInterval != 0 && pindex->nBits == nProofOfWorkLimit)
pindex = pindex->pprev;
return pindex->nBits;
}
}
if(log)
{
LogPrintf("GetNextWorkRequired RETARGET\n");
LogPrintf("Algo: %s\n", GetAlgoName(algo));
// NOTE(review): "%s" with an integer height relies on the lenient,
// type-driven %s handling of the project's LogPrintf (tinyformat) — confirm.
LogPrintf("Height (Before): %s\n", pindexLast->nHeight);
}
// find first block in averaging interval
// Go back by what we want to be nAveragingInterval blocks per algo
// (blocks of ALL algos are counted here, so the averaging window — and hence
// the timespan below — is shared across algos).
const CBlockIndex* pindexFirst = pindexLast;
for (int i = 0; pindexFirst && i < NUM_ALGOS*nAveragingInterval; i++)
{
pindexFirst = pindexFirst->pprev;
}
// Most recent block mined with this algo; NULL if the algo has none yet.
const CBlockIndex* pindexPrevAlgo = GetLastBlockIndexForAlgo(pindexLast, algo);
if (pindexPrevAlgo == NULL || pindexFirst == NULL)
{
// Not enough history (young chain, or algo never used): minimum difficulty.
if(log)
LogPrintf("Use default POW Limit\n");
return nProofOfWorkLimit;
}
// Limit adjustment step
// Use medians to prevent time-warp attacks
int64_t nActualTimespan = pindexLast-> GetMedianTimePast() - pindexFirst->GetMedianTimePast();
// Dampen the adjustment: move only 1/4 of the way from the expected
// timespan toward the observed one.
nActualTimespan = nAveragingTargetTimespanV4 + (nActualTimespan - nAveragingTargetTimespanV4)/4;
if(log)
LogPrintf("nActualTimespan = %d before bounds\n", nActualTimespan);
// Clamp the (dampened) timespan to the allowed min/max bounds.
if (nActualTimespan < nMinActualTimespanV4)
nActualTimespan = nMinActualTimespanV4;
if (nActualTimespan > nMaxActualTimespanV4)
nActualTimespan = nMaxActualTimespanV4;
//Global retarget
// Scale this algo's previous target by observed/expected timespan. Because
// the timespan is measured over blocks of ALL algos (see the window above),
// the algos remain coupled through the shared aggregate block rate.
CBigNum bnNew;
bnNew.SetCompact(pindexPrevAlgo->nBits);
bnNew *= nActualTimespan;
bnNew /= nAveragingTargetTimespanV4;
//Per-algo retarget
// nAdjustments measures how recently this algo found a block relative to an
// ideal 1-in-NUM_ALGOS rotation: positive means the algo's last block is
// recent (shrink the target => harder), negative means it is lagging
// (grow the target => easier) — nudged nLocalTargetAdjustment percent per step.
int nAdjustments = pindexPrevAlgo->nHeight + NUM_ALGOS - 1 - pindexLast->nHeight;
if (nAdjustments > 0)
{
for (int i = 0; i < nAdjustments; i++)
{
bnNew *= 100;
bnNew /= (100 + nLocalTargetAdjustment);
}
}
else if (nAdjustments < 0)//make it easier
{
for (int i = 0; i < -nAdjustments; i++)
{
bnNew *= (100 + nLocalTargetAdjustment);
bnNew /= 100;
}
}
// A larger target means lower difficulty: never allow the result to be
// easier than the algo's proof-of-work limit.
if (bnNew > Params().ProofOfWorkLimit(algo))
{
if(log)
{
LogPrintf("bnNew > Params().ProofOfWorkLimit(algo)\n");
}
bnNew = Params().ProofOfWorkLimit(algo);
}
if(log)
{
LogPrintf("nAveragingTargetTimespanV4 = %d; nActualTimespan = %d\n", nAveragingTargetTimespanV4, nActualTimespan);
LogPrintf("Before: %08x %s\n", pindexPrevAlgo->nBits, CBigNum().SetCompact(pindexPrevAlgo->nBits).getuint256().ToString());
LogPrintf("After: %08x %s\n", bnNew.GetCompact(), bnNew.getuint256().ToString());
}
return bnNew.GetCompact();
}