Mostly done with the instruction reels.

This commit is contained in:
2026-03-25 06:59:00 -06:00
parent 4a659b5f0d
commit 1c971a4e22
11 changed files with 338 additions and 277 deletions

View File

@@ -4,20 +4,19 @@
namespace spider {
InstrReelDyn::InstrReelDyn(u64 length) : _use_count(0), _block_index(0) {
_total_size = length;
growToFit(length > 0 ? length - 1 : 0);
selectBlock(0);
InstrReelDyn::InstrReelDyn(u64 length) : _size(length) {
// Safe int ceil division
growTo((length >> 8) + ((length & 255) != 0));
}
InstrReelDyn::InstrReelDyn(const u8* data, u64 length) {}
InstrReelDyn::InstrReelDyn(const InstrReelDyn& copy) : _use_count(copy._use_count), _block_index(copy._block_index), _blocks(copy._blocks) {
if (_block_index < _blocks.size()) selectBlock(_block_index);
InstrReelDyn::InstrReelDyn(const InstrReelDyn& copy)
: _blocks(copy._blocks), _size(copy._size) {
}
InstrReelDyn::InstrReelDyn(InstrReelDyn&& move) noexcept : _use_count(move._use_count), _block_index(move._block_index), _blocks(std::move(move._blocks)) {
if (_block_index < _blocks.size()) selectBlock(_block_index);
InstrReelDyn::InstrReelDyn(InstrReelDyn&& move) noexcept
: _blocks(std::move(move._blocks)), _size(std::move(move._size)) {
}
InstrReelDyn::~InstrReelDyn() {
@@ -25,170 +24,183 @@ namespace spider {
}
InstrReelDyn& InstrReelDyn::operator=(const InstrReelDyn& copy) {
_use_count = copy._use_count;
_block_index = copy._block_index;
_blocks = copy._blocks;
if (_block_index < _blocks.size()) selectBlock(_block_index);
_size = copy._size;
return *this;
}
InstrReelDyn& InstrReelDyn::operator=(InstrReelDyn&& move) noexcept {
_use_count = move._use_count;
_block_index = move._block_index;
_blocks = std::move(move._blocks);
if (_block_index < _blocks.size()) selectBlock(_block_index);
move._use_count = 0;
move._block_index = 0;
move._mem = nullptr;
move._offset = 0;
move._size = 0;
move._total_size = 0;
_size = std::move(move._size);
return *this;
}
void InstrReelDyn::growToFit(isize index) {
while (_blocks.size() < (index + 1)) {
/**
 * Ensures backing storage exists for instruction pointer `ip` by
 * appending empty 256-byte blocks, and extends _size to cover it.
 * NOTE(review): the constructor calls this with a block COUNT
 * ((length >> 8) + ...), while this body treats the argument as an
 * instruction pointer — confirm the intended units.
 */
void InstrReelDyn::growTo(u64 ip) {
    // Block index (ip >> 8) must be valid, so we need (ip >> 8) + 1 blocks.
    const u64 needed = (ip >> 8) + 1;
    for (u64 have = _blocks.size(); have < needed; have++) {
        _blocks.emplace_back();
    }
    if (ip >= _size) _size = ip + 1;
}
isize InstrReelDyn::selectIndex(u64 ip) {
return ip / 256;
/**
 * Splits an instruction pointer into { block index, offset within block }.
 * Blocks are 256 bytes wide, so this is { ip / 256, ip % 256 }.
 */
std::pair<u64, u8> InstrReelDyn::indexOf(u64 ip) {
    const u64 block = ip >> 8;
    const u8 slot = static_cast<u8>(ip & 0xFF);
    return std::make_pair(block, slot);
}
InstrReelDyn::ReelBlock* InstrReelDyn::selectBlock(isize index) {
// Update base class cache
auto ptr = &_blocks[index];
_offset = index * 256;
_mem = ptr->data;
_size = 256;
_block_index = index;
//_blocks[block_idx].access_count++;
return ptr;
/**
 * Reports whether ip0 and ip1 fall within the same 256-byte block.
 * Always writes ip0's block index and in-block offset to the out-params,
 * so callers can reuse them on either path.
 * (Name kept as-is for interface compatibility; "contiguous" is meant.)
 */
bool InstrReelDyn::continous(u64 ip0, u64 ip1, u64* b_index, u16* s_index) {
    const u64 first_block = ip0 >> 8;
    *b_index = first_block;
    *s_index = static_cast<u16>(ip0 & 0xFF);
    return first_block == (ip1 >> 8);
}
/**
 * Reads one byte at `ip`, switching the cached block (_mem/_offset)
 * when `ip` lands outside the currently selected one.
 * Reads past the last block yield 0.
 */
u8 InstrReelDyn::atU8(u64 ip) {
    const isize block = selectIndex(ip);
    if (block >= _blocks.size()) {
        return 0;
    }
    if (block != _block_index) {
        selectBlock(block);
    }
    return _mem[ip - _offset];
}
/**
 * Reads a little-endian u16 at `ip`.
 * Fast path: both bytes live in one block -> single loadLE.
 * Slow path: assemble byte-by-byte across the block boundary.
 * Reads past the last block yield 0.
 */
u16 InstrReelDyn::atU16(u64 ip) {
    isize j0 = selectIndex(ip);
    isize j1 = selectIndex(ip + 1);
    if (j1 >= _blocks.size()) return 0;
    if (j0 == j1) {
        if (j0 != _block_index) selectBlock(j0);
        u16 dat;
        // BUGFIX: load from the byte's offset inside the block, not from
        // the block's base address (_mem alone ignored ip entirely).
        spider::loadLE(&dat, _mem + (ip - _offset));
        return dat;
    }
    // Crosses a block boundary: assemble little-endian byte-by-byte.
    u16 dat = 0;
    selectBlock(j0);
    u8 rem = static_cast<u8>(ip % 256);
    for (u8 n = 0; n < sizeof(u16); n++) {
        dat |= static_cast<u16>(_mem[rem++]) << (n * 8);
        if (!rem) selectBlock(++j0); // rem wrapped -> advance to next block
    }
    return dat;
}
/**
 * Reads a little-endian u32 at `ip`.
 * Fast path: all four bytes live in one block -> single loadLE.
 * Slow path: assemble byte-by-byte across the block boundary.
 * Reads past the last block yield 0.
 */
u32 InstrReelDyn::atU32(u64 ip) {
    isize j0 = selectIndex(ip);
    isize j1 = selectIndex(ip + 3);
    if (j1 >= _blocks.size()) return 0;
    if (j0 == j1) {
        if (j0 != _block_index) selectBlock(j0);
        u32 dat;
        // BUGFIX: load from the byte's offset inside the block, not from
        // the block's base address (_mem alone ignored ip entirely).
        spider::loadLE(&dat, _mem + (ip - _offset));
        return dat;
    }
    // Crosses a block boundary: assemble little-endian byte-by-byte.
    u32 dat = 0;
    selectBlock(j0);
    u8 rem = static_cast<u8>(ip % 256);
    for (u8 n = 0; n < sizeof(u32); n++) {
        // BUGFIX: widen before shifting — u8 promotes to int, and an
        // int shifted left by 24 with the top bit set overflows (UB).
        dat |= static_cast<u32>(_mem[rem++]) << (n * 8);
        if (!rem) selectBlock(++j0); // rem wrapped -> advance to next block
    }
    return dat;
}
/**
 * Reads a little-endian u64 at `ip`.
 * Fast path: all eight bytes live in one block -> single loadLE.
 * Slow path: assemble byte-by-byte across the block boundary.
 * Reads past the last block yield 0.
 */
u64 InstrReelDyn::atU64(u64 ip) {
    isize j0 = selectIndex(ip);
    // BUGFIX: last byte of a u64 is ip + 7, not ip + 3 (copy-paste from
    // atU32 — the bounds check and fast-path test were both wrong).
    isize j1 = selectIndex(ip + 7);
    if (j1 >= _blocks.size()) return 0;
    if (j0 == j1) {
        if (j0 != _block_index) selectBlock(j0);
        u64 dat;
        // BUGFIX: load from the byte's offset inside the block, not from
        // the block's base address (_mem alone ignored ip entirely).
        spider::loadLE(&dat, _mem + (ip - _offset));
        return dat;
    }
    // Crosses a block boundary: assemble little-endian byte-by-byte.
    u64 dat = 0;
    selectBlock(j0);
    u8 rem = static_cast<u8>(ip % 256);
    for (u8 n = 0; n < sizeof(u64); n++) {
        // BUGFIX: widen before shifting — u8 promotes to int, and an
        // int cannot be shifted left by 32..56 bits (UB).
        dat |= static_cast<u64>(_mem[rem++]) << (n * 8);
        if (!rem) selectBlock(++j0); // rem wrapped -> advance to next block
    }
    return dat;
}
// Store overloads — intentionally empty for now.
// TODO: implement writes, mirroring the atUx read paths.
void InstrReelDyn::at(u64 ip, u8 dat) {}
void InstrReelDyn::at(u64 ip, u16 dat) {}
void InstrReelDyn::at(u64 ip, u32 dat) {}
void InstrReelDyn::at(u64 ip, u64 dat) {}
// Particular Cases
/**
 * Inserts instruction `bc` at location `ip`.
 * TODO: unimplemented — currently a no-op stub.
 * (Original comment was a diff-fusion of two different docstrings.)
 */
void InstrReelDyn::append(u64 ip, u16 bc) {}
/**
 * Obtains the byte stored at `ip`.
 * Reads past the current size yield 0.
 */
u8 InstrReelDyn::readU8(u64 ip) {
    if (ip + 1 > _size) {
        return 0;
    }
    const auto loc = indexOf(ip);
    return _blocks[loc.first].data[loc.second];
}
/**
 * Obtains a little-endian u16 at `ip`.
 * Same-block reads take a single loadLE; boundary-crossing reads are
 * assembled byte-by-byte, so contiguous access incurs fewer penalties.
 * Reads past the current size yield 0.
 */
u16 InstrReelDyn::readU16(u64 ip) {
    if (ip + 2 > _size) return 0;
    u16 dat;
    u64 b_index;
    u16 s_index;
    if (continous(ip, ip + 1, &b_index, &s_index)) {
        spider::loadLE(&dat, &_blocks[b_index].data[s_index]);
        return dat;
    }
    dat = 0;
    for (isize i = 0; i < sizeof(dat); i++) {
        // BUGFIX: the block is b_index plus however many blocks s_index
        // has overflowed into — NOT (b_index + s_index) >> 8, which was
        // only correct when b_index happened to be 0.
        auto& b = _blocks[b_index + (s_index >> 8)];
        dat |= u16(b.data[s_index++ & 0xFF]) << (i * 8);
    }
    return dat;
}
/**
 * Obtains a little-endian u32 at `ip`.
 * Same-block reads take a single loadLE; boundary-crossing reads are
 * assembled byte-by-byte, so contiguous access incurs fewer penalties.
 * Reads past the current size yield 0.
 */
u32 InstrReelDyn::readU32(u64 ip) {
    if (ip + 4 > _size) return 0;
    u32 dat;
    u64 b_index;
    u16 s_index;
    if (continous(ip, ip + 3, &b_index, &s_index)) {
        spider::loadLE(&dat, &_blocks[b_index].data[s_index]);
        return dat;
    }
    dat = 0;
    for (isize i = 0; i < sizeof(dat); i++) {
        // BUGFIX: the block is b_index plus however many blocks s_index
        // has overflowed into — NOT (b_index + s_index) >> 8, which was
        // only correct when b_index happened to be 0.
        auto& b = _blocks[b_index + (s_index >> 8)];
        dat |= u32(b.data[s_index++ & 0xFF]) << (i * 8);
    }
    return dat;
}
/**
 * Obtains a little-endian u64 at `ip`.
 * Same-block reads take a single loadLE; boundary-crossing reads are
 * assembled byte-by-byte, so contiguous access incurs fewer penalties.
 * Reads past the current size yield 0.
 */
u64 InstrReelDyn::readU64(u64 ip) {
    if (ip + 8 > _size) return 0;
    u64 dat;
    u64 b_index;
    u16 s_index;
    if (continous(ip, ip + 7, &b_index, &s_index)) {
        spider::loadLE(&dat, &_blocks[b_index].data[s_index]);
        return dat;
    }
    dat = 0;
    for (isize i = 0; i < sizeof(dat); i++) {
        // BUGFIX: the block is b_index plus however many blocks s_index
        // has overflowed into — NOT (b_index + s_index) >> 8, which was
        // only correct when b_index happened to be 0.
        auto& b = _blocks[b_index + (s_index >> 8)];
        dat |= u64(b.data[s_index++ & 0xFF]) << (i * 8);
    }
    return dat;
}
/**
 * Reads `length` bytes starting at `ip` into `out`.
 * Out-of-range requests zero-fill `out` rather than partially reading.
 * Same-block ranges take a single memcpy; other ranges are copied
 * block-sized chunk by chunk.
 */
void InstrReelDyn::readRange(u64 ip, u8* out, u64 length) {
    if (length == 0) return; // nothing to copy; keeps ip + length - 1 safe below
    if (ip + length > _size) {
        std::memset(out, 0, length);
        return;
    }
    u64 b_index;
    u16 s_index;
    // BUGFIX: the last byte of the range is ip + length - 1, not
    // ip + length. The old check spuriously took the chunked path for
    // ranges ending exactly on a block boundary.
    if (continous(ip, ip + length - 1, &b_index, &s_index)) {
        std::memcpy(out, &_blocks[b_index].data[s_index], length);
        return;
    }
    u64 bytes_read = 0;
    while (bytes_read < length) {
        const u64 remaining_in_block = 256 - s_index;
        const u64 chunk_size = std::min(remaining_in_block, length - bytes_read);
        // Bulk copy for the current segment.
        std::memcpy(out + bytes_read, &_blocks[b_index].data[s_index], chunk_size);
        // Advance to the start of the next block.
        bytes_read += chunk_size;
        b_index++;
        s_index = 0;
    }
}
/**
 * Current size of the instruction stream, in bytes.
 */
u64 InstrReelDyn::size() { return _size; }
// Mutation //
// TODO: every mutation entry point below is an unimplemented no-op stub.
void InstrReelDyn::writeU8(u64 ip, u8 dat) {}
void InstrReelDyn::writeU16(u64 ip, u16 dat) {}
void InstrReelDyn::writeU32(u64 ip, u32 dat) {}
void InstrReelDyn::writeU64(u64 ip, u64 dat) {}
/**
 * Appends instruction at the end.
 * TODO: unimplemented — currently a no-op.
 */
void InstrReelDyn::append(u16 bc) {}
/**
 * Removes instruction at location.
 * TODO: unimplemented — currently a no-op.
 */
void InstrReelDyn::remove(u64 ip) {}
}