moved reel to dedicated folder because it was hogging the cpu folder

This commit is contained in:
2026-03-23 22:29:05 -06:00
parent 3a6fc6cfb9
commit 4a659b5f0d
8 changed files with 4 additions and 3 deletions

View File

@@ -3,7 +3,8 @@
#include <spider/runtime/native/machine.hpp>
#include <spider/runtime/memory/RAM.hpp>
#include <spider/runtime/cpu/InstrReel.hpp>
#include <spider/runtime/reel/InstrReel.hpp>
#if __cplusplus >= 202002L
#include <bit>

View File

@@ -1,77 +0,0 @@
#include "InstrReel.hpp"
#include <spider/runtime/cpu/CPU.hpp>
#include <spider/runtime/memory/Types.hpp>
namespace spider {
// Public Interface //

/// Default-constructs an empty reel: no backing memory, zero sizes.
InstrReel::InstrReel() : _mem(nullptr), _size(0), _offset(0), _total_size(0) {}
InstrReel::~InstrReel() {}

// Instruction abstraction //

/**
 * Reads the byte at absolute instruction pointer `ip`.
 * Returns 0 when `ip` falls outside the currently mapped window.
 */
u8 InstrReel::atU8(u64 ip) {
    // guard against access below the window (u64 subtraction would wrap)
    if (ip < static_cast<u64>(_offset)) return 0;
    u64 ip_p = ip - _offset;
    if (ip_p + 1 > static_cast<u64>(_size)) return 0;
    // BUG FIX: index with the window-relative offset, not the absolute ip
    return _mem[ip_p];
}
/**
 * Reads a little-endian u16 at `ip`.
 * Returns 0 when the two bytes do not fit inside the mapped window.
 */
u16 InstrReel::atU16(u64 ip) {
    // guard against access
    if (ip < static_cast<u64>(_offset)) return 0;
    u64 ip_p = ip - _offset;
    if (ip_p + 2 > static_cast<u64>(_size)) return 0;
    // assemble a 16-bit little-endian number (loadLE — the old comment said big endian)
    u16 dat;
    spider::loadLE(&dat, _mem + ip_p);
    return dat;
}
/**
 * Reads a little-endian u32 at `ip`.
 * Returns 0 when the four bytes do not fit inside the mapped window.
 */
u32 InstrReel::atU32(u64 ip) {
    // guard against access
    if (ip < static_cast<u64>(_offset)) return 0;
    u64 ip_p = ip - _offset;
    if (ip_p + 4 > static_cast<u64>(_size)) return 0;
    // assemble a 32-bit little-endian number
    u32 dat;
    spider::loadLE(&dat, _mem + ip_p);
    return dat;
}
/**
 * Reads a little-endian u64 at `ip`.
 * Returns 0 when the eight bytes do not fit inside the mapped window.
 */
u64 InstrReel::atU64(u64 ip) {
    // guard against access
    if (ip < static_cast<u64>(_offset)) return 0;
    u64 ip_p = ip - _offset;
    if (ip_p + 8 > static_cast<u64>(_size)) return 0;
    // assemble a 64-bit little-endian number
    u64 dat;
    spider::loadLE(&dat, _mem + ip_p);
    return dat;
}
/// Total byte length of the instruction stream.
u64 InstrReel::size() {
    return _total_size;
}

// Static Utils //
// Packed 16-bit bytecode word: type-size in bits 0-1, addr-mode in bits 2-6.
// NOTE(review): an opcode field starting at bit 5 overlaps addr-mode's
// bits 5-6 — confirm whether the shift below should be 7 instead of 5.
u16 InstrReel::unpackInstr(u16 bcode) {
    return (bcode >> 5) & 0x1FF;
}
u8 InstrReel::unpackAddrMode(u16 bcode) {
    return (bcode >> 2) & 0x1F;
}
u8 InstrReel::unpackTypeSize(u16 bcode) {
    return bcode & 0x3;
}
}

View File

@@ -1,74 +0,0 @@
#pragma once
#include <spider/SpiderRuntime.hpp>
#include <spider/runtime/memory/ByteArray.hpp>
namespace spider {
/**
 * Implements an instruction reel: random-access reads over a stream
 * of encoded instructions, addressed by instruction pointer (ip).
 * Subclasses supply and manage the protected window fields below.
 */
class InstrReel {
protected: // Current accessing range //
u8* _mem; // bytes of the currently mapped window (lifetime managed by subclass)
isize _size; // byte length of the mapped window
isize _offset; // absolute ip of the window's first byte
isize _total_size; // total byte length of the whole reel
public:
InstrReel();
virtual ~InstrReel();
public:
/**
 * Obtains the byte (u8) of data at
 * the specific location.
 * Reindexing may occur; continuous access
 * may incur fewer penalties.
 */
virtual u8 atU8(u64 ip);
/**
 * Obtains the 16-bit value (u16) at
 * the specific location.
 * Reindexing may occur; continuous access
 * may incur fewer penalties.
 */
virtual u16 atU16(u64 ip);
/**
 * Obtains the 32-bit value (u32) at
 * the specific location.
 * Reindexing may occur; continuous access
 * may incur fewer penalties.
 */
virtual u32 atU32(u64 ip);
/**
 * Obtains the 64-bit value (u64) at
 * the specific location.
 * Reindexing may occur; continuous access
 * may incur fewer penalties.
 */
virtual u64 atU64(u64 ip);
/**
 * Current total size of the instruction stream, in bytes.
 */
virtual u64 size();
public: // Static Utils //
// Field extractors for a packed 16-bit bytecode word.
// NOTE(review): type-size is bits 0-1 and addr-mode bits 2-6, yet the
// instr shift in the .cpp is 5 (overlapping addr-mode) — confirm layout.
static u16 unpackInstr(u16 bcode);
static u8 unpackAddrMode(u16 bcode);
static u8 unpackTypeSize(u16 bcode);
};
}

View File

@@ -1,194 +0,0 @@
#include "InstrReelDyn.hpp"
#include <spider/runtime/memory/Types.hpp>
namespace spider {
/**
 * Builds a zero-filled reel able to hold `length` bytes.
 */
InstrReelDyn::InstrReelDyn(u64 length) : _use_count(0), _block_index(0) {
    _total_size = length;
    // BUG FIX: growToFit expects a BLOCK index; passing the raw byte
    // index allocated one 256-byte block per byte of requested length.
    growToFit(length > 0 ? selectIndex(length - 1) : 0);
    selectBlock(0);
}
/**
 * Builds a reel pre-loaded with `length` bytes copied from `data`.
 */
InstrReelDyn::InstrReelDyn(const u8* data, u64 length) : _use_count(0), _block_index(0) {
    // BUG FIX: this constructor was an empty stub that left _use_count
    // and _block_index uninitialized and copied nothing.
    _total_size = length;
    growToFit(length > 0 ? selectIndex(length - 1) : 0);
    for (u64 i = 0; i < length; i++) {
        _blocks[i / 256].data[i % 256] = data[i];
    }
    selectBlock(0);
}
InstrReelDyn::InstrReelDyn(const InstrReelDyn& copy) : _use_count(copy._use_count), _block_index(copy._block_index), _blocks(copy._blocks) {
    // BUG FIX: _total_size was never copied, so size() reported 0 on copies.
    _total_size = copy._total_size;
    if (_block_index < (isize)_blocks.size()) selectBlock(_block_index);
}
InstrReelDyn::InstrReelDyn(InstrReelDyn&& move) noexcept : _use_count(move._use_count), _block_index(move._block_index), _blocks(std::move(move._blocks)) {
    _total_size = move._total_size; // BUG FIX: was never transferred
    if (_block_index < (isize)_blocks.size()) selectBlock(_block_index);
    // BUG FIX: reset the moved-from object; its cached _mem pointed into
    // the deque we just stole and would dangle otherwise.
    move._use_count = 0;
    move._block_index = 0;
    move._mem = nullptr;
    move._offset = 0;
    move._size = 0;
    move._total_size = 0;
}
InstrReelDyn::~InstrReelDyn() {
    // blocks are owned by the deque; nothing to release by hand
}
InstrReelDyn& InstrReelDyn::operator=(const InstrReelDyn& copy) {
    if (this == &copy) return *this; // self-assign guard
    _use_count = copy._use_count;
    _block_index = copy._block_index;
    _blocks = copy._blocks;
    _total_size = copy._total_size; // BUG FIX: was never copied
    if (_block_index < (isize)_blocks.size()) selectBlock(_block_index);
    return *this;
}
InstrReelDyn& InstrReelDyn::operator=(InstrReelDyn&& move) noexcept {
    if (this == &move) return *this; // self-assign guard
    _use_count = move._use_count;
    _block_index = move._block_index;
    _blocks = std::move(move._blocks);
    _total_size = move._total_size; // BUG FIX: was never transferred
    if (_block_index < (isize)_blocks.size()) selectBlock(_block_index);
    move._use_count = 0;
    move._block_index = 0;
    move._mem = nullptr;
    move._offset = 0;
    move._size = 0;
    move._total_size = 0;
    return *this;
}
/// Ensures at least `index + 1` blocks exist (index is a BLOCK index).
void InstrReelDyn::growToFit(isize index) {
    while ((isize)_blocks.size() < index + 1) {
        _blocks.emplace_back();
    }
}
/// Maps an absolute ip to the index of the 256-byte block holding it.
isize InstrReelDyn::selectIndex(u64 ip) {
    return ip / 256;
}
/// Caches block `index` into the base-class window fields.
InstrReelDyn::ReelBlock* InstrReelDyn::selectBlock(isize index) {
    auto ptr = &_blocks[index];
    _offset = index * 256;
    _mem = ptr->data;
    _size = 256;
    _block_index = index;
    //_blocks[block_idx].access_count++;
    return ptr;
}
/// Reads the byte at `ip`; returns 0 past the end of the allocated blocks.
u8 InstrReelDyn::atU8(u64 ip) {
    isize j = selectIndex(ip);
    if (j >= (isize)_blocks.size()) return 0;
    if (j != _block_index) selectBlock(j);
    return _mem[ip - _offset];
}
/// Reads a little-endian u16 at `ip`; returns 0 when out of range.
u16 InstrReelDyn::atU16(u64 ip) {
    isize j0 = selectIndex(ip);
    isize j1 = selectIndex(ip + 1);
    if (j1 >= (isize)_blocks.size()) return 0;
    // fast path: both bytes live in the same block
    if (j0 == j1) {
        if (j0 != _block_index) selectBlock(j0);
        u16 dat;
        // BUG FIX: read at ip's offset inside the block, not at byte 0
        spider::loadLE(&dat, _mem + (ip - _offset));
        return dat;
    }
    // slow path: the value straddles a block boundary
    u16 dat = 0;
    selectBlock(j0);
    u8 rem = ip % 256;
    for (u8 n = 0; n < sizeof(u16); n++) {
        // cast before shifting so high bytes are not lost to int promotion
        dat |= static_cast<u16>(_mem[rem++]) << (n * 8);
        if (!rem) selectBlock(++j0); // rem wrapped to 0: advance a block
    }
    return dat;
}
/// Reads a little-endian u32 at `ip`; returns 0 when out of range.
u32 InstrReelDyn::atU32(u64 ip) {
    isize j0 = selectIndex(ip);
    isize j1 = selectIndex(ip + 3);
    if (j1 >= (isize)_blocks.size()) return 0;
    // fast path: all four bytes live in the same block
    if (j0 == j1) {
        if (j0 != _block_index) selectBlock(j0);
        u32 dat;
        // BUG FIX: read at ip's offset inside the block, not at byte 0
        spider::loadLE(&dat, _mem + (ip - _offset));
        return dat;
    }
    // slow path: the value straddles a block boundary
    u32 dat = 0;
    selectBlock(j0);
    u8 rem = ip % 256;
    for (u8 n = 0; n < sizeof(u32); n++) {
        // cast avoids signed-int promotion UB when shifting into bit 31
        dat |= static_cast<u32>(_mem[rem++]) << (n * 8);
        if (!rem) selectBlock(++j0); // rem wrapped to 0: advance a block
    }
    return dat;
}
/// Reads a little-endian u64 at `ip`; returns 0 when out of range.
u64 InstrReelDyn::atU64(u64 ip) {
    isize j0 = selectIndex(ip);
    // BUG FIX: the last byte of a u64 is at ip + 7, not ip + 3
    isize j1 = selectIndex(ip + 7);
    if (j1 >= (isize)_blocks.size()) return 0;
    // fast path: all eight bytes live in the same block
    if (j0 == j1) {
        if (j0 != _block_index) selectBlock(j0);
        u64 dat;
        // BUG FIX: read at ip's offset inside the block, not at byte 0
        spider::loadLE(&dat, _mem + (ip - _offset));
        return dat;
    }
    // slow path: the value straddles a block boundary
    u64 dat = 0;
    selectBlock(j0);
    u8 rem = ip % 256;
    for (u8 n = 0; n < sizeof(u64); n++) {
        // BUG FIX: without the u64 cast, the promoted int is shifted by
        // up to 56 bits — undefined behavior and lost high bytes
        dat |= static_cast<u64>(_mem[rem++]) << (n * 8);
        if (!rem) selectBlock(++j0); // rem wrapped to 0: advance a block
    }
    return dat;
}
// Mutation API — TODO: unimplemented stubs kept for interface stability.
void InstrReelDyn::at(u64 ip, u8 dat) {}
void InstrReelDyn::at(u64 ip, u16 dat) {}
void InstrReelDyn::at(u64 ip, u32 dat) {}
void InstrReelDyn::at(u64 ip, u64 dat) {}
/**
 * Appends instruction at location. (TODO: unimplemented stub)
 */
void InstrReelDyn::append(u64 ip, u16 bc) {}
/**
 * Appends instruction at the end. (TODO: unimplemented stub)
 */
void InstrReelDyn::append(u16 bc) {}
/**
 * Removes instruction at location. (TODO: unimplemented stub)
 */
void InstrReelDyn::remove(u64 ip) {}
}

View File

@@ -1,110 +0,0 @@
#pragma once
#include <spider/runtime/cpu/InstrReel.hpp>
namespace spider {
/**
 * Growable instruction reel backed by a deque of fixed 256-byte blocks.
 * Extends InstrReel with a mutation API (currently stubbed in the .cpp).
 * NOTE(review): std::deque is used but <deque> is not included here;
 * it presumably arrives transitively — confirm.
 */
class InstrReelDyn : public InstrReel {
private:
// One fixed-size storage unit of the reel (zero-initialized).
struct ReelBlock {
u8 data[256] = {};
};
private:
u64 _use_count; // bookkeeping counter; never incremented in visible code
isize _block_index; // index of the block cached in the base-class window
std::deque<ReelBlock> _blocks; // owning storage, 256 bytes per block
public:
InstrReelDyn(u64 length);
InstrReelDyn(const u8* data, u64 length);
InstrReelDyn(const InstrReelDyn& copy);
InstrReelDyn(InstrReelDyn&& move) noexcept;
virtual ~InstrReelDyn();
public:
InstrReelDyn& operator=(const InstrReelDyn& copy);
InstrReelDyn& operator=(InstrReelDyn&& move) noexcept;
private:
// Maps an absolute ip to the index of the block that holds it.
isize selectIndex(u64 ip);
// Grows the block deque until the given index becomes valid.
void growToFit(isize index);
// Caches the given block into the base-class window fields.
ReelBlock* selectBlock(isize index);
public:
/**
 * Obtains the byte (u8) of data at
 * the specific location.
 * Reindexing may occur; continuous access
 * may incur fewer penalties.
 */
virtual u8 atU8(u64 ip) override;
/**
 * Obtains the 16-bit value (u16) at
 * the specific location.
 * Reindexing may occur; continuous access
 * may incur fewer penalties.
 */
virtual u16 atU16(u64 ip) override;
/**
 * Obtains the 32-bit value (u32) at
 * the specific location.
 * Reindexing may occur; continuous access
 * may incur fewer penalties.
 */
virtual u32 atU32(u64 ip) override;
/**
 * Obtains the 64-bit value (u64) at
 * the specific location.
 * Reindexing may occur; continuous access
 * may incur fewer penalties.
 */
virtual u64 atU64(u64 ip) override;
public:
// Mutation API (the .cpp definitions are currently empty stubs).
void at(u64 ip, u8 dat);
void at(u64 ip, u16 dat);
void at(u64 ip, u32 dat);
void at(u64 ip, u64 dat);
/**
 * Appends instruction at location.
 */
void append(u64 ip, u16 bc);
/**
 * Appends instruction at the end.
 */
void append(u16 bc);
/**
 * Removes instruction at location.
 */
void remove(u64 ip);
};
}

View File

@@ -1,149 +0,0 @@
#include "InstrReelFixed.hpp"
#include <spider/runtime/memory/Types.hpp>
#include <cstring>
namespace spider {
// Constructors & Destructors //

/// Allocates a zero-filled reel of `length` bytes.
InstrReelFixed::InstrReelFixed(u64 length) {
    this->_offset = 0;
    this->_size = length;
    this->_total_size = length;
    if (_size > 0) {
        _mem = new u8[_size];
        std::memset(_mem, 0, _size);
    }
}
/// Allocates a reel of `length` bytes copied from `data`.
InstrReelFixed::InstrReelFixed(const u8* data, u64 length) {
    this->_offset = 0;
    this->_size = length;
    this->_total_size = length;
    if (_size > 0) {
        _mem = new u8[_size];
        std::copy(data, data + _size, _mem);
    }
}
InstrReelFixed::InstrReelFixed(const InstrReelFixed& other) {
    _offset = other._offset;
    _size = other._size;
    _total_size = other._total_size;
    // BUG FIX: guard the zero-size case like the other constructors do
    // (the base ctor already left _mem as nullptr).
    if (_size > 0) {
        _mem = new u8[_size];
        std::copy(other._mem, other._mem + _size, _mem);
    }
}
InstrReelFixed::InstrReelFixed(InstrReelFixed&& other) noexcept {
    _mem = other._mem; // steal the buffer
    _offset = other._offset;
    _size = other._size;
    _total_size = other._total_size;
    other._mem = nullptr; // leave as husk
    other._offset = 0;
    other._size = 0;
    other._total_size = 0;
}
InstrReelFixed::~InstrReelFixed() {
    delete[] _mem;
}

// Assign Operators //

InstrReelFixed& InstrReelFixed::operator=(const InstrReelFixed& other) {
    if (this == &other) return *this; // lock self
    // allocate-then-swap ordering keeps *this intact if `new` throws
    u8* new_mem = nullptr;
    if (other._size > 0) {
        new_mem = new u8[other._size];
        std::copy(other._mem, other._mem + other._size, new_mem);
    }
    delete[] _mem;
    _mem = new_mem;
    _offset = other._offset;
    _size = other._size;
    _total_size = other._total_size;
    return *this;
}
InstrReelFixed& InstrReelFixed::operator=(InstrReelFixed&& other) noexcept {
    if (this == &other) return *this; // lock self
    delete[] _mem;
    _mem = other._mem; // steal
    _offset = other._offset;
    _size = other._size;
    _total_size = other._total_size;
    other._mem = nullptr; // leave as husk
    other._offset = 0;
    other._size = 0;
    other._total_size = 0;
    return *this;
}

// Misc //

/// Writes one byte at `ip`; out-of-range writes are silently ignored.
void InstrReelFixed::at(u64 ip, u8 dat) {
    // BUG FIX: `ip + 1 > _size` wraps to 0 when ip == UINT64_MAX and
    // would pass the guard; compare without the addition instead.
    if (ip >= static_cast<u64>(_size)) return;
    _mem[ip] = dat;
}
/// Writes a little-endian u16 at `ip`; ignored when it does not fit.
void InstrReelFixed::at(u64 ip, u16 dat) {
    // overflow-safe form of `ip + 2 > _size`
    if (_size < 2 || ip > static_cast<u64>(_size) - 2) return;
    spider::storeLE(dat, _mem + ip);
}
/// Writes a little-endian u32 at `ip`; ignored when it does not fit.
void InstrReelFixed::at(u64 ip, u32 dat) {
    if (_size < 4 || ip > static_cast<u64>(_size) - 4) return;
    spider::storeLE(dat, _mem + ip);
}
/// Writes a little-endian u64 at `ip`; ignored when it does not fit.
void InstrReelFixed::at(u64 ip, u64 dat) {
    if (_size < 8 || ip > static_cast<u64>(_size) - 8) return;
    spider::storeLE(dat, _mem + ip);
}
/// Reallocates the reel to `new_size` bytes, preserving the common prefix
/// and zero-filling any growth.
void InstrReelFixed::resize(u64 new_size) {
    // Special case 1: nothing to do
    if (new_size == static_cast<u64>(_size)) return;
    // Special case 2: shrink to nothing
    if (new_size == 0) {
        delete[] _mem;
        _mem = nullptr;
        _size = 0;
        _total_size = 0;
        return;
    }
    // 1. Allocate the new block
    u8* new_mem = new u8[new_size];
    // 2. Zero-initialize
    std::memset(new_mem, 0, new_size);
    // 3. Preserve data
    // If shrinking, copy 'new_size' bytes. If growing, copy 'old_size' bytes.
    u64 bytes_to_copy = (new_size < static_cast<u64>(_size)) ? new_size : static_cast<u64>(_size);
    // 3.1 Previous size could be zero, where _mem would be null
    if (_mem != nullptr) {
        std::copy(_mem, _mem + bytes_to_copy, new_mem);
    }
    // 4. Swap and Clean up
    delete[] _mem;
    _mem = new_mem;
    _size = new_size;
    _total_size = new_size;
}
}

View File

@@ -1,43 +0,0 @@
#pragma once
#include <spider/runtime/cpu/InstrReel.hpp>
namespace spider {
/**
 * Fixed-capacity instruction reel backed by a single heap buffer.
 * Adds write access (`at`) and resizing on top of InstrReel's reads.
 * Owns its memory: the Rule of Five is implemented in the .cpp.
 */
class InstrReelFixed : public InstrReel {
public:
// Allocates a zero-filled reel of `length` bytes.
InstrReelFixed(u64 length);
// Allocates a reel initialized from data[0..length).
InstrReelFixed(const u8* data, u64 length);
InstrReelFixed(const InstrReelFixed& copy);
InstrReelFixed(InstrReelFixed&& move) noexcept;
virtual ~InstrReelFixed();
public:
InstrReelFixed& operator=(const InstrReelFixed& copy);
InstrReelFixed& operator=(InstrReelFixed&& move) noexcept;
public:
// Stores a value at `ip`; out-of-range writes are silently ignored.
void at(u64 ip, u8 dat);
void at(u64 ip, u16 dat);
void at(u64 ip, u32 dat);
void at(u64 ip, u64 dat);
// Reallocates to `new_size` bytes, preserving the common prefix.
void resize(u64 new_size);
};
}