Rework GPU VMM variable naming

This commit is contained in:
Billy Laws
2020-11-08 20:14:14 +00:00
committed by PixelyIon
parent 4c9d453008
commit c7e5202042
7 changed files with 101 additions and 100 deletions

View File

@ -16,7 +16,7 @@ namespace skyline::gpu::vmm {
std::optional<ChunkDescriptor> MemoryManager::FindChunk(ChunkState state, u64 size, u64 alignment) {
auto chunk{std::find_if(chunks.begin(), chunks.end(), [state, size, alignment](const ChunkDescriptor &chunk) -> bool {
return (alignment ? util::IsAligned(chunk.address, alignment) : true) && chunk.size > size && chunk.state == state;
return (alignment ? util::IsAligned(chunk.virtAddr, alignment) : true) && chunk.size > size && chunk.state == state;
})};
if (chunk != chunks.end())
@ -30,7 +30,7 @@ namespace skyline::gpu::vmm {
for (auto chunk{chunks.begin()}; chunk != chunkEnd; chunk++) {
if (chunk->CanContain(newChunk)) {
auto oldChunk{*chunk};
u64 newSize{newChunk.address - chunk->address};
u64 newSize{newChunk.virtAddr - chunk->virtAddr};
u64 extension{chunk->size - newSize - newChunk.size};
if (newSize == 0) {
@ -41,16 +41,16 @@ namespace skyline::gpu::vmm {
}
if (extension)
chunks.insert(std::next(chunk), ChunkDescriptor(newChunk.address + newChunk.size, extension, (oldChunk.state == ChunkState::Mapped) ? (oldChunk.pointer + newSize + newChunk.size) : 0, oldChunk.state));
chunks.insert(std::next(chunk), ChunkDescriptor(newChunk.virtAddr + newChunk.size, extension, (oldChunk.state == ChunkState::Mapped) ? (oldChunk.cpuPtr + newSize + newChunk.size) : 0, oldChunk.state));
return newChunk.address;
} else if (chunk->address + chunk->size > newChunk.address) {
chunk->size = newChunk.address - chunk->address;
return newChunk.virtAddr;
} else if (chunk->virtAddr + chunk->size > newChunk.virtAddr) {
chunk->size = newChunk.virtAddr - chunk->virtAddr;
// Deletes all chunks that are within the chunk being inserted and split the final one
auto tailChunk{std::next(chunk)};
while (tailChunk != chunkEnd) {
if (tailChunk->address + tailChunk->size >= newChunk.address + newChunk.size)
if (tailChunk->virtAddr + tailChunk->size >= newChunk.virtAddr + newChunk.size)
break;
tailChunk = chunks.erase(tailChunk);
@ -61,11 +61,11 @@ namespace skyline::gpu::vmm {
if (tailChunk == chunkEnd)
break;
u64 chunkSliceOffset{newChunk.address + newChunk.size - tailChunk->address};
tailChunk->address += chunkSliceOffset;
u64 chunkSliceOffset{newChunk.virtAddr + newChunk.size - tailChunk->virtAddr};
tailChunk->virtAddr += chunkSliceOffset;
tailChunk->size -= chunkSliceOffset;
if (tailChunk->state == ChunkState::Mapped)
tailChunk->pointer += chunkSliceOffset;
tailChunk->cpuPtr += chunkSliceOffset;
// If the size of the head chunk is zero then we can directly replace it with our new one rather than inserting it
auto headChunk{std::prev(tailChunk)};
@ -74,7 +74,7 @@ namespace skyline::gpu::vmm {
else
chunks.insert(std::next(headChunk), newChunk);
return newChunk.address;
return newChunk.virtAddr;
}
}
@ -94,44 +94,44 @@ namespace skyline::gpu::vmm {
return InsertChunk(chunk);
}
u64 MemoryManager::ReserveFixed(u64 address, u64 size) {
if (!util::IsAligned(address, constant::GpuPageSize))
u64 MemoryManager::ReserveFixed(u64 virtAddr, u64 size) {
if (!util::IsAligned(virtAddr, constant::GpuPageSize))
return 0;
size = util::AlignUp(size, constant::GpuPageSize);
return InsertChunk(ChunkDescriptor(address, size, 0, ChunkState::Reserved));
return InsertChunk(ChunkDescriptor(virtAddr, size, nullptr, ChunkState::Reserved));
}
u64 MemoryManager::MapAllocate(u8 *pointer, u64 size) {
u64 MemoryManager::MapAllocate(u8 *cpuPtr, u64 size) {
size = util::AlignUp(size, constant::GpuPageSize);
auto mappedChunk{FindChunk(ChunkState::Unmapped, size)};
if (!mappedChunk)
return 0;
auto chunk{*mappedChunk};
chunk.pointer = pointer;
chunk.cpuPtr = cpuPtr;
chunk.size = size;
chunk.state = ChunkState::Mapped;
return InsertChunk(chunk);
}
u64 MemoryManager::MapFixed(u64 address, u8 *pointer, u64 size) {
if (!util::IsAligned(address, constant::GpuPageSize))
u64 MemoryManager::MapFixed(u64 virtAddr, u8 *cpuPtr, u64 size) {
if (!util::IsAligned(virtAddr, constant::GpuPageSize))
return false;
size = util::AlignUp(size, constant::GpuPageSize);
return InsertChunk(ChunkDescriptor(address, size, pointer, ChunkState::Mapped));
return InsertChunk(ChunkDescriptor(virtAddr, size, cpuPtr, ChunkState::Mapped));
}
bool MemoryManager::Unmap(u64 address, u64 size) {
if (!util::IsAligned(address, constant::GpuPageSize))
bool MemoryManager::Unmap(u64 virtAddr, u64 size) {
if (!util::IsAligned(virtAddr, constant::GpuPageSize))
return false;
try {
InsertChunk(ChunkDescriptor(address, size, 0, ChunkState::Unmapped));
InsertChunk(ChunkDescriptor(virtAddr, size, 0, ChunkState::Unmapped));
} catch (const std::exception &e) {
return false;
}
@ -139,19 +139,19 @@ namespace skyline::gpu::vmm {
return true;
}
void MemoryManager::Read(u8 *destination, u64 address, u64 size) const {
auto chunk{std::upper_bound(chunks.begin(), chunks.end(), address, [](const u64 address, const ChunkDescriptor &chunk) -> bool {
return address < chunk.address;
void MemoryManager::Read(u8 *destination, u64 virtAddr, u64 size) const {
auto chunk{std::upper_bound(chunks.begin(), chunks.end(), virtAddr, [](const u64 address, const ChunkDescriptor &chunk) -> bool {
return address < chunk.virtAddr;
})};
if (chunk == chunks.end() || chunk->state != ChunkState::Mapped)
throw exception("Failed to read region in GPU address space: Address: 0x{:X}, Size: 0x{:X}", address, size);
throw exception("Failed to read region in GPU address space: Address: 0x{:X}, Size: 0x{:X}", virtAddr, size);
chunk--;
u64 initialSize{size};
u64 chunkOffset{address - chunk->address};
u8 *source{chunk->pointer + chunkOffset};
u64 chunkOffset{virtAddr - chunk->virtAddr};
u8 *source{chunk->cpuPtr + chunkOffset};
u64 sourceSize{std::min(chunk->size - chunkOffset, size)};
// A continuous region in the GPU address space may be made up of several discontinuous regions in physical memory so we have to iterate over all chunks
@ -161,27 +161,27 @@ namespace skyline::gpu::vmm {
size -= sourceSize;
if (size) {
if (++chunk == chunks.end() || chunk->state != ChunkState::Mapped)
throw exception("Failed to read region in GPU address space: Address: 0x{:X}, Size: 0x{:X}", address, size);
throw exception("Failed to read region in GPU address space: Address: 0x{:X}, Size: 0x{:X}", virtAddr, size);
source = chunk->pointer;
source = chunk->cpuPtr;
sourceSize = std::min(chunk->size, size);
}
}
}
void MemoryManager::Write(u8 *source, u64 address, u64 size) const {
auto chunk{std::upper_bound(chunks.begin(), chunks.end(), address, [](const u64 address, const ChunkDescriptor &chunk) -> bool {
return address < chunk.address;
void MemoryManager::Write(u8 *source, u64 virtAddr, u64 size) const {
auto chunk{std::upper_bound(chunks.begin(), chunks.end(), virtAddr, [](const u64 address, const ChunkDescriptor &chunk) -> bool {
return address < chunk.virtAddr;
})};
if (chunk == chunks.end() || chunk->state != ChunkState::Mapped)
throw exception("Failed to write region in GPU address space: Address: 0x{:X}, Size: 0x{:X}", address, size);
throw exception("Failed to write region in GPU address space: Address: 0x{:X}, Size: 0x{:X}", virtAddr, size);
chunk--;
u64 initialSize{size};
u64 chunkOffset{address - chunk->address};
u8 *destination{chunk->pointer + chunkOffset};
u64 chunkOffset{virtAddr - chunk->virtAddr};
u8 *destination{chunk->cpuPtr + chunkOffset};
u64 destinationSize{std::min(chunk->size - chunkOffset, size)};
// A continuous region in the GPU address space may be made up of several discontinuous regions in physical memory so we have to iterate over all chunks
@ -191,9 +191,9 @@ namespace skyline::gpu::vmm {
size -= destinationSize;
if (size) {
if (++chunk == chunks.end() || chunk->state != ChunkState::Mapped)
throw exception("Failed to write region in GPU address space: Address: 0x{:X}, Size: 0x{:X}", address, size);
throw exception("Failed to write region in GPU address space: Address: 0x{:X}, Size: 0x{:X}", virtAddr, size);
destination = chunk->pointer;
destination = chunk->cpuPtr;
destinationSize = std::min(chunk->size, size);
}
}