Equal -> Brace Initializer + Remove Constexpr Auto for Integers

PixelyIon
2020-09-26 10:47:57 +05:30
committed by PixelyIon
parent 2764bd7c96
commit 429af1990a
114 changed files with 675 additions and 684 deletions
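The conversion below is mechanical, but the rationale is worth spelling out: brace initialization rejects implicit narrowing conversions at compile time, which pairs naturally with writing explicit integer types rather than constexpr auto. A minimal sketch of the before/after pattern (illustrative only, not taken from the diff):

#include <cstdint>

using u32 = std::uint32_t;
using u64 = std::uint64_t;

constexpr u32 OldStyle = 0x40; // copy-initialization, the style this commit removes
constexpr u32 NewStyle{0x40};  // brace-initialization, the style it converts to

void Example(u64 wide) {
    // u32 narrowed{wide};                // error: braces reject the narrowing conversion
    u32 narrowed{static_cast<u32>(wide)}; // narrowing now has to be spelled out
    (void)narrowed;
}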

View File

@@ -7,7 +7,7 @@
namespace skyline {
namespace constant {
constexpr u32 GpfifoRegisterCount = 0x40; //!< The number of GPFIFO registers
constexpr u32 GpfifoRegisterCount{0x40}; //!< The number of GPFIFO registers
}
namespace gpu::engine {

View File

@@ -161,11 +161,11 @@ namespace skyline::gpu::engine {
break;
case Registers::SemaphoreInfo::StructureSize::FourWords: {
// Convert the current nanosecond time to GPU ticks
constexpr u64 NsToTickNumerator = 384;
constexpr u64 NsToTickDenominator = 625;
constexpr u64 NsToTickNumerator{384};
constexpr u64 NsToTickDenominator{625};
u64 nsTime = util::GetTimeNs();
u64 timestamp = (nsTime / NsToTickDenominator) * NsToTickNumerator + ((nsTime % NsToTickDenominator) * NsToTickNumerator) / NsToTickDenominator;
u64 nsTime{util::GetTimeNs()};
u64 timestamp{(nsTime / NsToTickDenominator) * NsToTickNumerator + ((nsTime % NsToTickDenominator) * NsToTickNumerator) / NsToTickDenominator};
state.gpu->memoryManager.Write<FourWordResult>(FourWordResult{result, timestamp}, registers.semaphore.address.Pack());
break;
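For context on the hunk above: 384/625 = 0.6144, so the conversion treats GPU ticks as running at 614.4 MHz relative to nanoseconds, and the expression is split into quotient and remainder terms so the intermediate ns * 384 product (which could overflow for large timestamps) is never formed, without losing exactness. A standalone sketch (the function name and the check are mine, not the engine's):

#include <cstdint>

using u64 = std::uint64_t;

// ticks = ns * 384 / 625, computed in split form: (ns / d) * n + ((ns % d) * n) / d
// equals floor(ns * n / d) exactly, while every intermediate value stays well below 2^64.
constexpr u64 NsToTicks(u64 ns) {
    constexpr u64 n{384}, d{625};
    return (ns / d) * n + ((ns % d) * n) / d;
}

static_assert(NsToTicks(1'000'000'000) == 614'400'000); // one second -> 614.4M ticks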

View File

@@ -11,7 +11,7 @@
namespace skyline {
namespace constant {
constexpr u32 Maxwell3DRegisterCounter = 0xE00; //!< The number of Maxwell 3D registers
constexpr u32 Maxwell3DRegisterCounter{0xE00}; //!< The number of Maxwell 3D registers
}
namespace gpu::engine {

View File

@@ -43,12 +43,12 @@ namespace skyline::gpu::gpfifo {
}
void GPFIFO::Process(const std::vector<u32> &segment) {
for (auto entry = segment.begin(); entry != segment.end(); entry++) {
for (auto entry{segment.begin()}; entry != segment.end(); entry++) {
// An entry containing all zeroes is a NOP, skip over it
if (*entry == 0)
continue;
auto methodHeader = reinterpret_cast<const PushBufferMethodHeader *>(&*entry);
auto methodHeader{reinterpret_cast<const PushBufferMethodHeader *>(&*entry)};
switch (methodHeader->secOp) {
case PushBufferMethodHeader::SecOp::IncMethod:
@@ -80,7 +80,7 @@ namespace skyline::gpu::gpfifo {
void GPFIFO::Run() {
std::lock_guard lock(pushBufferQueueLock);
while (!pushBufferQueue.empty()) {
auto pushBuffer = pushBufferQueue.front();
auto pushBuffer{pushBufferQueue.front()};
if (pushBuffer.segment.empty())
pushBuffer.Fetch(state.gpu->memoryManager);
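For readers unfamiliar with the pushbuffer being walked above: each 32-bit entry is reinterpreted as a method header whose secOp field decides how the following words are consumed (incrementing method, non-incrementing, immediate data, and so on). The bitfield below is a rough sketch based on public GPFIFO documentation, not a copy of this repo's PushBufferMethodHeader, so treat the exact names and widths as assumptions:

#include <cstdint>

using u32 = std::uint32_t;

struct MethodHeaderSketch {
    u32 methodAddress : 13; // target register offset (in words) within the bound engine
    u32 subChannel    : 3;  // which bound engine receives the methods
    u32 argument      : 13; // method count, or immediate data depending on secOp
    u32 secOp         : 3;  // e.g. IncMethod, NonIncMethod, OneInc, ImmdDataMethod, EndPbSegment
};

static_assert(sizeof(MethodHeaderSketch) == sizeof(u32));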

View File

@@ -23,7 +23,7 @@ namespace skyline::gpu {
FORCE_INLINE bool MacroInterpreter::Step(Opcode *delayedOpcode) {
switch (opcode->operation) {
case Opcode::Operation::AluRegister: {
u32 result = HandleAlu(opcode->aluOperation, registers[opcode->srcA], registers[opcode->srcB]);
u32 result{HandleAlu(opcode->aluOperation, registers[opcode->srcA], registers[opcode->srcB])};
HandleAssignment(opcode->assignmentOperation, opcode->dest, result);
break;
@@ -32,8 +32,8 @@
HandleAssignment(opcode->assignmentOperation, opcode->dest, registers[opcode->srcA] + opcode->immediate);
break;
case Opcode::Operation::BitfieldReplace: {
u32 src = registers[opcode->srcB];
u32 dest = registers[opcode->srcA];
u32 src{registers[opcode->srcB]};
u32 dest{registers[opcode->srcA]};
// Extract the source region
src = (src >> opcode->bitfield.srcBit) & opcode->bitfield.GetMask();
@@ -48,25 +48,25 @@ namespace skyline::gpu {
break;
}
case Opcode::Operation::BitfieldExtractShiftLeftImmediate: {
u32 src = registers[opcode->srcB];
u32 dest = registers[opcode->srcA];
u32 src{registers[opcode->srcB]};
u32 dest{registers[opcode->srcA]};
u32 result = ((src >> dest) & opcode->bitfield.GetMask()) << opcode->bitfield.destBit;
u32 result{((src >> dest) & opcode->bitfield.GetMask()) << opcode->bitfield.destBit};
HandleAssignment(opcode->assignmentOperation, opcode->dest, result);
break;
}
case Opcode::Operation::BitfieldExtractShiftLeftRegister: {
u32 src = registers[opcode->srcB];
u32 dest = registers[opcode->srcA];
u32 src{registers[opcode->srcB]};
u32 dest{registers[opcode->srcA]};
u32 result = ((src >> opcode->bitfield.srcBit) & opcode->bitfield.GetMask()) << dest;
u32 result{((src >> opcode->bitfield.srcBit) & opcode->bitfield.GetMask()) << dest};
HandleAssignment(opcode->assignmentOperation, opcode->dest, result);
break;
}
case Opcode::Operation::ReadImmediate: {
u32 result = maxwell3D.registers.raw[registers[opcode->srcA] + opcode->immediate];
u32 result{maxwell3D.registers.raw[registers[opcode->srcA] + opcode->immediate]};
HandleAssignment(opcode->assignmentOperation, opcode->dest, result);
break;
}
@@ -74,15 +74,15 @@
if (delayedOpcode != nullptr)
throw exception("Cannot branch while inside a delay slot");
u32 value = registers[opcode->srcA];
bool branch = (opcode->branchCondition == Opcode::BranchCondition::Zero) ? (value == 0) : (value != 0);
u32 value{registers[opcode->srcA]};
bool branch{(opcode->branchCondition == Opcode::BranchCondition::Zero) ? (value == 0) : (value != 0)};
if (branch) {
if (opcode->noDelay) {
opcode += opcode->immediate;
return true;
} else {
Opcode *targetOpcode = opcode + opcode->immediate;
Opcode *targetOpcode{opcode + opcode->immediate};
// Step into delay slot
opcode++;
@@ -111,25 +111,25 @@ namespace skyline::gpu {
FORCE_INLINE u32 MacroInterpreter::HandleAlu(Opcode::AluOperation operation, u32 srcA, u32 srcB) {
switch (operation) {
case Opcode::AluOperation::Add: {
u64 result = static_cast<u64>(srcA) + srcB;
u64 result{static_cast<u64>(srcA) + srcB};
carryFlag = result >> 32;
return static_cast<u32>(result);
}
case Opcode::AluOperation::AddWithCarry: {
u64 result = static_cast<u64>(srcA) + srcB + carryFlag;
u64 result{static_cast<u64>(srcA) + srcB + carryFlag};
carryFlag = result >> 32;
return static_cast<u32>(result);
}
case Opcode::AluOperation::Subtract: {
u64 result = static_cast<u64>(srcA) - srcB;
u64 result{static_cast<u64>(srcA) - srcB};
carryFlag = result & 0xFFFFFFFF;
return static_cast<u32>(result);
}
case Opcode::AluOperation::SubtractWithBorrow: {
u64 result = static_cast<u64>(srcA) - srcB - !carryFlag;
u64 result{static_cast<u64>(srcA) - srcB - !carryFlag};
carryFlag = result & 0xFFFFFFFF;
return static_cast<u32>(result);
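A quick worked example of the carry handling in the ALU cases above: widening both operands to 64 bits makes the carry-out of a 32-bit addition observable as bit 32 of the result.

#include <cstdint>

using u32 = std::uint32_t;
using u64 = std::uint64_t;

constexpr u64 wide{static_cast<u64>(0xFFFFFFFFu) + 1u};
static_assert(static_cast<u32>(wide) == 0); // the 32-bit result wraps to zero
static_assert((wide >> 32) == 1);           // and the carry flag comes out set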

View File

@@ -6,8 +6,8 @@
namespace skyline::gpu::vmm {
MemoryManager::MemoryManager(const DeviceState &state) : state(state) {
constexpr u64 GpuAddressSpaceSize = 1ul << 40; //!< The size of the GPU address space
constexpr u64 GpuAddressSpaceBase = 0x100000; //!< The base of the GPU address space - must be non-zero
constexpr u64 GpuAddressSpaceSize{1ul << 40}; //!< The size of the GPU address space
constexpr u64 GpuAddressSpaceBase{0x100000}; //!< The base of the GPU address space - must be non-zero
// Create the initial chunk that will be split to create new chunks
ChunkDescriptor baseChunk(GpuAddressSpaceBase, GpuAddressSpaceSize, 0, ChunkState::Unmapped);
@@ -15,9 +15,9 @@ namespace skyline::gpu::vmm {
}
std::optional<ChunkDescriptor> MemoryManager::FindChunk(u64 size, ChunkState state) {
auto chunk = std::find_if(chunkList.begin(), chunkList.end(), [size, state](const ChunkDescriptor &chunk) -> bool {
auto chunk{std::find_if(chunkList.begin(), chunkList.end(), [size, state](const ChunkDescriptor &chunk) -> bool {
return chunk.size > size && chunk.state == state;
});
})};
if (chunk != chunkList.end())
return *chunk;
@@ -26,12 +26,12 @@ namespace skyline::gpu::vmm {
}
u64 MemoryManager::InsertChunk(const ChunkDescriptor &newChunk) {
auto chunkEnd = chunkList.end();
for (auto chunk = chunkList.begin(); chunk != chunkEnd; chunk++) {
auto chunkEnd{chunkList.end()};
for (auto chunk{chunkList.begin()}; chunk != chunkEnd; chunk++) {
if (chunk->CanContain(newChunk)) {
auto oldChunk = *chunk;
u64 newSize = newChunk.address - chunk->address;
u64 extension = chunk->size - newSize - newChunk.size;
auto oldChunk{*chunk};
u64 newSize{newChunk.address - chunk->address};
u64 extension{chunk->size - newSize - newChunk.size};
if (newSize == 0) {
*chunk = newChunk;
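To make the split arithmetic in the hunk above concrete: newSize is the size of the piece of the existing chunk that stays in front of the inserted chunk, and extension is whatever remains behind it. A worked example with made-up addresses (the names headSize/tailSize are mine):

#include <cstdint>

using u64 = std::uint64_t;

constexpr u64 oldAddress{0x100000}, oldSize{0x400000}; // existing unmapped chunk
constexpr u64 newAddress{0x200000}, newSize{0x100000}; // chunk being inserted into it

constexpr u64 headSize{newAddress - oldAddress};       // corresponds to the hunk's newSize
constexpr u64 tailSize{oldSize - headSize - newSize};  // corresponds to the hunk's extension

static_assert(headSize == 0x100000 && tailSize == 0x200000);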
@@ -49,7 +49,7 @@ namespace skyline::gpu::vmm {
chunk->size = newChunk.address - chunk->address;
// Deletes all chunks that are within the chunk being inserted and split the final one
auto tailChunk = std::next(chunk);
auto tailChunk{std::next(chunk)};
while (tailChunk != chunkEnd) {
if (tailChunk->address + tailChunk->size >= newChunk.address + newChunk.size)
break;
@@ -62,7 +62,7 @@ namespace skyline::gpu::vmm {
if (tailChunk == chunkEnd)
break;
u64 chunkSliceOffset = newChunk.address + newChunk.size - tailChunk->address;
u64 chunkSliceOffset{newChunk.address + newChunk.size - tailChunk->address};
tailChunk->address += chunkSliceOffset;
tailChunk->size -= chunkSliceOffset;
if (tailChunk->state == ChunkState::Mapped)
@@ -70,7 +70,7 @@ namespace skyline::gpu::vmm {
// If the size of the head chunk is zero then we can directly replace it with our new one rather than inserting it
auto headChunk = std::prev(tailChunk);
auto headChunk{std::prev(tailChunk)};
if (headChunk->size == 0)
*headChunk = newChunk;
else
@@ -85,11 +85,11 @@ namespace skyline::gpu::vmm {
u64 MemoryManager::ReserveSpace(u64 size) {
size = util::AlignUp(size, constant::GpuPageSize);
auto newChunk = FindChunk(size, ChunkState::Unmapped);
auto newChunk{FindChunk(size, ChunkState::Unmapped)};
if (!newChunk)
return 0;
auto chunk = *newChunk;
auto chunk{*newChunk};
chunk.size = size;
chunk.state = ChunkState::Reserved;
@@ -107,11 +107,11 @@ namespace skyline::gpu::vmm {
u64 MemoryManager::MapAllocate(u64 address, u64 size) {
size = util::AlignUp(size, constant::GpuPageSize);
auto mappedChunk = FindChunk(size, ChunkState::Unmapped);
auto mappedChunk{FindChunk(size, ChunkState::Unmapped)};
if (!mappedChunk)
return 0;
auto chunk = *mappedChunk;
auto chunk{*mappedChunk};
chunk.cpuAddress = address;
chunk.size = size;
chunk.state = ChunkState::Mapped;
@@ -132,9 +132,9 @@ namespace skyline::gpu::vmm {
if (!util::IsAligned(address, constant::GpuPageSize))
return false;
auto chunk = std::find_if(chunkList.begin(), chunkList.end(), [address](const ChunkDescriptor &chunk) -> bool {
auto chunk{std::find_if(chunkList.begin(), chunkList.end(), [address](const ChunkDescriptor &chunk) -> bool {
return chunk.address == address;
});
})};
if (chunk == chunkList.end())
return false;
@@ -146,9 +146,9 @@ namespace skyline::gpu::vmm {
}
void MemoryManager::Read(u8 *destination, u64 address, u64 size) const {
auto chunk = std::upper_bound(chunkList.begin(), chunkList.end(), address, [](const u64 address, const ChunkDescriptor &chunk) -> bool {
auto chunk{std::upper_bound(chunkList.begin(), chunkList.end(), address, [](const u64 address, const ChunkDescriptor &chunk) -> bool {
return address < chunk.address;
});
})};
if (chunk == chunkList.end() || chunk->state != ChunkState::Mapped)
throw exception("Failed to read region in GPU address space: Address: 0x{:X}, Size: 0x{:X}", address, size);
@@ -176,9 +176,9 @@ namespace skyline::gpu::vmm {
}
void MemoryManager::Write(u8 *source, u64 address, u64 size) const {
auto chunk = std::upper_bound(chunkList.begin(), chunkList.end(), address, [](const u64 address, const ChunkDescriptor &chunk) -> bool {
auto chunk{std::upper_bound(chunkList.begin(), chunkList.end(), address, [](const u64 address, const ChunkDescriptor &chunk) -> bool {
return address < chunk.address;
});
})};
if (chunk == chunkList.end() || chunk->state != ChunkState::Mapped)
throw exception("Failed to write region in GPU address space: Address: 0x{:X}, Size: 0x{:X}", address, size);

View File

@@ -7,7 +7,7 @@
namespace skyline {
namespace constant {
constexpr u64 GpuPageSize = 1 << 16; //!< The page size of the GPU address space
constexpr u64 GpuPageSize{1 << 16}; //!< The page size of the GPU address space
}
namespace gpu::vmm {
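GpuPageSize works out to 64 KiB, and the VMM functions elsewhere in this commit round sizes up to it via util::AlignUp. For reference, a typical power-of-two align-up (an assumption about the general technique, not necessarily skyline's exact util::AlignUp) looks like this:

#include <cstdint>

using u64 = std::uint64_t;

constexpr u64 AlignUpSketch(u64 value, u64 align) {
    return (value + align - 1) & ~(align - 1); // only valid for power-of-two alignments
}

static_assert(AlignUpSketch(0x1234, 1 << 16) == 0x10000);  // partial pages round up
static_assert(AlignUpSketch(0x10000, 1 << 16) == 0x10000); // already-aligned sizes are unchanged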

View File

@@ -7,7 +7,7 @@
namespace skyline {
namespace constant {
constexpr size_t MaxHwSyncpointCount = 192; //!< The maximum number of HOST1X syncpoints on t210
constexpr size_t MaxHwSyncpointCount{192}; //!< The maximum number of HOST1X syncpoints on t210
}
namespace gpu {

View File

@@ -14,38 +14,38 @@ namespace skyline::gpu {
}
void Texture::SynchronizeHost() {
auto texture = state.process->GetPointer<u8>(guest->address);
auto size = format.GetSize(dimensions);
auto texture{state.process->GetPointer<u8>(guest->address)};
auto size{format.GetSize(dimensions)};
backing.resize(size);
auto output = reinterpret_cast<u8 *>(backing.data());
auto output{reinterpret_cast<u8 *>(backing.data())};
if (guest->tileMode == texture::TileMode::Block) {
// Reference on Block-linear tiling: https://gist.github.com/PixelyIon/d9c35050af0ef5690566ca9f0965bc32
constexpr u8 sectorWidth = 16; // The width of a sector in bytes
constexpr u8 sectorHeight = 2; // The height of a sector in lines
constexpr u8 gobWidth = 64; // The width of a GOB in bytes
constexpr u8 gobHeight = 8; // The height of a GOB in lines
constexpr u8 sectorWidth{16}; // The width of a sector in bytes
constexpr u8 sectorHeight{2}; // The height of a sector in lines
constexpr u8 gobWidth{64}; // The width of a GOB in bytes
constexpr u8 gobHeight{8}; // The height of a GOB in lines
auto blockHeight = guest->tileConfig.blockHeight; // The height of the blocks in GOBs
auto robHeight = gobHeight * blockHeight; // The height of a single ROB (Row of Blocks) in lines
auto surfaceHeight = dimensions.height / format.blockHeight; // The height of the surface in lines
auto surfaceHeightRobs = util::AlignUp(surfaceHeight, robHeight) / robHeight; // The height of the surface in ROBs (Row Of Blocks)
auto robWidthBytes = util::AlignUp((guest->tileConfig.surfaceWidth / format.blockWidth) * format.bpb, gobWidth); // The width of a ROB in bytes
auto robWidthBlocks = robWidthBytes / gobWidth; // The width of a ROB in blocks (and GOBs because block width == 1 on the Tegra X1)
auto robBytes = robWidthBytes * robHeight; // The size of a ROB in bytes
auto gobYOffset = robWidthBytes * gobHeight; // The offset of the next Y-axis GOB from the current one in linear space
auto blockHeight{guest->tileConfig.blockHeight}; // The height of the blocks in GOBs
auto robHeight{gobHeight * blockHeight}; // The height of a single ROB (Row of Blocks) in lines
auto surfaceHeight{dimensions.height / format.blockHeight}; // The height of the surface in lines
auto surfaceHeightRobs{util::AlignUp(surfaceHeight, robHeight) / robHeight}; // The height of the surface in ROBs (Row Of Blocks)
auto robWidthBytes{util::AlignUp((guest->tileConfig.surfaceWidth / format.blockWidth) * format.bpb, gobWidth)}; // The width of a ROB in bytes
auto robWidthBlocks{robWidthBytes / gobWidth}; // The width of a ROB in blocks (and GOBs because block width == 1 on the Tegra X1)
auto robBytes{robWidthBytes * robHeight}; // The size of a ROB in bytes
auto gobYOffset{robWidthBytes * gobHeight}; // The offset of the next Y-axis GOB from the current one in linear space
auto inputSector = texture; // The address of the input sector
auto outputRob = output; // The address of the output block
auto inputSector{texture}; // The address of the input sector
auto outputRob{output}; // The address of the output block
for (u32 rob = 0, y = 0, paddingY = 0; rob < surfaceHeightRobs; rob++) { // Every Surface contains `surfaceHeightRobs` ROBs
auto outputBlock = outputRob; // We iterate through a block independently of the ROB
for (u32 block = 0; block < robWidthBlocks; block++) { // Every ROB contains `surfaceWidthBlocks` Blocks
auto outputGob = outputBlock; // We iterate through a GOB independently of the block
for (u32 gobY = 0; gobY < blockHeight; gobY++) { // Every Block contains `blockHeight` Y-axis GOBs
for (u32 index = 0; index < sectorWidth * sectorHeight; index++) { // Every Y-axis GOB contains `sectorWidth * sectorHeight` sectors
u32 xT = ((index << 3) & 0b10000) | ((index << 1) & 0b100000); // Morton-Swizzle on the X-axis
u32 yT = ((index >> 1) & 0b110) | (index & 0b1); // Morton-Swizzle on the Y-axis
for (u32 rob{}, y{}, paddingY{}; rob < surfaceHeightRobs; rob++) { // Every Surface contains `surfaceHeightRobs` ROBs
auto outputBlock{outputRob}; // We iterate through a block independently of the ROB
for (u32 block{}; block < robWidthBlocks; block++) { // Every ROB contains `surfaceWidthBlocks` Blocks
auto outputGob{outputBlock}; // We iterate through a GOB independently of the block
for (u32 gobY{}; gobY < blockHeight; gobY++) { // Every Block contains `blockHeight` Y-axis GOBs
for (u32 index{}; index < sectorWidth * sectorHeight; index++) { // Every Y-axis GOB contains `sectorWidth * sectorHeight` sectors
u32 xT{((index << 3) & 0b10000) | ((index << 1) & 0b100000)}; // Morton-Swizzle on the X-axis
u32 yT{((index >> 1) & 0b110) | (index & 0b1)}; // Morton-Swizzle on the Y-axis
std::memcpy(outputGob + (yT * robWidthBytes) + xT, inputSector, sectorWidth);
inputSector += sectorWidth; // `sectorWidth` bytes are of sequential image data
}
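To make the swizzle above concrete: a GOB is 64 bytes wide and 8 lines tall, iterated as 32 sequential 16-byte copies, and the bit shuffling places sector index bits 1 and 4 into the 16-byte column offset (xT) and bits 0, 2 and 3 into the line (yT). Pulled out into a standalone sketch with a few spot checks:

#include <cstdint>

using u32 = std::uint32_t;

constexpr u32 SwizzleX(u32 index) { return ((index << 3) & 0b10000) | ((index << 1) & 0b100000); }
constexpr u32 SwizzleY(u32 index) { return ((index >> 1) & 0b110) | (index & 0b1); }

static_assert(SwizzleX(0) == 0 && SwizzleY(0) == 0);    // first sector: column 0, line 0
static_assert(SwizzleX(1) == 0 && SwizzleY(1) == 1);    // next sector goes down a line
static_assert(SwizzleX(2) == 16 && SwizzleY(2) == 0);   // then over to the next 16-byte column
static_assert(SwizzleX(31) == 48 && SwizzleY(31) == 7); // last sector: bottom-right of the GOB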
@@ -61,13 +61,13 @@ namespace skyline::gpu {
paddingY = (guest->tileConfig.blockHeight - blockHeight) * (sectorWidth * sectorWidth * sectorHeight); // Calculate the amount of padding between contiguous sectors
}
} else if (guest->tileMode == texture::TileMode::Pitch) {
auto sizeLine = guest->format.GetSize(dimensions.width, 1); // The size of a single line of pixel data
auto sizeStride = guest->format.GetSize(guest->tileConfig.pitch, 1); // The size of a single stride of pixel data
auto sizeLine{guest->format.GetSize(dimensions.width, 1)}; // The size of a single line of pixel data
auto sizeStride{guest->format.GetSize(guest->tileConfig.pitch, 1)}; // The size of a single stride of pixel data
auto inputLine = texture; // The address of the input line
auto outputLine = output; // The address of the output line
auto inputLine{texture}; // The address of the input line
auto outputLine{output}; // The address of the output line
for (u32 line = 0; line < dimensions.height; line++) {
for (u32 line{}; line < dimensions.height; line++) {
std::memcpy(outputLine, inputLine, sizeLine);
inputLine += sizeStride;
outputLine += sizeLine;
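The pitch-linear path above is a row-by-row de-pad: each source row occupies sizeStride bytes in guest memory, but only its first sizeLine bytes hold pixel data for the visible width. A self-contained sketch of the same copy (names and parameters are illustrative):

#include <cstddef>
#include <cstdint>
#include <cstring>

void DepitchSketch(const std::uint8_t *src, std::uint8_t *dst, std::size_t height, std::size_t sizeLine, std::size_t sizeStride) {
    for (std::size_t line{}; line < height; line++) {
        std::memcpy(dst, src, sizeLine); // copy only the pixel bytes, dropping the row padding
        src += sizeStride;               // advance by the full pitch on the padded side
        dst += sizeLine;                 // but pack rows tightly on the output side
    }
}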

View File

@@ -184,7 +184,7 @@ namespace skyline {
std::shared_ptr<PresentationTexture> InitializePresentationTexture() {
if (host)
throw exception("Trying to create multiple PresentationTexture objects from a single GuestTexture");
auto presentation = std::make_shared<PresentationTexture>(state, shared_from_this(), dimensions, format);
auto presentation{std::make_shared<PresentationTexture>(state, shared_from_this(), dimensions, format)};
host = std::static_pointer_cast<Texture>(presentation);
return presentation;
}
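The guest texture hands a shared_ptr to itself to the new PresentationTexture via shared_from_this(), which only works if the class derives from std::enable_shared_from_this and is already owned by a shared_ptr. A minimal sketch of that pattern (types and names are mine, not skyline's):

#include <memory>

struct Guest : std::enable_shared_from_this<Guest> {
    std::shared_ptr<Guest> Self() { return shared_from_this(); } // safe only once heap-managed
};

int main() {
    auto guest{std::make_shared<Guest>()}; // ownership must exist before shared_from_this()
    auto self{guest->Self()};              // shares ownership with `guest`
    return self.use_count() == 2 ? 0 : 1;  // both handles own the same object
}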