Advertisement
Not a member of Pastebin yet?
Sign up —
it unlocks many cool features!
- uint64_t PLH::ILCallback::getJitFunc(const asmjit::FuncSignature& sig, const PLH::ILCallback::tUserCallback callback, const uint64_t retAddr /* = 0 */) {
- /*AsmJit is smart enough to track register allocations and will forward
- the proper registers the right values and fixup any it dirtied earlier.
- This can only be done if it knows the signature, and ABI, so we give it
- them. It also only does this mapping for calls, so we need to generate
- calls on our boundaries of transfers when we want argument order correct
- (ABI stuff is managed for us when calling C code within this project via host mode).
- */
- asmjit::CodeHolder code;
- code.init(asmjit::CodeInfo(asmjit::ArchInfo::kIdHost));
- // initialize function
- asmjit::x86::Compiler cc(&code);
- cc.addFunc(sig);
- // to small to really need it
- cc.func()->frame().resetPreservedFP();
- // map argument slots to registers, following abi.
- std::vector<asmjit::x86::Reg> argRegisters;
- for (uint8_t arg_idx = 0; arg_idx < sig.argCount(); arg_idx++) {
- const uint8_t argType = sig.args()[arg_idx];
- asmjit::x86::Reg arg;
- if (isGeneralReg(argType)) {
- arg = cc.newInt32();
- } else if (isXmmReg(argType)) {
- arg = cc.newXmm();
- } else {
- ErrorLog::singleton().push("Parameters wider than 64bits not supported", ErrorLevel::SEV);
- return 0;
- }
- cc.setArg(arg_idx, arg);
- argRegisters.push_back(arg);
- }
- // setup the stack structure to hold arguments for user callback
- argsStack = cc.newStack((uint32_t)(sizeof(uint64_t) * sig.argCount()), 4);
- asmjit::x86::Mem argsStackIdx(argsStack);
- // assigns some register as index reg
- asmjit::x86::Gp i = cc.newUIntPtr();
- // stackIdx <- stack[i].
- argsStackIdx.setIndex(i);
- // r/w are sizeof(uint64_t) width now
- argsStackIdx.setSize(sizeof(uint64_t));
- // set i = 0
- cc.mov(i, 0);
- UNREFERENCED_PARAMETER(callback);
- //// mov from arguments registers into the stack structure
- for (uint8_t arg_idx = 0; arg_idx < sig.argCount(); arg_idx++) {
- const uint8_t argType = sig.args()[arg_idx];
- // have to cast back to explicit register types to gen right mov type
- if (isGeneralReg(argType)) {
- cc.mov(argsStackIdx, argRegisters.at(arg_idx).as<asmjit::x86::Gp>());
- } else if(isXmmReg(argType)) {
- cc.movq(argsStackIdx, argRegisters.at(arg_idx).as<asmjit::x86::Xmm>());
- } else {
- ErrorLog::singleton().push("Parameters wider than 64bits not supported", ErrorLevel::SEV);
- return 0;
- }
- // next structure slot (+= sizeof(uint64_t))
- cc.add(i, sizeof(uint64_t));
- }
- // get pointer to stack structure and pass it to the user callback
- asmjit::x86::Gp argStruct = cc.newUIntPtr("argStruct");
- cc.lea(argStruct, argsStack);
- // fill reg to pass struct arg count to callback
- asmjit::x86::Gp argCountParam = cc.newU8();
- cc.mov(argCountParam, (uint8_t)sig.argCount());
- // call to user provided function (use ABI of host compiler)
- auto call = cc.call(asmjit::Imm(static_cast<int64_t>((intptr_t)callback)), asmjit::FuncSignatureT<void, Parameters*, uint8_t>(asmjit::CallConv::kIdHost));
- call->setArg(0, argStruct);
- call->setArg(1, argCountParam);
- // deref the trampoline ptr (must live longer)
- asmjit::x86::Gp orig_ptr = cc.newUIntPtr();;
- cc.mov(orig_ptr, (uintptr_t)getTrampolineHolder());
- cc.mov(orig_ptr, asmjit::x86::ptr(orig_ptr));
- /*-- OPTIONALLY SPOOF RET ADDR --
- If the retAddr param is != 0 then we transfer via a push of dest addr, ret addr, and jmp.
- Other wise we just call. Potentially useful for defensive binaries.
- */
- /*unsigned char* retBufTmp = (unsigned char*)m_mem.getBlock(10);
- *(unsigned char*)retBufTmp = 0xC3;*/
- uint64_t retAddrReal = retAddr;
- //retAddrReal = (uint64_t)retBufTmp;
- if (retAddrReal == 0) {
- /* call trampoline, map input args same order they were passed to us.*/
- auto orig_call = cc.call(orig_ptr, sig);
- for (uint8_t arg_idx = 0; arg_idx < sig.argCount(); arg_idx++) {
- orig_call->setArg(arg_idx, argRegisters.at(arg_idx));
- }
- cc.endFunc();
- cc.finalize();
- } else {
- //asmjit::Label ret_jit_stub = cc.newLabel();
- //asmjit::X86Gp tmpReg = cc.newUIntPtr();
- //cc.lea(tmpReg, asmjit::x86::ptr(ret_jit_stub));
- //
- //cc.push(tmpReg); // push ret
- //cc.push((uintptr_t)retAddrReal); // push &ret_inst
- //cc.jmp(orig_ptr); // jmp orig
- //cc.bind(ret_jit_stub); // ret_inst:
- //cc.endFunc(); // omit prolog cleanup
- //cc.finalize();
- }
- // end function
- // worst case, overestimates for case trampolines needed
- code.flatten();
- size_t size = code.codeSize();
- // Allocate a virtual memory (executable).
- m_callbackBuf = (uint64_t)m_mem.getBlock(size);
- if (!m_callbackBuf) {
- __debugbreak();
- return 0;
- }
- if (code.hasUnresolvedLinks()) {
- code.resolveUnresolvedLinks();
- }
- // Relocate to the base-address of the allocated memory.
- code.relocateToBase(m_callbackBuf);
- code.copyFlattenedData((unsigned char*)m_callbackBuf, size);
- return m_callbackBuf;
- }
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement