Commit 40cb8002 authored by Marius Wachtler's avatar Marius Wachtler

BST: switch to a gapless bytecode

- all instructions of the CFG are directly emitted into a single memory region
- all instructions are packed and follow each other directly
- the CFGBlock just stores the offset into the bytecode where the first instruction of the block is located
- invokes are now only a bit in the opcode field; when that bit is set, the pointers to the normal and exceptional CFGBlock*s directly follow the normal instruction
parent 138433d8
......@@ -102,7 +102,7 @@ LivenessAnalysis::LivenessAnalysis(CFG* cfg, const CodeConstants& code_constants
for (CFGBlock* b : cfg->blocks) {
auto visitor = new LivenessBBVisitor(this); // livenessCache unique_ptr will delete it.
for (BST_stmt* stmt : b->body) {
for (BST_stmt* stmt : *b) {
stmt->accept(visitor);
}
liveness_cache.insert(std::make_pair(b, std::unique_ptr<LivenessBBVisitor>(visitor)));
......@@ -257,8 +257,8 @@ public:
void DefinednessBBAnalyzer::processBB(Map& starting, CFGBlock* block) const {
DefinednessVisitor visitor(code_constants, starting);
for (int i = 0; i < block->body.size(); i++) {
block->body[i]->accept(&visitor);
for (BST_stmt* stmt : *block) {
stmt->accept(&visitor);
}
if (VERBOSITY("analysis") >= 3) {
......
......@@ -101,8 +101,8 @@ private:
speculation(speculation) {}
void run() {
for (int i = 0; i < block->body.size(); i++) {
block->body[i]->accept_stmt(this);
for (BST_stmt* stmt : *block) {
stmt->accept_stmt(this);
}
}
......@@ -526,8 +526,6 @@ private:
getType(node->vreg_locals);
}
void visit_invoke(BST_Invoke* node) override { node->stmt->accept_stmt(this); }
void visit_jump(BST_Jump* node) override {}
void visit_print(BST_Print* node) override {
......
......@@ -74,6 +74,8 @@ public:
static Box* executeInner(ASTInterpreter& interpreter, CFGBlock* start_block, BST_stmt* start_at);
private:
Value executeStmt(BST_stmt* node);
Value createFunction(BST_FunctionDef* node, BoxedCode* node_code);
Value doBinOp(BST_stmt* node, Value left, Value right, int op, BinExpType exp_type);
void doStore(int vreg, STOLEN(Value) value);
......@@ -95,7 +97,6 @@ private:
Value visit_importfrom(BST_ImportFrom* node);
Value visit_importname(BST_ImportName* node);
Value visit_importstar(BST_ImportStar* node);
Value visit_invoke(BST_Invoke* node);
Value visit_jump(BST_Jump* node);
Value visit_landingpad(BST_Landingpad* node);
Value visit_list(BST_List* node);
......@@ -362,7 +363,7 @@ Box* ASTInterpreter::execJITedBlock(CFGBlock* b) {
} catch (ExcInfo e) {
--num_inside;
BST_stmt* stmt = getCurrentStatement();
if (stmt->type != BST_TYPE::Invoke)
if (!stmt->is_invoke())
throw e;
assert(getPythonFrameInfo(0) == getFrameInfo());
......@@ -370,7 +371,7 @@ Box* ASTInterpreter::execJITedBlock(CFGBlock* b) {
++getCode()->cxx_exception_count[stmt];
caughtCxxException(&e);
next_block = ((BST_Invoke*)stmt)->exc_dest;
next_block = stmt->get_exc_block();
last_exception = e;
}
return nullptr;
......@@ -384,7 +385,7 @@ Box* ASTInterpreter::executeInner(ASTInterpreter& interpreter, CFGBlock* start_b
assert((start_block == NULL) == (start_at == NULL));
if (start_block == NULL) {
start_block = interpreter.source_info->cfg->getStartingBlock();
start_at = start_block->body[0];
start_at = start_block->body();
}
// Important that this happens after RegisterHelper:
......@@ -395,7 +396,7 @@ Box* ASTInterpreter::executeInner(ASTInterpreter& interpreter, CFGBlock* start_b
if (!from_start) {
interpreter.current_block = start_block;
bool started = false;
for (auto s : start_block->body) {
for (auto s : *start_block) {
if (!started) {
if (s != start_at)
continue;
......@@ -404,7 +405,7 @@ Box* ASTInterpreter::executeInner(ASTInterpreter& interpreter, CFGBlock* start_b
interpreter.setCurrentStatement(s);
Py_XDECREF(v.o);
v = interpreter.visit_stmt(s);
v = interpreter.executeStmt(s);
}
} else {
interpreter.next_block = start_block;
......@@ -427,7 +428,7 @@ Box* ASTInterpreter::executeInner(ASTInterpreter& interpreter, CFGBlock* start_b
// check if we returned from the baseline JIT because we should do a OSR.
if (unlikely(rtn == (Box*)ASTInterpreterJitInterface::osr_dummy_value)) {
BST_Jump* cur_stmt = (BST_Jump*)interpreter.getCurrentStatement();
RELEASE_ASSERT(cur_stmt->type == BST_TYPE::Jump, "");
RELEASE_ASSERT(cur_stmt->type() == BST_TYPE::Jump, "");
// WARNING: do not put a try catch + rethrow block around this code here.
// it will confuse our unwinder!
rtn = interpreter.doOSR(cur_stmt);
......@@ -449,14 +450,14 @@ Box* ASTInterpreter::executeInner(ASTInterpreter& interpreter, CFGBlock* start_b
interpreter.startJITing(interpreter.current_block);
}
for (BST_stmt* s : interpreter.current_block->body) {
for (BST_stmt* s : *interpreter.current_block) {
interpreter.setCurrentStatement(s);
if (interpreter.jit)
interpreter.jit->emitSetCurrentInst(s);
if (v.o) {
Py_DECREF(v.o);
}
v = interpreter.visit_stmt(s);
v = interpreter.executeStmt(s);
}
}
return v.o;
......@@ -759,11 +760,14 @@ Box* ASTInterpreter::doOSR(BST_Jump* node) {
}
}
Value ASTInterpreter::visit_invoke(BST_Invoke* node) {
Value ASTInterpreter::executeStmt(BST_stmt* node) {
if (!node->is_invoke())
return visit_stmt(node);
Value v;
try {
v = visit_stmt(node->stmt);
next_block = node->normal_dest;
v = visit_stmt(node);
next_block = node->get_normal_block();
if (jit) {
jit->emitJump(next_block);
......@@ -778,7 +782,7 @@ Value ASTInterpreter::visit_invoke(BST_Invoke* node) {
++getCode()->cxx_exception_count[node];
caughtCxxException(&e);
next_block = node->exc_dest;
next_block = node->get_exc_block();
last_exception = e;
}
......@@ -909,7 +913,7 @@ Value ASTInterpreter::visit_stmt(BST_stmt* node) {
// to be careful to wrap pendingCallsCheckHelper, and it can signal that it was careful
// by returning from the function instead of breaking.
switch (node->type) {
switch (node->type()) {
case BST_TYPE::Assert:
visit_assert((BST_Assert*)node);
ASTInterpreterJitInterface::pendingCallsCheckHelper();
......@@ -977,9 +981,6 @@ Value ASTInterpreter::visit_stmt(BST_stmt* node) {
break;
case BST_TYPE::Jump:
return visit_jump((BST_Jump*)node);
case BST_TYPE::Invoke:
visit_invoke((BST_Invoke*)node);
break;
case BST_TYPE::SetExcInfo:
visit_setexcinfo((BST_SetExcInfo*)node);
ASTInterpreterJitInterface::pendingCallsCheckHelper();
......@@ -996,7 +997,7 @@ Value ASTInterpreter::visit_stmt(BST_stmt* node) {
// Handle all cases which are derived from BST_stmt_with_dest
default: {
Value v;
switch (node->type) {
switch (node->type()) {
case BST_TYPE::CopyVReg:
v = visit_copyvreg((BST_CopyVReg*)node);
break;
......@@ -1084,7 +1085,7 @@ Value ASTInterpreter::visit_stmt(BST_stmt* node) {
v = visit_makeslice((BST_MakeSlice*)node);
break;
default:
RELEASE_ASSERT(0, "not implemented %d", node->type);
RELEASE_ASSERT(0, "not implemented %d", node->type());
};
doStore(((BST_stmt_with_dest*)node)->vreg_dst, v);
ASTInterpreterJitInterface::pendingCallsCheckHelper();
......@@ -1425,14 +1426,14 @@ Value ASTInterpreter::visit_call(BST_Call* node) {
bool is_callattr = false;
bool callattr_clsonly = false;
int* vreg_elts = NULL;
if (node->type == BST_TYPE::CallAttr) {
if (node->type() == BST_TYPE::CallAttr) {
is_callattr = true;
callattr_clsonly = false;
auto* attr_ast = bst_cast<BST_CallAttr>(node);
func = getVReg(attr_ast->vreg_value);
attr = getCodeConstants().getInternedString(attr_ast->index_attr);
vreg_elts = bst_cast<BST_CallAttr>(node)->elts;
} else if (node->type == BST_TYPE::CallClsAttr) {
} else if (node->type() == BST_TYPE::CallClsAttr) {
is_callattr = true;
callattr_clsonly = true;
auto* attr_ast = bst_cast<BST_CallClsAttr>(node);
......@@ -2109,30 +2110,28 @@ extern "C" Box* astInterpretDeoptFromASM(BoxedCode* code, BST_stmt* enclosing_st
CFGBlock* start_block = NULL;
BST_stmt* starting_statement = NULL;
while (true) {
if (enclosing_stmt->type == BST_TYPE::Invoke) {
auto invoke = bst_cast<BST_Invoke>(enclosing_stmt);
start_block = invoke->normal_dest;
starting_statement = start_block->body[0];
enclosing_stmt = invoke->stmt;
} else if (enclosing_stmt->has_dest_vreg()) {
if (enclosing_stmt->is_invoke()) {
start_block = enclosing_stmt->get_normal_block();
starting_statement = start_block->body();
}
if (enclosing_stmt->has_dest_vreg()) {
int vreg_dst = ((BST_stmt_with_dest*)enclosing_stmt)->vreg_dst;
if (vreg_dst != VREG_UNDEFINED)
interpreter.addSymbol(vreg_dst, expr_val, true);
break;
} else {
RELEASE_ASSERT(0, "encountered an yet unimplemented opcode (got %d)", enclosing_stmt->type);
RELEASE_ASSERT(0, "encountered an yet unimplemented opcode (got %d)", enclosing_stmt->type());
}
}
if (start_block == NULL) {
// TODO innefficient
for (auto block : code->source->cfg->blocks) {
int n = block->body.size();
for (int i = 0; i < n; i++) {
if (block->body[i] == enclosing_stmt) {
ASSERT(i + 1 < n, "how could we deopt from a non-invoke terminator?");
for (auto it = block->begin(), it_end = block->end(); it != it_end; ++it) {
if (*it == enclosing_stmt) {
ASSERT(!(*it)->is_terminator(), "how could we deopt from a non-invoke terminator?");
start_block = block;
starting_statement = block->body[i + 1];
starting_statement = *(++it);
break;
}
}
......
......@@ -69,7 +69,7 @@ SourceInfo::SourceInfo(BoxedModule* m, ScopingResults scoping, FutureFlags futur
}
SourceInfo::~SourceInfo() {
// TODO: release memory..
delete cfg;
}
void FunctionAddressRegistry::registerFunction(const std::string& name, void* addr, int length,
......
......@@ -371,7 +371,7 @@ protected:
public:
static std::pair<SymbolTable*, bool /* created_new_sym_table */>
removeDestVRegsFromSymTable(const CodeConstants& code_constants, SymbolTable* sym_table, BST_Invoke* stmt) {
removeDestVRegsFromSymTable(const CodeConstants& code_constants, SymbolTable* sym_table, BST_stmt* stmt) {
SymTableDstVRegDeleter visitor(code_constants, sym_table);
stmt->accept(&visitor);
return std::make_pair(visitor.sym_table, visitor.created_new_sym_table);
......@@ -649,7 +649,7 @@ static void emitBBs(IRGenState* irstate, TypeAnalysis* types, const OSREntryDesc
// Function-entry safepoint:
// TODO might be more efficient to do post-call safepoints?
generator->doSafePoint(block->body[0]);
generator->doSafePoint(block->body());
} else if (entry_descriptor && block == entry_descriptor->backedge->target) {
assert(block->predecessors.size() > 1);
assert(osr_entry_block);
......@@ -742,13 +742,13 @@ static void emitBBs(IRGenState* irstate, TypeAnalysis* types, const OSREntryDesc
// are disallowed
auto pred = block->predecessors[0];
auto last_inst = pred->body.back();
auto last_inst = pred->getLastStmt();
SymbolTable* sym_table = ending_symbol_tables[pred];
bool created_new_sym_table = false;
if (last_inst->type == BST_TYPE::Invoke && bst_cast<BST_Invoke>(last_inst)->exc_dest == block)
if (last_inst->is_invoke() && last_inst->get_exc_block() == block)
std::tie(sym_table, created_new_sym_table) = SymTableDstVRegDeleter::removeDestVRegsFromSymTable(
irstate->getCodeConstants(), sym_table, bst_cast<BST_Invoke>(last_inst));
irstate->getCodeConstants(), sym_table, last_inst);
generator->copySymbolsFrom(sym_table);
for (auto&& p : *definedness_tables[pred]) {
......@@ -809,7 +809,7 @@ static void emitBBs(IRGenState* irstate, TypeAnalysis* types, const OSREntryDesc
if (predecessor->idx > block->idx) {
// Loop safepoint:
// TODO does it matter which side of the backedge these are on?
generator->doSafePoint(block->body[0]);
generator->doSafePoint(block->body());
break;
}
}
......@@ -824,9 +824,9 @@ static void emitBBs(IRGenState* irstate, TypeAnalysis* types, const OSREntryDesc
llvm_exit_blocks[block] = ending_st.ending_block;
if (ending_st.exception_state.size()) {
BST_stmt* last_stmt = block->body.back();
assert(last_stmt->type == BST_TYPE::Invoke);
CFGBlock* exc_block = bst_cast<BST_Invoke>(last_stmt)->exc_dest;
BST_stmt* last_stmt = block->getLastStmt();
assert(last_stmt->is_invoke());
CFGBlock* exc_block = last_stmt->get_exc_block();
assert(!incoming_exception_state.count(exc_block));
incoming_exception_state.insert(std::make_pair(exc_block, ending_st.exception_state));
......
......@@ -1098,14 +1098,14 @@ private:
InternedString attr;
CompilerVariable* func;
int* vreg_elts = NULL;
if (node->type == BST_TYPE::CallAttr) {
if (node->type() == BST_TYPE::CallAttr) {
is_callattr = true;
callattr_clsonly = false;
auto* attr_ast = bst_cast<BST_CallAttr>(node);
vreg_elts = bst_cast<BST_CallAttr>(node)->elts;
func = evalVReg(attr_ast->vreg_value);
attr = irstate->getCodeConstants().getInternedString(attr_ast->index_attr);
} else if (node->type == BST_TYPE::CallClsAttr) {
} else if (node->type() == BST_TYPE::CallClsAttr) {
is_callattr = true;
callattr_clsonly = true;
auto* attr_ast = bst_cast<BST_CallClsAttr>(node);
......@@ -2406,7 +2406,7 @@ private:
llvm::DebugLoc::get(node->lineno, 0, irstate->getFuncDbgInfo()));
}
switch (node->type) {
switch (node->type()) {
case BST_TYPE::Assert:
doAssert(bst_cast<BST_Assert>(node), unw_info);
break;
......@@ -2455,20 +2455,6 @@ private:
assert(!unw_info.hasHandler());
doJump(bst_cast<BST_Jump>(node), unw_info);
break;
case BST_TYPE::Invoke: {
assert(!unw_info.hasHandler());
BST_Invoke* invoke = bst_cast<BST_Invoke>(node);
doStmt(invoke->stmt, UnwindInfo(irstate->getCode(), node, entry_blocks[invoke->exc_dest]));
assert(state == RUNNING || state == DEAD);
if (state == RUNNING) {
emitter.getBuilder()->CreateBr(entry_blocks[invoke->normal_dest]);
endBlock(FINISHED);
}
break;
}
case BST_TYPE::Raise:
doRaise(bst_cast<BST_Raise>(node), unw_info);
break;
......@@ -2485,7 +2471,7 @@ private:
// Handle all cases which are derived from BST_stmt_with_dest
default: {
CompilerVariable* rtn = NULL;
switch (node->type) {
switch (node->type()) {
case BST_TYPE::CopyVReg:
rtn = evalAssign(bst_cast<BST_CopyVReg>(node), unw_info);
break;
......@@ -2573,7 +2559,7 @@ private:
rtn = evalMakeSlice(bst_cast<BST_MakeSlice>(node), unw_info);
break;
default:
printf("Unhandled stmt type at " __FILE__ ":" STRINGIFY(__LINE__) ": %d\n", node->type);
printf("Unhandled stmt type at " __FILE__ ":" STRINGIFY(__LINE__) ": %d\n", node->type());
exit(1);
}
rtn = evalSliceExprPost((BST_stmt_with_dest*)node, unw_info, rtn);
......@@ -2951,18 +2937,25 @@ public:
}
printf("\n");
}
for (int i = 0; i < block->body.size(); i++) {
for (BST_stmt* stmt : *block) {
if (state == DEAD)
break;
assert(state != FINISHED);
#if ENABLE_SAMPLING_PROFILER
auto stmt = block->body[i];
if (stmt->type != BST_TYPE::Landigpad && stmt->lineno > 0)
doSafePoint(block->body[i]);
doSafePoint(stmt);
#endif
if (stmt->is_invoke()) {
doStmt(stmt, UnwindInfo(irstate->getCode(), stmt, entry_blocks[stmt->get_exc_block()]));
doStmt(block->body[i], UnwindInfo(irstate->getCode(), block->body[i], NULL));
assert(state == RUNNING || state == DEAD);
if (state == RUNNING) {
emitter.getBuilder()->CreateBr(entry_blocks[stmt->get_normal_block()]);
endBlock(FINISHED);
}
} else
doStmt(stmt, UnwindInfo(irstate->getCode(), stmt, NULL));
}
if (VERBOSITY("irgenerator") >= 2) { // print ending symbol table
printf(" %d fini:", block->idx);
......
......@@ -35,7 +35,6 @@ class MDNode;
namespace pyston {
class BST_Invoke;
class CFGBlock;
class GCBuilder;
struct PatchpointInfo;
......
This diff is collapsed.
This diff is collapsed.
This source diff could not be displayed because it is too large. You can view the blob instead.
......@@ -75,20 +75,68 @@ public:
// contains the address of the entry function
std::pair<CFGBlock*, Box*>(*entry_code)(void* interpeter, CFGBlock* block, Box** vregs);
llvm::SmallVector<BST_stmt*, 4> body;
llvm::SmallVector<CFGBlock*, 2> predecessors, successors;
int idx; // index in the CFG
const char* info;
int offset_of_first_stmt; // offset of this block into the bytecode array in bytes
typedef llvm::SmallVector<BST_stmt*, 4>::iterator iterator;
#ifndef NDEBUG
// only one block at a time is allowed to add instructions to the CFG
bool allowed_to_add_stuff = false;
#endif
CFGBlock(CFG* cfg, int idx) : cfg(cfg), code(NULL), entry_code(NULL), idx(idx), info(NULL) {}
CFGBlock(CFG* cfg, int idx, const char* info = NULL)
: cfg(cfg), code(NULL), entry_code(NULL), idx(idx), info(info), offset_of_first_stmt(-1) {}
BST_stmt* body() {
auto it = begin();
return it != end() ? *it : NULL;
}
int sizeInBytes() const {
int size = 0;
for (BST_stmt* stmt : *this) {
size += stmt->size_in_bytes();
}
return size;
}
BST_stmt* getLastStmt() const {
// TODO: this is inefficient
for (BST_stmt* stmt : *this) {
if (stmt->is_terminator())
return stmt;
}
return NULL;
}
bool isPlaced() const { return offset_of_first_stmt != -1; }
void connectTo(CFGBlock* successor, bool allow_backedge = false);
void unconnectFrom(CFGBlock* successor);
void push_back(BST_stmt* node) { body.push_back(node); }
void print(const CodeConstants& code_constants, llvm::raw_ostream& stream = llvm::outs());
class iterator {
private:
BST_stmt* stmt;
public:
iterator(BST_stmt* stmt) : stmt(stmt) {}
bool operator!=(const iterator& rhs) const { return stmt != rhs.stmt; }
bool operator==(const iterator& rhs) const { return stmt == rhs.stmt; }
iterator& operator++() __attribute__((always_inline)) {
if (likely(stmt)) {
if (unlikely(stmt->is_terminator()))
*this = CFGBlock::end();
else
stmt = (BST_stmt*)&((unsigned char*)stmt)[stmt->size_in_bytes()];
}
return *this;
}
BST_stmt* operator*() const { return stmt; }
};
inline iterator begin() const;
static iterator end() { return iterator(NULL); }
};
// the vregs are split into three parts.
......@@ -169,7 +217,7 @@ public:
bool hasVRegsAssigned() const { return num_vregs != -1; }
void assignVRegs(const CodeConstants& code_constants, CFG* cfg, const ParamNames& param_names,
llvm::DenseMap<int*, InternedString>& id_vreg);
llvm::DenseMap<class TrackingVRegPtr, InternedString>& id_vreg);
};
// Control Flow Graph
......@@ -180,22 +228,19 @@ private:
public:
std::vector<CFGBlock*> blocks;
BSTAllocator bytecode;
public:
CFG() : next_idx(0) {}
~CFG() {
for (auto&& block : blocks) {
delete block;
}
}
CFGBlock* getStartingBlock() { return blocks[0]; }
VRegInfo& getVRegInfo() { return vreg_info; }
CFGBlock* addBlock() {
int idx = next_idx;
next_idx++;
CFGBlock* block = new CFGBlock(this, idx);
blocks.push_back(block);
return block;
}
// Creates a block which must be placed later, using placeBlock().
// Must be placed on same CFG it was created on.
// You can also safely delete it without placing it.
......@@ -205,15 +250,43 @@ public:
}
void placeBlock(CFGBlock* block) {
assert(!block->isPlaced());
#ifndef NDEBUG
// check that there is no block with the same offset of first stmt
assert(!block->allowed_to_add_stuff);
std::unordered_map<int /* offset */, int> check_no_dup_blocks;
for (auto&& b : blocks) {
b->allowed_to_add_stuff = false;
++check_no_dup_blocks[b->offset_of_first_stmt];
}
++check_no_dup_blocks[bytecode.getSize()];
assert(check_no_dup_blocks[bytecode.getSize()] == 1);
for (auto&& e : check_no_dup_blocks) {
assert(e.second == 1);
}
#endif
assert(block->idx == -1);
block->idx = next_idx;
next_idx++;
blocks.push_back(block);
block->offset_of_first_stmt = bytecode.getSize();
#ifndef NDEBUG
block->allowed_to_add_stuff = true;
#endif
}
void print(const CodeConstants& code_constants, llvm::raw_ostream& stream = llvm::outs());
};
CFGBlock::iterator CFGBlock::begin() const {
if (offset_of_first_stmt >= cfg->bytecode.getSize())
return end();
return iterator((BST_stmt*)&cfg->bytecode.getData()[offset_of_first_stmt]);
}
class VRegSet {
private:
llvm::BitVector v;
......
......@@ -563,7 +563,7 @@ extern "C" int PyGen_NeedsFinalizing(PyGenObject* gen) noexcept {
return true;
// TODO: is this safe? probably not...
// return self->paused_frame_info->stmt->type == AST_TYPE::Invoke;
// return self->paused_frame_info->stmt->is_invoke();
#if 0
int i;
PyFrameObject* f = gen->gi_frame;
......
......@@ -27,8 +27,8 @@ protected:
static BoxedCode* getCodeObjectOfFirstMakeFunction(BoxedCode* module_code) {
BoxedCode* code = NULL;
for (BST_stmt* stmt : module_code->source->cfg->blocks[0]->body) {
if (stmt->type != BST_TYPE::MakeFunction)
for (BST_stmt* stmt : *module_code->source->cfg->getStartingBlock()) {
if (stmt->type() != BST_TYPE::MakeFunction)
continue;
code = module_code->code_constants.getFuncOrClass(bst_cast<BST_MakeFunction>(stmt)->index_func_def).second;
break;
......@@ -74,7 +74,7 @@ TEST_F(AnalysisTest, augassign) {
for (CFGBlock* block : cfg->blocks) {
//printf("%d\n", block->idx);
if (block->body.back()->type != BST_TYPE::Return)
if (block->getLastStmt()->type() != BST_TYPE::Return)
ASSERT_TRUE(liveness->isLiveAtEnd(vregs.getVReg(module->interned_strings->get("a")), block));
}
......@@ -116,10 +116,10 @@ void doOsrTest(bool is_osr, bool i_maybe_undefined) {
CFGBlock* loop_backedge = cfg->blocks[5];
ASSERT_EQ(6, loop_backedge->idx);
ASSERT_EQ(1, loop_backedge->body.size());
ASSERT_TRUE(loop_backedge->body()->is_terminator());
ASSERT_EQ(BST_TYPE::Jump, loop_backedge->body[0]->type);
BST_Jump* backedge = bst_cast<BST_Jump>(loop_backedge->body[0]);
ASSERT_EQ(BST_TYPE::Jump, loop_backedge->body()->type());
BST_Jump* backedge = bst_cast<BST_Jump>(loop_backedge->body());
ASSERT_LE(backedge->target->idx, loop_backedge->idx);
std::unique_ptr<PhiAnalysis> phis;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment