38 using namespace SVFUtil;
46 memSSA.setSaberCondAllocator(getSaberCondAllocator());
48 svfg = memSSA.buildFullSVFG(ander);
50 svfg = memSSA.buildPTROnlySVFG(ander);
51 setGraph(memSSA.getSVFG());
55 getSaberCondAllocator()->allocate(getPAG()->getModule());
69 iter != eiter; ++iter)
76 forwardTraverse(
item);
80 if (getCurSlice()->isReachGlobal())
82 DBOUT(
DSaber,
outs() <<
"Forward analysis reaches globals for slice:" << (*iter)->getId() <<
")\n");
86 DBOUT(
DSaber,
outs() <<
"Forward process for slice:" << (*iter)->getId() <<
" (size = " << getCurSlice()->getForwardSliceSize() <<
")\n");
89 getCurSlice()->sinksEnd(); sit != esit; ++sit)
93 backwardTraverse(
item);
96 DBOUT(
DSaber,
outs() <<
"Backward process for slice:" << (*iter)->getId() <<
" (size = " << getCurSlice()->getBackwardSliceSize() <<
")\n");
99 annotateSlice(_curSlice);
101 if(_curSlice->AllPathReachableSolve())
102 _curSlice->setAllReachable();
104 DBOUT(
DSaber,
outs() <<
"Guard computation for slice:" << (*iter)->getId() <<
")\n");
107 reportBug(getCurSlice());
121 bool reachFunExit =
false;
127 while (!worklist.
empty())
153 csIdSet.insert(getSVFG()->getCallSite(SVFUtil::cast<RetDirSVFGEdge>(edge)->getCallSiteId()));
161 if(SVFUtil::isa<IntraDirSVFGEdge>(edge))
170 else if(SVFUtil::isa<IntraIndSVFGEdge>(edge))
172 if(SVFUtil::isa<LoadSVFGNode, IntraMSSAPHISVFGNode>(succ))
194 DBOUT(
DSaber,
outs() <<
"\n##processing source: " << getCurSlice()->getSource()->getId() <<
" forward propagate from (" << edge->
getSrcID());
203 if(isGlobalSVFGNode(dstNode) || getCurSlice()->isReachGlobal())
205 getCurSlice()->setReachGlobal();
215 if(
const CallDirSVFGEdge* callEdge = SVFUtil::dyn_cast<CallDirSVFGEdge>(edge))
216 csId = callEdge->getCallSiteId();
218 csId = SVFUtil::cast<CallIndSVFGEdge>(edge)->getCallSiteId();
227 if(
const RetDirSVFGEdge* callEdge = SVFUtil::dyn_cast<RetDirSVFGEdge>(edge))
228 csId = callEdge->getCallSiteId();
230 csId = SVFUtil::cast<RetIndSVFGEdge>(edge)->getCallSiteId();
241 if(forwardVisited(dstNode,newItem))
247 addForwardVisited(dstNode, newItem);
249 if(pushIntoWorklist(newItem))
261 if(backwardVisited(srcNode))
264 addBackwardVisited(srcNode);
268 pushIntoWorklist(newItem);
274 if(_curSlice!=
nullptr)
281 _curSlice =
new ProgSlice(src,getSaberCondAllocator(), getSVFG());
286 getSVFG()->getStat()->addToSources(slice->
getSource());
288 getSVFG()->getStat()->addToSinks(*it);
290 getSVFG()->getStat()->addToForwardSlice(*it);
292 getSVFG()->getStat()->addToBackwardSlice(*it);
299 const_cast<SVFG*
>(getSVFG())->
dump(
"Slice",
true);
305 outs() <<
"Z3 Mem usage: " << getSaberCondAllocator()->getMemUsage() <<
"\n";
306 outs() <<
"Z3 Number: " << getSaberCondAllocator()->getCondNum() <<
"\n";
#define DBOUT(TYPE, X)
LLVM-style debug macro; defines the debug type of the DEBUG output for each pass.
static AndersenWaveDiff * createAndersenWaveDiff(SVFIR *_pag)
Create a singleton instance directly instead of invoking the LLVM pass manager.
u32_t cxtSize() const
Get context size.
static void setMaxCxtLen(u32_t max)
Set the maximum context limit.
const ContextCond & getContexts() const
Get context.
void pushContext(NodeID cxt)
Push context.
virtual bool matchContext(NodeID cxt)
Match context.
bool push(const Data &data)
NodeType * getSrcNode() const
NodeID getSrcID() const
get methods of the components
NodeType * getDstNode() const
iterator OutEdgeBegin()
iterators
static const Option< u32_t > CxtLimit
static const Option< bool > SABERFULLSVFG
static const Option< u32_t > MaxStepInWrapper
static const Option< bool > DumpSlice
PTACallGraph * getCallGraph() const
Return call graph.
SVFGNodeSetIter sinksEnd() const
SVFGNodeSetIter backwardSliceBegin() const
const SVFGNode * getSource() const
root and sink operations
SVFGNodeSetIter forwardSliceEnd() const
SVFGNodeSetIter sinksBegin() const
SVFGNodeSetIter forwardSliceBegin() const
SVFGNodeSetIter backwardSliceEnd() const
NodeID getId() const
Get ID.
static SVFIR * getPAG(bool buildFromFile=false)
Singleton design here to make sure we only have one instance during any analysis.
bool test(unsigned Idx) const
void BWProcessIncomingEdge(const DPIm &item, SVFGEdge *edge) override
Propagate information backward without matching context, as forward analysis already did it.
Set< const CallICFGNode * > CallSiteSet
void annotateSlice(ProgSlice *slice)
virtual void initialize(SVFModule *module)
Initialize analysis.
virtual void setCurSlice(const SVFGNode *src)
Slice operations.
virtual void analyze(SVFModule *module)
Start analysis here.
void dumpSlices()
Dump SVFG with annotated slice information.
void FWProcessOutgoingEdge(const DPIm &item, SVFGEdge *edge) override
Propagate information forward by matching context.
SVFGNodeSet::const_iterator SVFGNodeSetIter
bool isInAWrapper(const SVFGNode *src, CallSiteSet &csIdSet)
Identify allocation wrappers.
bool isRetVFGEdge() const
bool isCallVFGEdge() const
bool isRetDirectVFGEdge() const
bool isCallDirectVFGEdge() const
VFGEdge::VFGEdgeSetTy::const_iterator const_iterator
LLVM_NODISCARD bool isa(const Y &Val)
std::ostream & outs()
Replacement for llvm::outs().
void dump(const SparseBitVector< ElementSize > &LHS, std::ostream &out)