//===- CacheUtility.h - Caching values in the forward pass for later use
//---===//
//
// Enzyme Project
//
// Part of the Enzyme Project, under the Apache License v2.0 with LLVM
// Exceptions. See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
// If using this code in an academic setting, please cite the following:
// @incollection{enzymeNeurips,
// title = {Instead of Rewriting Foreign Code for Machine Learning,
// Automatically Synthesize Fast Gradients},
// author = {Moses, William S. and Churavy, Valentin},
// booktitle = {Advances in Neural Information Processing Systems 33},
// year = {2020},
// note = {To appear in},
// }
//
//===----------------------------------------------------------------------===//
//
// This file declares a base helper class CacheUtility that manages the cache
// of values from the forward pass for later use.
//
//===----------------------------------------------------------------------===//
26
27#ifndef ENZYME_CACHE_UTILITY_H
28#define ENZYME_CACHE_UTILITY_H
29
30#include <llvm/Config/llvm-config.h>
31#if LLVM_VERSION_MAJOR >= 16
32#define private public
33#include "llvm/Analysis/ScalarEvolution.h"
34#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
35#undef private
36#else
37#include "SCEV/ScalarEvolution.h"
38#include "SCEV/ScalarEvolutionExpander.h"
39#endif
40
41#include "llvm/ADT/STLExtras.h"
42#include "llvm/ADT/SmallPtrSet.h"
43#include "llvm/Analysis/LoopInfo.h"
44#include "llvm/IR/Instructions.h"
45
46#include "llvm/Analysis/AssumptionCache.h"
47#include "llvm/Analysis/TargetLibraryInfo.h"
48#include "llvm/IR/Dominators.h"
49#include "llvm/Support/CommandLine.h"
50#include "llvm/Transforms/Utils/ValueMapper.h"
51
52#include "FunctionUtils.h"
54
55extern "C" {
56/// Pack 8 bools together in a single byte
57extern llvm::cl::opt<bool> EfficientBoolCache;
58
59extern llvm::cl::opt<bool> EnzymeZeroCache;
60}
61
62/// Container for all loop information to synthesize gradients
64 /// Canonical induction variable of the loop
65 llvm::AssertingVH<llvm::PHINode> var;
66
67 /// Increment of the induction
68 llvm::AssertingVH<llvm::Instruction> incvar;
69
70 /// Allocation of induction variable of reverse pass
71 llvm::AssertingVH<llvm::AllocaInst> antivaralloc;
72
73 /// Header of this loop
74 llvm::BasicBlock *header;
75
76 /// Preheader of this loop
77 llvm::BasicBlock *preheader;
78
79 /// Whether this loop has a statically analyzable number of iterations
80 bool dynamic;
81
82 /// limit is last value of a canonical induction variable
83 /// iters is number of times loop is run (thus iters = limit + 1)
85
87
88 /// An offset to add to the index when getting the cache pointer.
90
91 /// An overriding allocation limit size.
93
94 /// All blocks this loop exits too
95 llvm::SmallPtrSet<llvm::BasicBlock *, 8> exitBlocks;
96
97 /// Parent loop of this loop
98 llvm::Loop *parent;
99};
100static inline bool operator==(const LoopContext &lhs, const LoopContext &rhs) {
101 return lhs.parent == rhs.parent;
102}
103
/// Modes of potential unwraps
enum class UnwrapMode {
  // It is already known that it is legal to fully unwrap
  // this instruction. This means unwrap this instruction,
  // its operands, etc. However, this will stop at known
  // cached available from a tape.
  LegalFullUnwrap,
  // Unlike LegalFullUnwrap, this will unwrap through a tape
  LegalFullUnwrapNoTapeReplace,
  // Attempt to fully unwrap this, looking up whenever it
  // is not legal to unwrap
  AttemptFullUnwrapWithLookup,
  // Attempt to fully unwrap this
  AttemptFullUnwrap,
  // Unwrap the current instruction but not its operand
  AttemptSingleUnwrap,
};
121
122static inline llvm::raw_ostream &operator<<(llvm::raw_ostream &os,
123 UnwrapMode mode) {
124 switch (mode) {
126 os << "LegalFullUnwrap";
127 break;
129 os << "LegalFullUnwrapNoTapeReplace";
130 break;
132 os << "AttemptFullUnwrapWithLookup";
133 break;
135 os << "AttemptFullUnwrap";
136 break;
138 os << "AttemptSingleUnwrap";
139 break;
140 }
141 return os;
142}
143
145public:
146 /// The function whose instructions we are caching
147 llvm::Function *const newFunc;
148
149 /// Various analysis results of newFunc
150 llvm::TargetLibraryInfo &TLI;
151 llvm::DominatorTree DT;
152
153protected:
154 llvm::LoopInfo LI;
155 llvm::AssumptionCache AC;
157
158public:
159 // Helper basicblock where all new allocations will be added to
160 // This includes allocations for cache variables
161 llvm::BasicBlock *inversionAllocs;
162
163protected:
164 CacheUtility(llvm::TargetLibraryInfo &TLI, llvm::Function *newFunc)
165 : newFunc(newFunc), TLI(TLI), DT(*newFunc), LI(DT), AC(*newFunc),
166 SE(*newFunc, TLI, AC, DT, LI) {
167 inversionAllocs = llvm::BasicBlock::Create(newFunc->getContext(),
168 "allocsForInversion", newFunc);
169 }
170
171public:
172 virtual ~CacheUtility();
173
174protected:
175 /// Map of Loop to requisite loop information needed for AD (forward/reverse
176 /// induction/etc)
177 std::map<llvm::Loop *, LoopContext> loopContexts;
178
179public:
180 /// Given a BasicBlock BB in newFunc, set loopContext to the relevant
181 /// contained loop and return true. If BB is not in a loop, return false
182 bool getContext(llvm::BasicBlock *BB, LoopContext &loopContext,
183 bool ReverseLimit);
184 /// Return whether the given instruction is used as necessary as part of a
185 /// loop context This includes as the canonical induction variable or
186 /// increment
187 bool isInstructionUsedInLoopInduction(llvm::Instruction &I) {
188 for (auto &context : loopContexts) {
189 if (context.second.var == &I || context.second.incvar == &I ||
190 context.second.maxLimit == &I || context.second.trueLimit == &I) {
191 return true;
192 }
193 }
194 return false;
195 }
196
197 llvm::AllocaInst *getDynamicLoopLimit(llvm::Loop *L,
198 bool ReverseLimit = true);
199
200 /// Print out all currently cached values
201 void dumpScope() {
202 llvm::errs() << "scope:\n";
203 for (auto a : scopeMap) {
204 llvm::errs() << " scopeMap[" << *a.first << "] = " << *a.second.first
205 << " ctx:" << a.second.second.Block->getName() << "\n";
206 }
207 llvm::errs() << "end scope\n";
208 }
209
210 unsigned getCacheAlignment(unsigned bsize) const {
211 if ((bsize & (bsize - 1)) == 0) {
212 if (bsize > 8)
213 return 8;
214 else
215 return bsize;
216 } else if (bsize > 0 && bsize % 8 == 0) {
217 return 8;
218 } else if (bsize > 0 && bsize % 4 == 0) {
219 return 4;
220 } else if (bsize > 0 && bsize % 2 == 0) {
221 return 2;
222 } else
223 return 1;
224 }
225
226 /// Erase this instruction both from LLVM modules and any local
227 /// data-structures
228 virtual void erase(llvm::Instruction *I);
229 /// Replace this instruction both in LLVM modules and any local
230 /// data-structures
231 virtual void replaceAWithB(llvm::Value *A, llvm::Value *B,
232 bool storeInCache = false);
233
234 // Context information to request calculation of loop limit information
236 // Whether the limit needs to be accessible for a reverse pass
238
239 // A block inside of the loop, defining the location
240 llvm::BasicBlock *Block;
241 // Instead of getting the actual limits, return a limit of one
243
248 };
249
250 /// Given a LimitContext ctx, representing a location inside a loop nest,
251 /// break each of the loops up into chunks of loops where each chunk's number
252 /// of iterations can be computed at the chunk preheader. Every dynamic loop
253 /// defines the start of a chunk. SubLimitType is a vector of chunk objects.
254 /// More specifically it is a vector of { # iters in a Chunk (sublimit), Chunk
255 /// } Each chunk object is a vector of loops contained within the chunk. For
256 /// every loop, this returns pair of the LoopContext and the limit of that
257 /// loop Both the vector of Chunks and vector of Loops within a Chunk go from
258 /// innermost loop to outermost loop.
259 typedef llvm::SmallVector<std::pair<
260 /*sublimit*/ llvm::Value *,
261 /*loop limits*/ llvm::SmallVector<
262 std::pair<LoopContext, llvm::Value *>, 4>>,
263 0>
265 SubLimitType getSubLimits(bool inForwardPass, llvm::IRBuilder<> *RB,
266 LimitContext ctx, llvm::Value *extraSize = nullptr);
267
268private:
269 /// Internal data structure used by getSubLimit to avoid computing the same
270 /// loop limit multiple times if possible. Map's a desired limitMinus1 (see
271 /// getSubLimits) and the block the true limit requested to the value of the
272 /// limit accessible at that block
273 llvm::ValueMap<llvm::Value *,
274 std::map<llvm::BasicBlock *, llvm::WeakTrackingVH>>
275 LimitCache;
276 /// Internal data structure used by getSubLimit to avoid computing the
277 /// cumulative loop limit multiple times if possible. Map's a desired pair of
278 /// operands to be multiplied (see getSubLimits) and the block the cumulative
279 /// limit requested to the value of the limit accessible at that block This
280 /// cache is also shared with computeIndexOfChunk
281 std::map<std::tuple<llvm::Value *, llvm::Value *, llvm::BasicBlock *>,
282 llvm::Value *>
283 SizeCache;
284
285 /// Given a loop context, compute the corresponding index into said loop at
286 /// the IRBuilder<>
287 llvm::Value *computeIndexOfChunk(
288 bool inForwardPass, llvm::IRBuilder<> &v,
289 llvm::ArrayRef<std::pair<LoopContext, llvm::Value *>> containedloops,
290 const llvm::ValueToValueMapTy &available);
291
292private:
293 /// Given a cache allocation and an index denoting how many Chunks deep the
294 /// allocation is being indexed into, return the invariant metadata describing
295 /// used to describe loads/stores to the indexed pointer
296 /// Note that the cache allocation should either be an allocainst (if in
297 /// fwd/both) or an extraction from the tape
298 std::map<std::pair<llvm::Value *, int>, llvm::MDNode *>
299 CachePointerInvariantGroups;
300 /// Given a value being cached, return the invariant metadata of any
301 /// loads/stores to memory storing that value
302 std::map<llvm::Value *, llvm::MDNode *> ValueInvariantGroups;
303
304protected:
305 /// A map of values being cached to their underlying allocation/limit context
306 std::map<llvm::Value *,
307 std::pair<llvm::AssertingVH<llvm::AllocaInst>, LimitContext>>
309
310 /// A map of allocations to a vector of instruction used to create by the
311 /// allocation Keeping track of these values is useful for deallocation. This
312 /// is stored as a vector explicitly to order theses instructions in such a
313 /// way that they can be erased by iterating in reverse order.
314 std::map<llvm::AllocaInst *,
315 llvm::SmallVector<llvm::AssertingVH<llvm::Instruction>, 4>>
317
318 /// A map of allocations to a set of instructions which free memory as part of
319 /// the cache.
320 std::map<llvm::AllocaInst *, std::set<llvm::AssertingVH<llvm::CallInst>>>
322
323 /// A map of allocations to a set of instructions which allocate memory as
324 /// part of the cache
325 std::map<llvm::AllocaInst *,
326 llvm::SmallVector<llvm::AssertingVH<llvm::CallInst>, 4>>
328
329 /// Perform the final load from the cache, applying requisite invariant
330 /// group and alignment
331 llvm::Value *loadFromCachePointer(llvm::Type *T, llvm::IRBuilder<> &BuilderM,
332 llvm::Value *cptr, llvm::Value *cache);
333
334public:
335 /// Create a cache of Type T at the given LimitContext. If allocateInternal is
336 /// set this will allocate the requesite memory. If extraSize is set,
337 /// allocations will be a factor of extraSize larger
338 llvm::AllocaInst *createCacheForScope(LimitContext ctx, llvm::Type *T,
339 llvm::StringRef name, bool shouldFree,
340 bool allocateInternal = true,
341 llvm::Value *extraSize = nullptr);
342
343 /// High-level utility to "unwrap" an instruction at a new location specified
344 /// by BuilderM. Depending on the mode, it will either just unwrap this
345 /// instruction, all of its instructions operands, and optionally lookup
346 /// values when it is not legal to unwrap. If a value cannot be unwrap'd at a
347 /// given location, this will null. This high-level utility should be
348 /// implemented based off the low-level caching infrastructure provided in
349 /// this class.
350 virtual llvm::Value *
351 unwrapM(llvm::Value *const val, llvm::IRBuilder<> &BuilderM,
352 const llvm::ValueToValueMapTy &available, UnwrapMode mode,
353 llvm::BasicBlock *scope = nullptr, bool permitCache = true) = 0;
354
355 /// High-level utility to get the value an instruction at a new location
356 /// specified by BuilderM. Unlike unwrap, this function can never fail --
357 /// falling back to creating a cache if necessary. This function is
358 /// prepopulated with a set of values that are already known to be available
359 /// and may contain optimizations for getting the value in more efficient ways
360 /// (e.g. unwrap'ing when legal, looking up equivalent values, etc). This
361 /// high-level utility should be implemented based off the low-level caching
362 /// infrastructure provided in this class.
363 virtual llvm::Value *
364 lookupM(llvm::Value *val, llvm::IRBuilder<> &BuilderM,
365 const llvm::ValueToValueMapTy &incoming_availalble =
366 llvm::ValueToValueMapTy(),
367 bool tryLegalityCheck = true, llvm::BasicBlock *scope = nullptr) = 0;
368
369 virtual bool assumeDynamicLoopOfSizeOne(llvm::Loop *L) const = 0;
370
371 /// If an allocation is requested to be freed, this subclass will be called to
372 /// chose how and where to free it. It is by default not implemented, falling
373 /// back to an error. Subclasses who want to free memory should implement this
374 /// function.
375 virtual llvm::CallInst *freeCache(llvm::BasicBlock *forwardPreheader,
376 const SubLimitType &antimap, int i,
377 llvm::AllocaInst *alloc, llvm::Type *myType,
378 llvm::ConstantInt *byteSizeOfType,
379 llvm::Value *storeInto,
380 llvm::MDNode *InvariantMD) {
381 assert(0 && "freeing cache not handled in this scenario");
382 llvm_unreachable("freeing cache not handled in this scenario");
383 }
384
385 /// Given an allocation defined at a particular ctx, store the value val
386 /// in the cache at the location defined in the given builder
387 void storeInstructionInCache(LimitContext ctx, llvm::IRBuilder<> &BuilderM,
388 llvm::Value *val, llvm::AllocaInst *cache,
389 llvm::MDNode *TBAA = nullptr);
390
391 /// Given an allocation defined at a particular ctx, store the instruction
392 /// in the cache right after the instruction is executed
393 void storeInstructionInCache(LimitContext ctx, llvm::Instruction *inst,
394 llvm::AllocaInst *cache,
395 llvm::MDNode *TBAA = nullptr);
396
397 /// Given an allocation specified by the LimitContext ctx and cache, compute a
398 /// pointer that can hold the underlying type being cached. This value should
399 /// be computed at BuilderM. Optionally, instructions needed to generate this
400 /// pointer can be stored in scopeInstructions
401 llvm::Value *getCachePointer(llvm::Type *T, bool inForwardPass,
402 llvm::IRBuilder<> &BuilderM, LimitContext ctx,
403 llvm::Value *cache, bool storeInInstructionsMap,
404 const llvm::ValueToValueMapTy &available,
405 llvm::Value *extraSize);
406
407 /// Given an allocation specified by the LimitContext ctx and cache, lookup
408 /// the underlying cached value.
409 llvm::Value *lookupValueFromCache(llvm::Type *T, bool inForwardPass,
410 llvm::IRBuilder<> &BuilderM,
411 LimitContext ctx, llvm::Value *cache,
412 bool isi1,
413 const llvm::ValueToValueMapTy &available,
414 llvm::Value *extraSize = nullptr,
415 llvm::Value *extraOffset = nullptr);
416
417protected:
418 // List of values loaded from the cache
419 llvm::SmallPtrSet<llvm::LoadInst *, 10> CacheLookups;
420};
421
422// Create a new canonical induction variable of Type Ty for Loop L
423// Return the variable and the increment instruction
424std::pair<llvm::PHINode *, llvm::Instruction *>
425InsertNewCanonicalIV(llvm::Loop *L, llvm::Type *Ty,
426 const llvm::Twine &Name = "iv");
427
428// Attempt to rewrite all phinode's in the loop in terms of the
429// induction variable
431 llvm::BasicBlock *Header, llvm::PHINode *CanonicalIV,
432 llvm::Instruction *Increment, MustExitScalarEvolution &SE,
433 llvm::function_ref<void(llvm::Instruction *, llvm::Value *)> replacer,
434 llvm::function_ref<void(llvm::Instruction *)> eraser);
435#endif
// NOTE(review): the text that followed the closing #endif here was Doxygen
// tooltip residue from an HTML-to-text extraction (duplicated declaration
// snippets for the entities declared above). It was not part of the header
// and has been removed so the file ends at the include guard.