 #include <atomic>
 #include <mutex>
 #include <chrono>
+#include <queue>
+#include <condition_variable>

 using namespace BinaryNinja;
@@ -139,6 +141,12 @@ extern "C"
         }).detach();
     });

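+    // A deferred patch request: the target architecture and address, plus
+    // whether to force the branch (true) or convert it to a NOP (false).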
+    struct PatchInfo {
+        Ref<Architecture> arch;
+        uint64_t address;
+        bool alwaysBranch;
+    };
+
     auto processFunctionBatch = [](Ref<BinaryView> viewRef,
         const std::vector<Ref<Function>>& funcBatch,
         int maxPassesPerFunction,
@@ -153,18 +161,28 @@ extern "C"
                 break;

             auto mlil = func->GetMediumLevelIL();
-            if (!mlil || mlil->GetInstructionCount() == 0)
+            if (!mlil || mlil->GetInstructionCount() == 0) {
+                processedFunctions.fetch_add(1);
                 continue;
+            }

             auto arch = func->GetArchitecture();
-            if (!arch)
+            if (!arch) {
+                processedFunctions.fetch_add(1);
                 continue;
+            }
+
+            size_t instrCount = mlil->GetInstructionCount();
+            // if (instrCount > 10000) {
+            //     std::string funcName = func->GetSymbol() ? func->GetSymbol()->GetShortName() : "sub_" + std::to_string(func->GetStart());
+            //     LogInfo("Processing large function %s with %zu instructions", funcName.c_str(), instrCount);
+            // }

             int funcPatches = 0;
             int pass = 1;

             while (pass <= maxPassesPerFunction) {
-                int passPatchCount = 0;
+                std::vector<PatchInfo> pendingPatches;

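+                // Scan pass: record patch candidates without modifying the
+                // view, so no lock is held while walking the MLIL.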
                 for (size_t i = 0; i < mlil->GetInstructionCount(); ++i) {
                     auto instr = mlil->GetInstruction(i);
@@ -175,34 +193,33 @@ extern "C"
                     if (val.state == BNRegisterValueType::ConstantValue) {
                         if (val.value == 0) {
                             if (viewRef->IsNeverBranchPatchAvailable(arch, instr.address)) {
-                                {
-                                    std::lock_guard<std::mutex> lock(updateMutex);
-                                    viewRef->ConvertToNop(arch, instr.address);
-                                }
-                                passPatchCount++;
+                                pendingPatches.push_back({arch, instr.address, false});
                             }
                         }
                         else {
                             if (viewRef->IsAlwaysBranchPatchAvailable(arch, instr.address)) {
-                                {
-                                    std::lock_guard<std::mutex> lock(updateMutex);
-                                    viewRef->AlwaysBranch(arch, instr.address);
-                                }
-                                passPatchCount++;
+                                pendingPatches.push_back({arch, instr.address, true});
                             }
                         }
                     }
                 }

-                funcPatches += passPatchCount;
-
-                if (passPatchCount == 0)
+                if (pendingPatches.empty())
                     break;
-
+
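+                // Apply the whole batch under one lock and run a single
+                // UpdateAnalysis() per pass, rather than locking per patch.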
                 {
                     std::lock_guard<std::mutex> lock(updateMutex);
+                    for (const auto& patch : pendingPatches) {
+                        if (patch.alwaysBranch) {
+                            viewRef->AlwaysBranch(patch.arch, patch.address);
+                        } else {
+                            viewRef->ConvertToNop(patch.arch, patch.address);
+                        }
+                    }
                     viewRef->UpdateAnalysis();
                 }
+
+                funcPatches += pendingPatches.size();
                 pass++;
             }

@@ -255,24 +272,53 @@ extern "C"
         std::atomic<size_t> processedFunctions(0);
         std::mutex updateMutex;

-        std::vector<std::thread> threads;
-        size_t functionsPerThread = (totalFuncs + threadCount - 1) / threadCount;
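+        // Replace static per-thread slicing with a shared work queue, so
+        // threads that finish early keep pulling functions until none remain.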
+        std::queue<Ref<Function>> workQueue;
+        std::mutex queueMutex;
+        std::condition_variable cv;
+        std::atomic<bool> workDone(false);

-        for (int tid = 0; tid < threadCount; tid++) {
-            size_t startIdx = tid * functionsPerThread;
-            size_t endIdx = std::min(startIdx + functionsPerThread, totalFuncs);
-
-            if (startIdx >= totalFuncs)
-                break;
+        for (auto& func : functions) {
+            workQueue.push(func);
+        }
+
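+        // Worker loop: each thread claims a small batch from the queue under
+        // the lock, then processes it, until the queue drains or cancellation.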
+        auto worker = [&]() {
+            while (true) {
+                std::vector<Ref<Function>> localBatch;
+
+                {
+                    std::unique_lock<std::mutex> lock(queueMutex);
+
+                    cv.wait(lock, [&] { return !workQueue.empty() || workDone.load() || shouldCancel.load(); });
+
+                    if ((workDone.load() && workQueue.empty()) || shouldCancel.load())
+                        break;
+
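+                    // Claim bigger batches while the queue is full to cut
+                    // lock round-trips, smaller ones near the end so the
+                    // last functions stay spread across threads.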
+                    size_t remaining = workQueue.size();
+                    size_t batchSize = 1;
+                    if (remaining > 100) {
+                        batchSize = 5;
+                    } else if (remaining > 50) {
+                        batchSize = 3;
+                    } else if (remaining > 20) {
+                        batchSize = 2;
+                    }
+
+                    for (size_t i = 0; i < batchSize && !workQueue.empty(); ++i) {
+                        localBatch.push_back(workQueue.front());
+                        workQueue.pop();
+                    }
+                }

-            std::vector<Ref<Function>> funcBatch;
-            for (size_t i = startIdx; i < endIdx; i++) {
-                funcBatch.push_back(functions[i]);
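+                // Process the claimed batch outside the queue lock so other
+                // workers can keep pulling work in the meantime.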
+                if (!localBatch.empty()) {
+                    processFunctionBatch(viewRef, localBatch, maxPassesPerFunction,
+                        globalPatchCount, shouldCancel, updateMutex, processedFunctions);
+                }
             }
-
-            threads.emplace_back(processFunctionBatch, viewRef, funcBatch, maxPassesPerFunction,
-                std::ref(globalPatchCount), std::ref(shouldCancel),
-                std::ref(updateMutex), std::ref(processedFunctions));
+        };
+
+        std::vector<std::thread> threads;
+        for (int i = 0; i < threadCount; ++i) {
+            threads.emplace_back(worker);
         }

         size_t lastProcessed = 0;
@@ -299,6 +345,12 @@ extern "C"
             std::this_thread::sleep_for(std::chrono::milliseconds(100));
         }

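+        // No more work will be enqueued: set the done flag under the queue
+        // mutex and wake every worker so it can drain the queue and exit.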
+        {
+            std::lock_guard<std::mutex> lock(queueMutex);
+            workDone.store(true);
+        }
+        cv.notify_all();
+
         for (auto& t : threads) {
             if (t.joinable())
                 t.join();