Toggle navigation
MeasureThat.net
Create a benchmark
Tools
Feedback
FAQ
Register
Log In
Stack strategies - plain yield
(version: 0)
Comparing performance of:
generators vs Manual
Created:
9 years ago
by:
Guest
Jump to the latest result
Tests:
generators
// Benchmark test case "generators": a trampolined list-map that uses ES6
// generator functions to make deep recursion suspendable. A global fuel
// counter (GAS) is decremented as work proceeds; when it runs out the
// generator yields and run() refills it before resuming.
var GAS = 0;

// Drive the generator produced by makeGen to completion.
// init: fuel budget restored on every suspension.
// Returns the generator's final return value.
function run(init, makeGen) {
  GAS = init;
  var gen = makeGen();
  while (true) {
    var res = gen.next();
    if (res.done) {
      return res.value;
    } else {
      GAS = init; // generator suspended because fuel ran out: refuel and resume
      continue;
    }
  }
}

var linkNames = ["first", "rest"];

// Cons-cell constructor. NOTE: this is itself a generator function, so
// callers must invoke .next().value to obtain the record (see buildList).
function* link(f, r) {
  return { $name: "link", $fields: linkNames, first: f, rest: r };
}

var empty = { $name: "empty" };

// Map f over linked list l, suspending (yield null) whenever fuel runs out.
// NOTE(review): the recursive steps use plain `yield` rather than `yield*`,
// and run() never drives the yielded sub-generators, so t1/t2/t3 resume as
// undefined and the nested maps are created but never executed. Preserved
// as-is because this is the published benchmark workload; "fixing" it would
// change what the benchmark measures.
function* map(f, l) {
  if (GAS-- <= 0) {
    yield null; // out of fuel: suspend so run() can refill GAS
  }
  var ans;
  var fst, rst; // were implicit globals in the original; now function-scoped
  var dispatch = { empty: 0, link: 1 };
  var case_step = dispatch[l.$name];
  switch (case_step) {
    case 0:
      ans = empty;
      break;
    case 1:
      fst = l[l.$fields[0]];
      rst = l[l.$fields[1]];
      var t1 = yield f(fst);
      var t2 = yield map(f, rst);
      var t3 = yield link(t1, t2);
      ans = t3;
  }
  return ans;
}

// Build the list n-1, n-2, ..., 0 (most recently consed element first).
var buildList = function(n) {
  var l = empty;
  for (var i = 0; i < n; i++) {
    l = link(i, l).next().value;
  }
  return l;
};

// The measured workload.
run(100, function*() {
  return yield map(function*(l) { return l + 1; }, buildList(4000));
});
Manual
// Benchmark test case "Manual": the same gas-metered list map as the
// generator version, but with hand-written continuation frames instead of
// generators. map() is compiled into an explicit while/switch state machine;
// when GAS runs out it returns { isCont: true, frames: [...] } and run()
// later resumes each saved frame with a fresh fuel budget.
var GAS = 0;

// Trampoline: repeatedly execute the current frame.
// A continuation result refuels GAS and queues the saved frames; a plain
// result is threaded into the parent frame via thisFrame.ans.
function run(init, runner) {
  GAS = init;
  var frames = [{ run: runner }];
  var thisFrame = frames.pop();
  while (true) {
    var res = thisFrame.run(thisFrame);
    if (res.isCont) {
      // Transfer saved frames (outermost caller pushed first) onto our
      // stack, then resume the innermost one with a fresh budget.
      var len = res.frames.length;
      for (var i = 0; i < len; i++) {
        frames.push(res.frames.pop());
      }
      GAS = init;
      thisFrame = frames.pop();
    } else {
      if (frames.length <= 0) {
        break; // no pending frames: res is the final answer
      }
      thisFrame = frames.pop();
      thisFrame.ans = res; // deliver the child's result to the parent frame
    }
  }
  return res;
}

var linkNames = ["first", "rest"];

// Cons-cell constructor (a plain function here, unlike the generator case).
function link(f, r) {
  return { $name: "link", $fields: linkNames, first: f, rest: r };
}

var empty = { $name: "empty" };

// State-machine map. Called either fresh (map(f, l)) or with a saved frame
// in the first argument position (f.isFrame), in which case ans/step and the
// locals are restored from the frame before continuing.
// Steps: 0 dispatch on list tag; 1 empty case; 2 apply f to head;
// 3 recurse on tail; 4 cons the two results; 5 return.
function map(f, l) {
  var step = 0;
  if (f.isFrame) {
    // Resuming: unpack the frame into locals. The var declarations hoist,
    // so t1/fst/rst/t2/ans also exist (as undefined) on the fresh-call path.
    var ans = f.ans;
    step = f.step;
    var t1 = f.vars[0];
    var fst = f.vars[1];
    var rst = f.vars[2];
    var t2 = f.vars[3];
    l = f.args[1];
    f = f.args[0];
  }
  if (GAS-- <= 0) {
    // Out of fuel before doing any work this call: suspend immediately.
    // ans is not saved in the frame; the GAS++ refund in step 5 keeps
    // exhaustion from ever firing while an answer is pending, so this is
    // only reached at steps where ans is not yet needed.
    return {
      isCont: true,
      frames: [{ run: map, isFrame: true, vars: [t1, fst, rst, t2], args: [f, l], step: step }]
    };
  }
  while (true) {
    switch (step) {
      case 0:
        var dispatch = { empty: 1, link: 2 };
        step = dispatch[l.$name];
        break;
      case 1:
        ans = empty;
        step = 5;
        break;
      case 2:
        fst = l[l.$fields[0]];
        rst = l[l.$fields[1]];
        step = 3;
        ans = f(fst);
        break;
      case 3:
        t1 = ans;
        step = 4;
        ans = map(f, rst); // direct recursion; may come back as a continuation
        break;
      case 4:
        t2 = ans;
        step = 5;
        ans = link(t1, t2);
        break;
      case 5:
        GAS++; // refund the unit taken on entry so unwinding never suspends
        return ans;
    }
    if (ans && ans.isCont) {
      // A callee suspended: append our own frame so run() can resume us.
      ans.frames.push({ run: map, isFrame: true, vars: [t1, fst, rst, t2], args: [f, l], step: step });
      return ans;
    }
  }
}

// Build the list n-1, n-2, ..., 0 (most recently consed element first).
var buildList = function(n) {
  var l = empty;
  for (var i = 0; i < n; i++) {
    l = link(i, l);
  }
  return l;
};

// The measured workload.
run(100, function() {
  return map(function(l) { return l + 1; }, buildList(4000));
});
Rendered benchmark preparation results:
Suite status:
<idle, ready to run>
Run tests (2)
Previous results
Fork
Test case name
Result
generators
Manual
Fastest:
N/A
Slowest:
N/A
Latest run results:
No previous run results
This benchmark does not have any results yet. Be the first one
to run it!
Autogenerated LLM Summary
(model
llama3.2:3b
, generated one year ago):
Let's break down the provided benchmark definitions and test cases. **Benchmark Definitions** The benchmark is titled "Stack strategies - plain yield" and compares two strategies for making deeply recursive code suspendable and resumable (stack-safe): ES6 generator functions versus manually managed continuation frames. **Test Cases** There are two test cases: 1. "generators" 2. "Manual" **Test Case 1: "generators"** This test case maps an increment function over a 4000-element linked list using generator functions. A trampoline (`run`) drives the outermost generator and refills a fuel counter (`GAS`) each time the generator yields, so deep recursion can be suspended instead of overflowing the call stack. Note that the recursive steps yield sub-generators that the trampoline never actually drives, so the measured work is dominated by generator creation rather than by the full traversal. **Test Case 2: "Manual"** This test case performs the same linked-list map, but without generators: the recursive function is hand-compiled into a `while`/`switch` state machine that saves its local variables into explicit frame objects whenever the fuel counter runs out, and the trampoline resumes those saved frames with a fresh budget. This simulates what a compiler targeting JavaScript might emit to get resumable deep recursion without generator support.
**Benchmark Results** When runs are recorded, the benchmark results show performance data for each test case, including: * Raw UA string (the User Agent string sent by the browser) * Browser and device platform information * Operating system information * Executions per second. This benchmark does not yet have any recorded runs, so no performance comparison between the two approaches is available. Overall, these benchmark definitions and test cases are designed to evaluate how efficiently a JavaScript engine handles stack-safe deep recursion, comparing generator-based suspension against a hand-written continuation-frame state machine.
Related benchmarks:
JavaScript spread vs slice vs for
JavaScript spread operator vs Slice/Splice performance 2edas
Push vs Spread vs Double loop Ultimate
literal, literal spread, vs reuse
Array.prototype.slice vs spread operator vs loop 2D
Comments
Confirm delete:
Do you really want to delete benchmark?