I have been testing two methods of array creation and initialization in JavaScript using Node.js. One method involves dynamically pushing elements into the array, while the other method involves pre-allocating the array to a specific size and then setting each element. Surprisingly, the pre-allocated array method performed significantly worse, and I’m unsure why this is the case.
Here is the code I used for testing:
/**
 * Benchmarks building an array of `size` sequential integers by
 * appending each one with Array.prototype.push.
 *
 * @param {number} size - Number of elements to append.
 * @returns {number} Elapsed wall-clock time in milliseconds.
 */
function testPush(size) {
  const start = process.hrtime.bigint();
  const values = [];
  for (let index = 0; index < size; index++) {
    values.push(index);
  }
  const end = process.hrtime.bigint();
  // hrtime.bigint() reports nanoseconds; scale down to milliseconds.
  return Number(end - start) / 1e6;
}
/**
 * Benchmarks filling an array of `size` sequential integers after
 * pre-allocating its full length with `new Array(size)`.
 *
 * @param {number} size - Length to pre-allocate and then fill by index.
 * @returns {number} Elapsed wall-clock time in milliseconds.
 */
function testPreAllocated(size) {
  const start = process.hrtime.bigint();
  const array = new Array(size);
  // Alternative allocation strategies tried during testing:
  // let array = new Uint32Array(size);
  // let array = Array.from({ length: size });
  let i = 0;
  while (i < size) {
    array[i] = i;
    i += 1;
  }
  const end = process.hrtime.bigint();
  // hrtime.bigint() reports nanoseconds; scale down to milliseconds.
  return Number(end - start) / 1e6;
}
/**
 * Runs both benchmarks with a 100-million-element array and logs the
 * elapsed time of each variant to the console.
 */
function compareArrays() {
  const size = 1e8; // 100 million elements
  console.log(`Testing with array size: ${size}`);

  console.log("Starting push test...");
  const pushTime = testPush(size);
  console.log(`Push test completed in ${pushTime.toFixed(2)} ms`);

  console.log("Starting pre-allocated test...");
  const preAllocatedTime = testPreAllocated(size);
  console.log(`Pre-allocated test completed in ${preAllocatedTime.toFixed(2)} ms`);
}

compareArrays();
Output:
Testing with array size: 100000000
Starting push test...
Push test completed in 1624.93 ms
Starting pre-allocated test...
Pre-allocated test completed in 4824.86 ms
I am using Node.js version 20.12.2. My expectation was that pre-allocating the array would be faster because the JavaScript engine wouldn’t need to resize the array continually. However, the results show the opposite. Why does pre-allocating an array result in worse performance compared to dynamically pushing elements into an array in this case?
2 Answers
Yes, you are right: with 100 million items, pre-allocation is slower. My guess is that at this size, assignment by index becomes slower, or there are other reasons at play.
Nonetheless, at a size of 10 million the pre-allocated version is faster, so in general pre-allocation is faster. And it really does pre-allocate heap memory; more details in my post here.
Open in the playground
When you do
`new Array(size)`
with `size` being more than 32 million (the constant `kMaxFastArrayLength` in V8, to be precise), V8 creates a dictionary-like object rather than a "real" array. The behavior is the same as far as users are concerned (i.e., you can still call `push`, `pop`, `map`, iterate, access elements with `[]`, etc.), but performance is much worse (although memory usage is much better if you end up not populating the whole array). This doesn't happen when repeatedly calling `array.push`: the underlying V8 object remains a proper array.

You can confirm this by setting `size` to `32 * 1024 * 1024` and then to `32 * 1024 * 1024 + 1`: the performance of `testPreAllocated` drops from 151 ms to 987 ms (on my machine).

You can also confirm this by running something like this in V8 on the command line (run with `--allow-natives-syntax`): the first one prints `map: 0x1b6e0018cc81 <Map[16](HOLEY_SMI_ELEMENTS)>` (which means that the array is indeed represented internally as a proper array), while the second one prints `map: 0x1b6e00198701 <Map[16](DICTIONARY_ELEMENTS)>` (which means that the array is represented internally as a dictionary).