I have a problem with node_allocator.
The "repro" code:
#include
#include
#include
////////////////
using namespace boost::interprocess;
////////////////
enum {
A_SHARED_MEMORY_SIZE = 4096 * 50,
A_POOL_SIZE = 512,
};
////////////////
static char g_static_buffer[A_SHARED_MEMORY_SIZE];
static managed_external_buffer g_sharm (create_only, g_static_buffer,
A_SHARED_MEMORY_SIZE);
////////////////
typedef node_allocator int_allocator_t;
typedef list IntList;
////////////////
// Element type stored in the shared list: each T owns an IntList whose
// nodes are also carved out of g_sharm via the pooled int allocator.
struct T {
IntList int_list;
//
// Bind the inner list to g_sharm's segment manager. Relies on the
// file-level static g_sharm having been initialized before any T is
// constructed (same translation unit, defined above — order is OK).
T ()
: int_list (int_allocator_t(g_sharm.get_segment_manager()))
{}
};
// Pooled allocator / list type for T objects, mirroring the int versions
// above. (Template arguments restored — the originals were stripped by HTML.)
typedef node_allocator<T, managed_external_buffer::segment_manager,
                       A_POOL_SIZE> tis_allocator_t;
typedef list<T, tis_allocator_t> TList;
////////////////
int main()
{
TList* p =
g_sharm.construct<TList>(anonymous_instance)(tis_allocator_t(g_sharm.get_segment_manager()));
try {
enum { N = 2 };
// Create objects then delete them
for (int n = 0; n < N; n++) {
// Create T-type objects
for (int i = 0; i < A_POOL_SIZE; i++) {
p->push_back(T()); // push new object to the list
}
p->clear(); // Delete all T objects
}
} catch (bad_alloc &) {
cout << "No memory" << endl;
}
g_sharm.destroy_ptr(p);
return 0;
}
The problem is the following.
If N > 1, on _second_ "pass" (n=1) the bad_alloc exception is thrown while
pushing new T object to the list.
Why?
I assumed that on every "pass" except the first one, no new T-type objects
would actually be allocated in shared memory. They should be taken from the
allocator's node pool instead. I.e., if there is enough memory on the first
"pass", there must also be enough on every subsequent one.
The problem is specific for Visual Studio 2005, when producing release build
(all options carry their default values).
Thanks,
Alex.