Consider the following (contrived) memory arena (pool):
template<typename T>
class Arena {
public:
    // Pre-allocates raw storage for `size` objects of type T. Blocks are
    // handed out round-robin: after `size` allocations the arena wraps and
    // re-uses the first block (callers overwrite the old contents).
    Arena(size_t size)
        : m_buffer(new char[size * sizeof(T)]),
          m_next_available(0),
          m_size(size) { }
    // Returns the address of the next block. The atomic post-increment hands
    // each caller a unique ticket; taking it modulo m_size wraps the index
    // back into the buffer, which expresses the "reset to zero" requirement
    // in a single atomic operation (no separate compare-and-set needed).
    void* placement() {
        return m_buffer.get() + (m_next_available++ % m_size) * sizeof(T);
    }
private:
    // Array form unique_ptr<char[]> is required so destruction uses
    // delete[]; plain unique_ptr<char> would call delete on new[] storage,
    // which is undefined behavior.
    std::unique_ptr<char[]> m_buffer;
    std::atomic<size_t> m_next_available;  // monotonically increasing ticket counter
    size_t m_size;                         // capacity, in objects of type T
};
As you can see, it uses an atomic variable m_next_available
to keep track of the next available memory block.
When a new memory block is requested, the Arena
instance should provide the pointer to the appropriate block (as indicated), and obtain the next available one; this is where I'm having problems.
I would like an atomic operation capable of expressing the following: when the index of the next available block reaches the arena size, it should be reset to zero (I will be overwriting the old memory locations).
For reference, the non-atomic version of the same Arena is presented below. Notice how, when I go past five elements (the size of the Arena), the address of the new element is the address of the first block again (as expected).
#include<cstddef>
#include<iostream>
#include<memory>
template<typename T>
class Arena {
public:
    // Non-atomic reference version: single-threaded only.
    // Pre-allocates raw storage for `size` objects of type T.
    Arena(size_t size)
        : m_buffer(new char[size * sizeof(T)]),
          m_next_available(0),
          m_size(size) { }
    void* placement() {
        // Check-then-reset followed by post-increment: safe here because
        // there is no concurrency, but this two-step sequence is exactly
        // what cannot be made atomic without the modulo trick.
        if(m_next_available == m_size) {
            m_next_available = 0;
        }
        return m_buffer.get() + (m_next_available++) * sizeof(T);
    }
private:
    // Array form unique_ptr<char[]> so destruction uses delete[];
    // unique_ptr<char> would pair new[] with delete, which is UB.
    std::unique_ptr<char[]> m_buffer;
    size_t m_next_available;  // index of the next free block
    size_t m_size;            // capacity, in objects of type T
};
template<typename T>
void* operator new(size_t sz, Arena<T>& a) {
(void)sz; // to avoid "warning: unused variable sz"
return a.placement();
}
int main() {
Arena<double> a(5);
double x;
while(std::cin>>x) {
double *data = new(a) double(x);
std::cout<<"address of new item: "<<data<<std::endl;
}
}
Compiled with GCC 4.8.1 on OS X 10.7.4 (g++ example.cpp -std=c++11).
1
address of new item: 0x7fb48b4008a0
2
address of new item: 0x7fb48b4008a8
3
address of new item: 0x7fb48b4008b0
4
address of new item: 0x7fb48b4008b8
5
address of new item: 0x7fb48b4008c0
1
address of new item: 0x7fb48b4008a0 # this is the same as the first one
Edit:
As per previous attempts and Steve Jessop's valuable suggestions, I will simply increment the m_next_available counter atomically and take the resulting value modulo m_size to obtain the cyclic index. If you are interested, the code below seems to work.
#include<cstddef>
#include<iostream>
#include<memory>
#include<atomic>
#include<thread>
#include<vector>
template<typename T>
class Arena {
public:
    // Fixed-capacity circular pool: `size` blocks of sizeof(T) bytes each.
    Arena(size_t size)
        : m_buffer(new char[size * sizeof(T)]),
          m_next_available(0),
          m_size(size) { }
    // Thread-safe allocation: the atomic post-increment hands each caller a
    // unique ticket, and modulo m_size wraps it into the buffer, recycling
    // the oldest block once the arena is full.
    void* placement() {
        return m_buffer.get() + (m_next_available++ % m_size) * sizeof(T);
    }
    // Total number of allocation requests served so far (not capped at m_size).
    size_t allocations() const {
        return m_next_available;
    }
    // Debug aid: stream every slot's current value.
    // NOTE(review): these reads race with concurrent writers and may touch
    // slots that were never written -- acceptable only for this demo.
    void peek() const {
        for (size_t k = 0; k < m_size; k++) {
            // Cast to T* (the element type actually stored), not a
            // hard-coded double*: the original cast only worked when
            // the arena was instantiated with T == double.
            std::cout << (*reinterpret_cast<T*>(m_buffer.get() + k * sizeof(T)))
                      << " ";
        }
        std::cout << std::endl;
    }
private:
    std::unique_ptr<char[]> m_buffer;      // array form so delete[] is used
    std::atomic<size_t> m_next_available;  // monotonically increasing ticket
    size_t m_size;                         // capacity, in T-sized blocks
};
template<typename T>
void* operator new(size_t sz, Arena<T>& a) {
(void)sz; // to avoid "warning: unused variable sz"
return a.placement();
}
// Shared pool of 10 double-sized slots, written by all worker threads.
Arena<double> arena(10);
// Flag polled by the printer thread; cleared once the workers are done.
std::atomic<bool> continue_printing;
// Functor executed by each writer thread: performs 10000 placement
// allocations in the shared global arena, storing the loop counter value.
struct Worker {
    void operator()() const {
        size_t i = 0;
        while (i < 10000) {
            new(arena) double(i);
            ++i;
        }
    }
};
// Stress demo: one printer thread dumps the arena while 100 writer
// threads hammer it with placement allocations.
int main() {
    continue_printing = true;
    std::thread printer([](){ while (continue_printing) arena.peek(); });

    // Launch 100 writers, each performing 10000 allocations.
    std::vector<std::thread> threads;
    for (size_t k = 0; k < 100; k++) {
        threads.emplace_back(Worker());
    }
    for (auto& thread : threads) {
        thread.join();
    }

    // Stop and JOIN the printer. The original detach()ed it, which let it
    // keep calling arena.peek() while main returned and the global arena
    // was being destroyed -- undefined behavior. Joining guarantees the
    // printer has finished before any globals go away.
    continue_printing = false;
    printer.join();

    std::cout << "all threads finished" << std::endl
              << "final population in the arena: " << std::endl;
    arena.peek();
    std::cout << "Number of elements that requested allocation: "
              << arena.allocations() << std::endl;
}
Output:
$ ./a.out
0 9 57 949 90 371 144 976 132 384
876 679 600 926 610 948 622 589 632 1480
4553 4580 4499 4592 4597 4518 7512 6344 4546 6362
7597 4595 4659 7626 4616 6459 6470 6480 4689 7676
4666 6544 7738 6562 7755 7766 6582 6593 6604 4727
[----- snip ----- snip ----- snip -----]
9409 9925 9934 9446 9956 9966 9977 9490 9508 9549
9720 9811 9892 9953 9994 9995 9996 9997 9998 9999
9990 9991 9992 9993 9994 9995 9996 9997 9998 9999
9990 9991 9992 9993 9994 9995 9996 9997 9998 9999
all threads finished
final population in the arena:
9990 9991 9992 9993 9994 9995 9996 9997 9998 9999
Number of elements that requested allocation: 1000000