Listing 1: OpenMP enables parallelization of loops or regions without large-scale code modifications.
//-- Avoid thread-team startup/teardown overhead by entering a parallel
// region once, then dividing all of the work within that single region.
#pragma omp parallel
{
//-- Only the master thread executes the following function call. Note:
// the master construct has no implied barrier, so the other threads do
// not wait here and proceed directly to the loop below.
#pragma omp master
MasterThreadFunc();
//-- The "nowait" clause removes the implicit barrier at the end of this
// worksharing loop: threads that finish their share of the iterations
// continue immediately to the next block without waiting for the others.
#pragma omp for nowait
for( i = 0; i < X; i++ )
{
doWork();
// ...
}
//-- Exactly one thread (any thread) executes this; because of the
// "nowait" above, it is generally the first thread to complete its share
// of the preceding loop. Unlike master, the single construct does have an
// implicit barrier at its end.
#pragma omp single
OneTimeAnyThreadFunc();
//-- No "nowait" here: the threads that process this loop synchronize at
// the implicit barrier at the end of the loop before continuing.
#pragma omp for
for( i = 0; i < Y; i++ )
{
doOtherWork();
// ...
}
//-- Sections instruct OpenMP to divide the identified sections across
// the multiple threads.
#pragma omp sections
{
// Each section below is assigned to some thread and executed exactly
// once, in parallel with the others. If the program contains more
// sections than threads, the remaining sections are scheduled as
// threads finish their previously assigned sections.
#pragma omp section
{
doTaskA();
}
#pragma omp section
{
doTaskB();
}
#pragma omp section
{
doTaskC();
}
}
}