Hi Dr. Sherrill,

Thank you for your quick reply.

> You can always compare the behavior with a normal uniprocessor build.

Yes, this is what I am doing.

> But this test has Init, High, IDLE, and OPERATION_COUNT+2 medium tasks.
> All should be at unique priorities.

Yes, thanks for confirming.


> It looks like the test is doing two timing operations. These are for
> specific yield cases. Which one isn't behaving right for you?

The second thread that executes the Tasks function exits and the test then
terminates, i.e. execution never transfers to the third Tasks thread.

I have pasted the gdb trace below (stepping through this
<https://github.com/richidubey/rtems/blob/a60d87b321e6658e10f39f9dd3f7b2c003d84c3c/cpukit/score/src/schedulerstrongapa.c#L153>
function), which shows two different threads at the High priority. In both
node dumps the priority is 2 and the state is SCHEDULER_SMP_NODE_BLOCKED,
but the owner pointers differ (0x200f70 vs. 0x201478), so these really are
two distinct tasks. That should not be the case, right? There should only
be a single High thread. Can you please take a look at it?
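
In case it is useful, here is a quick sanity check I could drop into
schedulerstrongapa.c to confirm the duplicate from inside the scheduler.
This is only a debugging sketch, not scheduler code: the
Check_duplicate_priorities name is made up, and it assumes the layout
visible in the dumps below (the context's Ready chain links
Scheduler_strong_APA_Node objects through Ready_node, recovered via the
STRONG_SCHEDULER_NODE_OF_CHAIN() helper).

#include <rtems/bspIo.h> /* for printk(); schedulerstrongapa.c already
                            pulls in the chain and scheduler headers */

/*
 * Debugging sketch only: walk the ready chain of the Strong APA context
 * and print every pair of nodes that share a priority.  Each report shows
 * the two node addresses and their owner threads, matching the fields in
 * the node dumps in the trace.
 */
static void Check_duplicate_priorities( Scheduler_strong_APA_Context *self )
{
  const Chain_Node *tail = _Chain_Immutable_tail( &self->Ready );
  Chain_Node *outer = _Chain_First( &self->Ready );

  while ( outer != tail ) {
    Scheduler_strong_APA_Node *a =
      (Scheduler_strong_APA_Node *) STRONG_SCHEDULER_NODE_OF_CHAIN( outer );
    Chain_Node *inner = _Chain_Next( outer );

    while ( inner != tail ) {
      Scheduler_strong_APA_Node *b =
        (Scheduler_strong_APA_Node *) STRONG_SCHEDULER_NODE_OF_CHAIN( inner );

      if ( a->Base.priority == b->Base.priority ) {
        /* Two distinct ready nodes at the same priority: print both. */
        printk(
          "duplicate priority %lu: node %p (owner %p), node %p (owner %p)\n",
          (unsigned long) a->Base.priority,
          a, a->Base.Base.owner,
          b, b->Base.Base.owner
        );
      }

      inner = _Chain_Next( inner );
    }

    outer = _Chain_Next( outer );
  }
}

Calling it right before the while ( next != tail ) loop shown in the trace
should print both offending nodes together with their owner threads.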

Thanks,
Richi.

On Mon, Dec 28, 2020 at 9:25 PM Joel Sherrill <j...@rtems.org> wrote:

>
>
> On Mon, Dec 28, 2020, 12:18 AM Richi Dubey <richidu...@gmail.com> wrote:
>
>> Hi,
>>
>> When I am debugging tm24 running on the Strong APA scheduler, I can see
>> that when the Tasks
>> <https://git.rtems.org/rtems/tree/testsuites/tmtests/tm24/task1.c#n106>
>> function is being executed, there are two threads with priority 2 in the
>> list of threads, both with status SCHEDULER_SMP_NODE_BLOCKED but with
>> different addresses. How can there be two such threads?  Shouldn't there
>> only be a single thread with priority 2, the one that runs the High_task
>> <https://git.rtems.org/rtems/tree/testsuites/tmtests/tm24/task1.c#n77>
>> function? Please let me know your views about this.
>>
>
> You can always compare the behavior with a normal uniprocessor build.
>
> But this test has Init, High, IDLE, and OPERATION_COUNT+2 medium tasks.
> All should be at unique priorities.
>
> It looks like the test is doing two timing operations. These are for
> specific yield cases. Which one isn't behaving right for you?
>
>>
>> Thank you,
>> Richi.
>
>
189         while ( next != tail ) {
(gdb) 
0x001117cc      189         while ( next != tail ) {
(gdb) 
0x001117ce      189         while ( next != tail ) {
(gdb) 
0x001117d0      189         while ( next != tail ) {
(gdb) 
190           node = (Scheduler_strong_APA_Node*) STRONG_SCHEDULER_NODE_OF_CHAIN( next );
(gdb) 
0x001116f2      190           node = (Scheduler_strong_APA_Node*) STRONG_SCHEDULER_NODE_OF_CHAIN( next );
(gdb) 
190           node = (Scheduler_strong_APA_Node*) STRONG_SCHEDULER_NODE_OF_CHAIN( next );
(gdb) 
192           index_curr_cpu = _Per_CPU_Get_index( curr_CPU );
(gdb) 
0x001116f8      192           index_curr_cpu = _Per_CPU_Get_index( curr_CPU );
(gdb) p node
$3 = (Scheduler_strong_APA_Node *) 0x2011c8 <_RTEMS_tasks_Objects+600>
(gdb) p *node
$4 = {
  Base = {
    Base = {
      Node = {
        Chain = {
          next = 0x200838 <_Configuration_Scheduler_strong_APA_dflt+16>,
          previous = 0x200834 <_Configuration_Scheduler_strong_APA_dflt+12>
        },
        RBTree = {
          Node = {
            rbe_left = 0x200838 <_Configuration_Scheduler_strong_APA_dflt+16>,
            rbe_right = 0x200834 <_Configuration_Scheduler_strong_APA_dflt+12>,
            rbe_parent = 0x0,
            rbe_color = 0
          }
        }
      },
      sticky_level = 0,
      user = 0x200f70 <_RTEMS_tasks_Objects>,
      idle = 0x0,
      owner = 0x200f70 <_RTEMS_tasks_Objects>,
      Thread = {
        Wait_node = {
          next = 0x200fcc <_RTEMS_tasks_Objects+92>,
          previous = 0x200fc8 <_RTEMS_tasks_Objects+88>
        },
        Scheduler_node = {
          Chain = {
            next = 0x200fd8 <_RTEMS_tasks_Objects+104>,
            previous = 0x200fd4 <_RTEMS_tasks_Objects+100>
          },
          next = 0x200fd8 <_RTEMS_tasks_Objects+104>
        },
        next_request = 0x0,
        request = SCHEDULER_NODE_REQUEST_NOT_PENDING
      },
      Wait = {
        Priority = {
          Node = {
            Node = {
              Chain = {
                next = 0x0,
                previous = 0x0
              },
              RBTree = {
                Node = {
                  rbe_left = 0x0,
                  rbe_right = 0x0,
                  rbe_parent = 0x0,
                  rbe_color = 0
                }
              }
            },
            priority = 2
          },
          Contributors = {
            rbh_root = 0x200f98 <_RTEMS_tasks_Objects+40>
          },
          scheduler = 0x11bf78 <_Scheduler_Table>,
          Action = {
            next = 0x0,
            node = 0x0,
            type = PRIORITY_ACTION_ADD
          }
        }
      },
      Priority = {
        value = 2,
        Lock = {
          sequence = 0
        }
      }
    },
    state = SCHEDULER_SMP_NODE_BLOCKED,
    priority = 2
  },
  Ready_node = {
    next = 0x201758 <_RTEMS_tasks_Objects+2024>,
    previous = 0x200b50 <_Thread_Objects+736>
  },
  cpu_to_preempt = 0x200540 <_Per_CPU_Information>,
  Affinity = {
    __bits = {1}
  }
}
(gdb) ni
0x001116fc      192           index_curr_cpu = _Per_CPU_Get_index( curr_CPU );
(gdb) 
194             _Processor_mask_Is_set( &node->Affinity, index_curr_cpu )
(gdb) 
0x00111700      194             _Processor_mask_Is_set( &node->Affinity, index_curr_cpu )
(gdb) 
194             _Processor_mask_Is_set( &node->Affinity, index_curr_cpu )
(gdb) 
0x00111704      194             _Processor_mask_Is_set( &node->Affinity, index_curr_cpu )
(gdb) 
0x00111706      194             _Processor_mask_Is_set( &node->Affinity, index_curr_cpu )
(gdb) 
0x0011170a      194             _Processor_mask_Is_set( &node->Affinity, index_curr_cpu )
(gdb) 
193           if (
(gdb) 
0x0011170e      193           if (
(gdb) 
196             curr_state = _Scheduler_SMP_Node_state( &node->Base.Base );
(gdb) 
196             curr_state = _Scheduler_SMP_Node_state( &node->Base.Base );
(gdb) 
0x00111714      196             curr_state = _Scheduler_SMP_Node_state( &node->Base.Base );
(gdb) 
0x00111718      196             curr_state = _Scheduler_SMP_Node_state( &node->Base.Base );
(gdb) 
198             if ( curr_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
(gdb) 
0x0011171c      198             if ( curr_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
(gdb) 
0x0011171e      198             if ( curr_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
(gdb) 
216             else if ( curr_state == SCHEDULER_SMP_NODE_READY ) {
(gdb) 
0x00111772      216             else if ( curr_state == SCHEDULER_SMP_NODE_READY ) {
(gdb) 
0x00111774      216             else if ( curr_state == SCHEDULER_SMP_NODE_READY ) {
(gdb) 
233         next = _Chain_Next( next );
(gdb) 
0x001117c4      233         next = _Chain_Next( next );
(gdb) 
0x001117c8      233         next = _Chain_Next( next );
(gdb) 
189         while ( next != tail ) {
(gdb) 
0x001117cc      189         while ( next != tail ) {
(gdb) 
0x001117ce      189         while ( next != tail ) {
(gdb) 
0x001117d0      189         while ( next != tail ) {
(gdb) 
190           node = (Scheduler_strong_APA_Node*) STRONG_SCHEDULER_NODE_OF_CHAIN( next );
(gdb) 
0x001116f2      190           node = (Scheduler_strong_APA_Node*) STRONG_SCHEDULER_NODE_OF_CHAIN( next );
(gdb) 
190           node = (Scheduler_strong_APA_Node*) STRONG_SCHEDULER_NODE_OF_CHAIN( next );
(gdb) 
192           index_curr_cpu = _Per_CPU_Get_index( curr_CPU );
(gdb) p node
$5 = (Scheduler_strong_APA_Node *) 0x2016d0 <_RTEMS_tasks_Objects+1888>
(gdb) p *node
$6 = {
  Base = {
    Base = {
      Node = {
        Chain = {
          next = 0x200838 <_Configuration_Scheduler_strong_APA_dflt+16>,
          previous = 0x200834 <_Configuration_Scheduler_strong_APA_dflt+12>
        },
        RBTree = {
          Node = {
            rbe_left = 0x200838 <_Configuration_Scheduler_strong_APA_dflt+16>,
            rbe_right = 0x200834 <_Configuration_Scheduler_strong_APA_dflt+12>,
            rbe_parent = 0x0,
            rbe_color = 0
          }
        }
      },
      sticky_level = 0,
      user = 0x201478 <_RTEMS_tasks_Objects+1288>,
      idle = 0x0,
      owner = 0x201478 <_RTEMS_tasks_Objects+1288>,
      Thread = {
        Wait_node = {
          next = 0x2014d4 <_RTEMS_tasks_Objects+1380>,
          previous = 0x2014d0 <_RTEMS_tasks_Objects+1376>
        },
        Scheduler_node = {
          Chain = {
            next = 0x2014e0 <_RTEMS_tasks_Objects+1392>,
            previous = 0x2014dc <_RTEMS_tasks_Objects+1388>
          },
          next = 0x2014e0 <_RTEMS_tasks_Objects+1392>
        },
        next_request = 0x0,
        request = SCHEDULER_NODE_REQUEST_NOT_PENDING
      },
      Wait = {
        Priority = {
          Node = {
            Node = {
              Chain = {
                next = 0x0,
                previous = 0x0
              },
              RBTree = {
                Node = {
                  rbe_left = 0x0,
                  rbe_right = 0x0,
                  rbe_parent = 0x0,
                  rbe_color = 0
                }
              }
            },
            priority = 2
          },
          Contributors = {
            rbh_root = 0x2014a0 <_RTEMS_tasks_Objects+1328>
          },
          scheduler = 0x11bf78 <_Scheduler_Table>,
          Action = {
            next = 0x0,
            node = 0x0,
            type = PRIORITY_ACTION_ADD
          }
        }
      },
      Priority = {
        value = 2,
        Lock = {
          sequence = 0
        }
      }
    },
    state = SCHEDULER_SMP_NODE_BLOCKED,
    priority = 2
  },
  Ready_node = {
    next = 0x201c60 <_RTEMS_tasks_Objects+3312>,
    previous = 0x201250 <_RTEMS_tasks_Objects+736>
  },
  cpu_to_preempt = 0x200540 <_Per_CPU_Information>,
  Affinity = {
    __bits = {1}
  }
}
(gdb) ni
0x001116f8      192           index_curr_cpu = _Per_CPU_Get_index( curr_CPU );
(gdb) 
0x001116fc      192           index_curr_cpu = _Per_CPU_Get_index( curr_CPU );
(gdb) 
194             _Processor_mask_Is_set( &node->Affinity, index_curr_cpu )
(gdb) 
0x00111700      194             _Processor_mask_Is_set( &node->Affinity, index_curr_cpu )
(gdb) 
