[PATCH rtems-littlevgl] Allow to build without drivers.

2019-11-05 Thread Christian Mauderer
This is for example useful if you want to build without libbsd.
---
 lvgl.py | 8 +---
 wscript | 7 +++
 2 files changed, 12 insertions(+), 3 deletions(-)

diff --git a/lvgl.py b/lvgl.py
index 6d83c63..5452ed0 100644
--- a/lvgl.py
+++ b/lvgl.py
@@ -30,7 +30,7 @@ import os
 import re
 import rtems_waf.rtems as rtems
 
-def source_list():
+def source_list(bld):
 mk_files = ['lvgl/src/lv_core/lv_core.mk',
 'lvgl/src/lv_hal/lv_hal.mk',
 'lvgl/src/lv_objx/lv_objx.mk',
@@ -38,7 +38,9 @@ def source_list():
 'lvgl/src/lv_misc/lv_misc.mk',
 'lvgl/src/lv_themes/lv_themes.mk',
 'lvgl/src/lv_draw/lv_draw.mk',
-'lv_drivers/display/display.mk']
+   ]
+if bld.env.DRIVERS:
+mk_files.append('lv_drivers/display/display.mk')
 sources = []
 cflags = []
 
@@ -64,7 +66,7 @@ def source_list():
 
 def build(bld):
 
-sources, includes = source_list()
+sources, includes = source_list(bld)
 includes.append('.')
 objects = []
 include_paths = []
diff --git a/wscript b/wscript
index 03e010c..ae91daa 100644
--- a/wscript
+++ b/wscript
@@ -43,8 +43,15 @@ def init(ctx):
 
 def options(opt):
 rtems.options(opt)
+opt.add_option("--no-drivers",
+   action = "store_false",
+   default = True,
+   dest = "drivers",
+   help = "Build without lv_drivers." +
+  "Useful for building without libbsd.")
 
 def configure(conf):
+conf.env.DRIVERS = conf.options.drivers
 rtems.configure(conf)
 
 def build(bld):
-- 
2.16.4

___
devel mailing list
devel@rtems.org
http://lists.rtems.org/mailman/listinfo/devel


[PATCH] heap: Simplify _Heap_Block_allocate()

2019-11-05 Thread Sebastian Huber
Determine the next block only once and use it throughout.
---
 cpukit/score/src/heap.c | 48 
 1 file changed, 24 insertions(+), 24 deletions(-)

diff --git a/cpukit/score/src/heap.c b/cpukit/score/src/heap.c
index a67fef783a..b54bb98cff 100644
--- a/cpukit/score/src/heap.c
+++ b/cpukit/score/src/heap.c
@@ -306,6 +306,7 @@ uintptr_t _Heap_Initialize(
 static void _Heap_Block_split(
   Heap_Control *heap,
   Heap_Block *block,
+  Heap_Block *next_block,
   Heap_Block *free_list_anchor,
   uintptr_t alloc_size
 )
@@ -325,14 +326,15 @@ static void _Heap_Block_split(
   uintptr_t const free_size = block_size + HEAP_ALLOC_BONUS - used_size;
   uintptr_t const free_size_limit = min_block_size + HEAP_ALLOC_BONUS;
 
-  Heap_Block *next_block = _Heap_Block_at( block, block_size );
-
   _HAssert( used_size <= block_size + HEAP_ALLOC_BONUS );
   _HAssert( used_size + free_size == block_size + HEAP_ALLOC_BONUS );
 
   if ( free_size >= free_size_limit ) {
 Heap_Block *const free_block = _Heap_Block_at( block, used_block_size );
 uintptr_t free_block_size = block_size - used_block_size;
+uintptr_t const next_block_size = _Heap_Block_size( next_block );
+Heap_Block *const next_next_block =
+  _Heap_Block_at( next_block, next_block_size );
 
 _HAssert( used_block_size + free_block_size == block_size );
 
@@ -341,14 +343,12 @@ static void _Heap_Block_split(
 /* Statistics */
 stats->free_size += free_block_size;
 
-if ( _Heap_Is_used( next_block ) ) {
+if ( _Heap_Is_prev_used( next_next_block ) ) {
   _Heap_Free_list_insert_after( free_list_anchor, free_block );
 
   /* Statistics */
   ++stats->free_blocks;
 } else {
-  uintptr_t const next_block_size = _Heap_Block_size( next_block );
-
   _Heap_Free_list_replace( next_block, free_block );
 
   free_block_size += next_block_size;
@@ -370,11 +370,12 @@ static void _Heap_Block_split(
 static Heap_Block *_Heap_Block_allocate_from_begin(
   Heap_Control *heap,
   Heap_Block *block,
+  Heap_Block *next_block,
   Heap_Block *free_list_anchor,
   uintptr_t alloc_size
 )
 {
-  _Heap_Block_split( heap, block, free_list_anchor, alloc_size );
+  _Heap_Block_split( heap, block, next_block, free_list_anchor, alloc_size );
 
   return block;
 }
@@ -382,6 +383,7 @@ static Heap_Block *_Heap_Block_allocate_from_begin(
 static Heap_Block *_Heap_Block_allocate_from_end(
   Heap_Control *heap,
   Heap_Block *block,
+  Heap_Block *next_block,
   Heap_Block *free_list_anchor,
   uintptr_t alloc_begin,
   uintptr_t alloc_size
@@ -389,23 +391,17 @@ static Heap_Block *_Heap_Block_allocate_from_end(
 {
   Heap_Statistics *const stats = &heap->stats;
 
-  uintptr_t block_begin = (uintptr_t) block;
-  uintptr_t block_size = _Heap_Block_size( block );
-  uintptr_t block_end = block_begin + block_size;
-
   Heap_Block *const new_block =
 _Heap_Block_of_alloc_area( alloc_begin, heap->page_size );
   uintptr_t const new_block_begin = (uintptr_t) new_block;
-  uintptr_t const new_block_size = block_end - new_block_begin;
+  uintptr_t const new_block_size = (uintptr_t) next_block - new_block_begin;
+  uintptr_t block_size_adjusted = (uintptr_t) new_block - (uintptr_t) block;
 
-  block_end = new_block_begin;
-  block_size = block_end - block_begin;
-
-  _HAssert( block_size >= heap->min_block_size );
+  _HAssert( block_size_adjusted >= heap->min_block_size );
   _HAssert( new_block_size >= heap->min_block_size );
 
   /* Statistics */
-  stats->free_size += block_size;
+  stats->free_size += block_size_adjusted;
 
   if ( _Heap_Is_prev_used( block ) ) {
 _Heap_Free_list_insert_after( free_list_anchor, block );
@@ -419,15 +415,15 @@ static Heap_Block *_Heap_Block_allocate_from_end(
 uintptr_t const prev_block_size = _Heap_Block_size( prev_block );
 
 block = prev_block;
-block_size += prev_block_size;
+block_size_adjusted += prev_block_size;
   }
 
-  block->size_and_flag = block_size | HEAP_PREV_BLOCK_USED;
+  block->size_and_flag = block_size_adjusted | HEAP_PREV_BLOCK_USED;
 
-  new_block->prev_size = block_size;
+  new_block->prev_size = block_size_adjusted;
   new_block->size_and_flag = new_block_size;
 
-  _Heap_Block_split( heap, new_block, free_list_anchor, alloc_size );
+  _Heap_Block_split( heap, new_block, next_block, free_list_anchor, alloc_size 
);
 
   return new_block;
 }
@@ -443,12 +439,16 @@ Heap_Block *_Heap_Block_allocate(
 
   uintptr_t const alloc_area_begin = _Heap_Alloc_area_of_block( block );
   uintptr_t const alloc_area_offset = alloc_begin - alloc_area_begin;
+  uintptr_t const block_size = _Heap_Block_size( block );
+  Heap_Block *const next_block = _Heap_Block_at( block, block_size );
 
   Heap_Block *free_list_anchor = NULL;
 
   _HAssert( alloc_area_begin <= alloc_begin );
 
-  if ( _Heap_Is_free( block ) ) {
+  if ( _Heap_Is_prev_used( next_block ) ) {
+free_list_anchor = _Heap_Free_list_head( heap );
+  } else {
 free_list_ancho

Support for AXI interconnect targeting the XILINX ZC706

2019-11-05 Thread Tiago Manuel Da Silva Jorge
Dear devel members,

We are working on an interesting project where we are developing applications 
that should run with RTEMS on ARM and additionally communicate with 
Programmable Logic (FPGA). We are using the Xilinx Zynq-7000 SoC ZC706 
Evaluation Kit, and for communication between its PS (Processing System: ARM 
Cortex-A9) and PL (Programmable Logic: Artix-7 FPGA) we are planning to use its 
AXI Interconnect.

Hence, the question is: Is there any support (e.g. driver or the like) for this 
AXI on RTEMS/ARM (to communicate with PL side)?

Thank you in advance.
Best,

[cid:image001.gif@01D593DE.1416F7D0]

Tiago da Silva Jorge
Ingeniero de Software - Unidad de Negocio Segmento de Vuelo y Robótica /
Software Engineer - Space Segment and Robotics Business Unit

GMV
Isaac Newton, 11
P.T.M. Tres Cantos
E-28760 Madrid
Tel. +34 91 807 21 00
Fax +34 91 807 21 99
www.gmv.com 
[cid:image002.png@01D593DE.1416F7D0]

[cid:image003.png@01D593DE.1416F7D0]

[cid:image004.png@01D593DE.1416F7D0]

[cid:image005.png@01D593DE.1416F7D0]

[cid:image006.png@01D593DE.1416F7D0]


[cid:image007.png@01D593DE.1416F7D0]






P Please consider the environment before printing this e-mail.
___
devel mailing list
devel@rtems.org
http://lists.rtems.org/mailman/listinfo/devel

Re: Support for AXI interconnect targeting the XILINX ZC706

2019-11-05 Thread Chris Johns
On 5/11/19 11:43 pm, Tiago Manuel Da Silva Jorge wrote:
> We are working on an interesting project where we are developing applications
> that should run with RTEMS on ARM and additionally communicate with 
> Programmable
> Logic (FPGA). We are using the Xilinx Zynq-7000 SoC ZC706 Evaluation Kit, and
> for communication between its PS (Processing System: ARM Cortex-A9) and PL
> (Programmable Logic: Artix-7 FPGA) we are planning to use its AXI 
> Interconnect.

Welcome to RTEMS and thanks for the introduction. It sounds like an interesting
project.

> Hence, the question is: Is there any support (e.g. driver or the like) for 
> this
> AXI on RTEMS/ARM (to communicate with PL side)?

RTEMS provides all the primitives you need:

1. MMU and cache support
2. Interrupts

After this there is no specific support for the AXI bus and that is mostly due
to the wide range of ways you can implement the transfer of data across the AXI
bus to and from the PL. The factors that affect the design are the amount of
data and the performance needed. Another factor is the experience of the PL
design team, if they are new to the Zynq or Xilinx there may be a learning curve
here.

For each part of the PL logic you interact with over the AXI bus you will need a
set of registers assigned to one of the AXI bus ports. The PL team will do this
and set the base address of the port. You need to configure the MMU so you can
access that address space and so the PL. To set up the MMU and cache create a
file in your application, say mmu.c, and add a table similar to:

 8< -
#define ARM_CP15_TEXT_SECTION BSP_START_TEXT_SECTION

#include 
#include 
#include 
#include 
#include 

#ifdef ARMV7_CP15_START_DEFAULT_SECTIONS

BSP_START_DATA_SECTION static const arm_cp15_start_section_config
zynq_mmu_config_table[] = {
  ARMV7_CP15_START_DEFAULT_SECTIONS,
  {
    .begin = 0xe0000000U,
    .end   = 0xe0200000U,
    .flags = ARMV7_MMU_DEVICE
  }, {
    .begin = 0xf8000000U,
    .end   = 0xf9000000U,
    .flags = ARMV7_MMU_DEVICE
  }, {
    .begin = 0x40000000U,
    .end   = 0xc0000000U,
    .flags = ARMV7_MMU_DEVICE
  }, {
    .begin = 0x00100000U,
    .end   = 0x00400000U,
    .flags = ARMV7_MMU_DEVICE
  }, {
    .begin = 0xfffc0000U,
    .end   = 0xffffffffU,
    .flags = ARMV7_MMU_DEVICE
  }
};

BSP_START_TEXT_SECTION void zynq_setup_mmu_and_cache(void)
{
  uint32_t ctrl = arm_cp15_start_setup_mmu_and_cache(
ARM_CP15_CTRL_A,
ARM_CP15_CTRL_AFE | ARM_CP15_CTRL_Z
  );

  arm_cp15_start_setup_translation_table_and_enable_mmu_and_cache(
ctrl,
(uint32_t *) bsp_translation_table_base,
ARM_MMU_DEFAULT_CLIENT_DOMAIN,
&zynq_mmu_config_table[0],
RTEMS_ARRAY_SIZE(zynq_mmu_config_table)
  );
}

#endif
 8< -

The function `zynq_setup_mmu_and_cache()` is weak in the BSP and so this
function overrides the default.

You can now access the PL over the AXI bus. From here you can use CDMA as a
simple but slow method to transfer blocks of data, or the PL designers can bus
master the AXI bus and write directly into the main DDR (fast but more complex).
The software would configure the DDR target address via the PL registers. DMA
and bus master writes and reads require you manage the cache invalidates and
flushes. There are calls in RTEMS to handle this for you.

I hope this helps.

Chris
___
devel mailing list
devel@rtems.org
http://lists.rtems.org/mailman/listinfo/devel


Re: [PATCH rtems-littlevgl] Allow to build without drivers.

2019-11-05 Thread Chris Johns
On 5/11/19 10:56 pm, Christian Mauderer wrote:
> This is for example useful if you want to build without libbsd.

Ok to push.

Thanks
Chris
___
devel mailing list
devel@rtems.org
http://lists.rtems.org/mailman/listinfo/devel


RE: Support for AXI interconnect targeting the XILINX ZC706

2019-11-05 Thread Misra, Avinash
Chris,

This is an excellent response and this is information that I certainly wish I 
had when I started using RTEMS for my Zynq application. 

Tiago, another "gotcha" I want to point out is when launching your target 
application via Xilinx SDK and using RTEMS with SMP Mode (setting # of 
processors > 1). When you program and launch your application to CPU0 on the 
Zynq via Xilinx SDK, Xilinx will place CPU1 in Idle Mode. RTEMS will be stuck 
in a perpetual wait state waiting for CPU1 to come up before launching. You 
will need to physically click start on CPU1 in SDK in order for CPU1 to resume 
execution and allow RTEMS to exit its wait barrier. If you program your image 
to a SD Card or the onboard flash and use FSBL then you will not need to do 
this.

Chris -- Does RTEMS have a wiki where we can maybe compile a list of 
Gotchas/FAQs/Things to be aware of when programming for the Zynq and other 
platforms? 

Thanks,
Avi
-Original Message-
From: devel  On Behalf Of Chris Johns
Sent: Tuesday, November 5, 2019 6:32 PM
To: Tiago Manuel Da Silva Jorge ; devel@rtems.org
Subject: Re: Support for AXI interconnect targeting the XILINX ZC706

On 5/11/19 11:43 pm, Tiago Manuel Da Silva Jorge wrote:
> We are working on an interesting project where we are developing 
> applications that should run with RTEMS on ARM and additionally 
> communicate with Programmable Logic (FPGA). We are using the Xilinx 
> Zynq-7000 SoC ZC706 Evaluation Kit, and for communication between its 
> PS (Processing System: ARM Cortex-A9) and PL (Programmable Logic: Artix-7 
> FPGA) we are planning to use its AXI Interconnect.

Welcome to RTEMS and thanks for the introduction. It sounds like an interesting 
project.

> Hence, the question is: Is there any support (e.g. driver or the like) 
> for this AXI on RTEMS/ARM (to communicate with PL side)?

RTEMS provides all the primitives you need:

1. MMU and cache support
2. Interrupts

After this there is no specific support for the AXI bus and that is mostly due 
to the wide range of ways you can implement the transfer of data across the AXI 
bus to and from the PL. The factors that affect the design are the amount of 
data and the performance needed. Another factor is the experience of the PL 
design team, if they are new to the Zynq or Xilinx there may be a learning 
curve here.

For each part of the PL logic you interact with over the AXI bus you will need 
a set of registers assigned to one of the AXI bus ports. The PL team will do 
this and set the base address of the port. You need to configure the MMU so you 
can access that address space and so the PL. To set up the MMU and cache create 
a file in your application, say mmu.c, and add a table similar to:

 8< -
#define ARM_CP15_TEXT_SECTION BSP_START_TEXT_SECTION

#include 
#include 
#include 
#include 
#include 

#ifdef ARMV7_CP15_START_DEFAULT_SECTIONS

BSP_START_DATA_SECTION static const arm_cp15_start_section_config 
zynq_mmu_config_table[] = {
  ARMV7_CP15_START_DEFAULT_SECTIONS,
  {
    .begin = 0xe0000000U,
    .end   = 0xe0200000U,
    .flags = ARMV7_MMU_DEVICE
  }, {
    .begin = 0xf8000000U,
    .end   = 0xf9000000U,
    .flags = ARMV7_MMU_DEVICE
  }, {
    .begin = 0x40000000U,
    .end   = 0xc0000000U,
    .flags = ARMV7_MMU_DEVICE
  }, {
    .begin = 0x00100000U,
    .end   = 0x00400000U,
    .flags = ARMV7_MMU_DEVICE
  }, {
    .begin = 0xfffc0000U,
    .end   = 0xffffffffU,
    .flags = ARMV7_MMU_DEVICE
  }
};

BSP_START_TEXT_SECTION void zynq_setup_mmu_and_cache(void) {
  uint32_t ctrl = arm_cp15_start_setup_mmu_and_cache(
ARM_CP15_CTRL_A,
ARM_CP15_CTRL_AFE | ARM_CP15_CTRL_Z
  );

  arm_cp15_start_setup_translation_table_and_enable_mmu_and_cache(
ctrl,
(uint32_t *) bsp_translation_table_base,
ARM_MMU_DEFAULT_CLIENT_DOMAIN,
&zynq_mmu_config_table[0],
RTEMS_ARRAY_SIZE(zynq_mmu_config_table)
  );
}

#endif
 8< -

The function `zynq_setup_mmu_and_cache()` is weak in the BSP and so this 
function overrides the default.

You can now access the PL over the AXI bus. From here you can use CDMA as a 
simple but slow method to transfer blocks of data, or the PL designers can bus 
master the AXI bus and write directly into the main DDR (fast but more complex).
The software would configure the DDR target address via the PL registers. DMA 
and bus master writes and reads require you manage the cache invalidates and 
flushes. There are calls in RTEMS to handle this for you.

I hope this helps.

Chris
___
devel mailing list
devel@rtems.org
http://lists.rtems.org/mailman/listinfo/devel
___
devel mailing list
devel@rtems.org
http://lists.rtems.org/mailman/listinfo/devel


Re: Support for AXI interconnect targeting the XILINX ZC706

2019-11-05 Thread Chris Johns
On 6/11/19 10:51 am, Misra, Avinash wrote:
> Tiago, another "gotcha" I want to point out is when launching your target 
> application via Xilinx SDK and using RTEMS with SMP Mode (setting # of 
> processors > 1). When you program and launch your application to CPU0 on the 
> Zynq via Xilinx SDK, Xilinx will place CPU1 in Idle Mode. RTEMS will be stuck 
> in a perpetual wait state waiting for CPU1 to come up before launching. You 
> will need to physically click start on CPU1 in SDK in order for CPU1 to resume 
> execution and allow RTEMS to exit its wait barrier. If you program your image 
> to a SD Card or the onboard flash and use FSBL then you will not need to do 
> this.

Nice and thanks.

> Chris -- Does RTEMS have a wiki where we can maybe compile a list of 
> Gotchas/FAQs/Things to be aware of when programming for the Zynq and other 
> platforms? 

Yes, here 

https://docs.rtems.org/branches/master/user/bsps/bsps-arm.html#xilinx-zynq

This is where we need to collect the pieces we have. In the wiki there is:

https://devel.rtems.org/wiki/Boards/Zynq%20-%20Zedboard
https://devel.rtems.org/wiki/Debugging/OpenOCD/Xilinx_Zynq

I would welcome patches.

Chris
___
devel mailing list
devel@rtems.org
http://lists.rtems.org/mailman/listinfo/devel


Re: [PATCH rtems-littlevgl] Allow to build without drivers.

2019-11-05 Thread Christian Mauderer
On 06/11/2019 00:35, Chris Johns wrote:
> On 5/11/19 10:56 pm, Christian Mauderer wrote:
>> This is for example useful if you want to build without libbsd.
> 
> Ok to push.
> 
> Thanks
> Chris
> 

Thanks. Pushed it.

-- 

embedded brains GmbH
Herr Christian Mauderer
Dornierstr. 4
D-82178 Puchheim
Germany
email: christian.maude...@embedded-brains.de
Phone: +49-89-18 94 741 - 18
Fax:   +49-89-18 94 741 - 08
PGP: Public key available on request.

Diese Nachricht ist keine geschäftliche Mitteilung im Sinne des EHUG.
___
devel mailing list
devel@rtems.org
http://lists.rtems.org/mailman/listinfo/devel