[nrf fromlist] logging: frontend_stmesp: Avoid unaligned word access
The write_data function, which writes to the STMESP data registers,
started with word writes and wrote the tail using byte accesses.
However, the RISC-V core does not support unaligned access, and on
Cortex-M33, even though unaligned access is supported, aligned access
is faster. Reworked write_data to first write data using byte or
half-word access until the data pointer is word aligned, then use word
access, and finally write the tail using byte or half-word access.

Upstream PR: zephyrproject-rtos/zephyr#78332

Signed-off-by: Krzysztof Chruściński <krzysztof.chruscinski@nordicsemi.no>
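
As an illustration of the resulting split (an editorial sketch, not part of the commit; the helper name split() and the addresses are made up), the arithmetic below mirrors the reworked routine: a head of byte/half-word writes up to word alignment, as many word writes as possible, and a short tail.

	#include <stdint.h>
	#include <stddef.h>
	#include <stdio.h>

	/* How many bytes go to each access width for a given buffer
	 * address and length, mirroring the reworked write_data().
	 */
	static void split(uintptr_t addr, size_t len)
	{
		size_t head = (4 - (addr & 0x3UL)) & 0x3UL; /* bytes until word aligned */

		head = head < len ? head : len;             /* clamp, like MIN() */
		printf("addr 0x%lx len %zu -> head %zu, words %zu, tail %zu\n",
		       (unsigned long)addr, len, head, (len - head) / 4,
		       (len - head) % 4);
	}

	int main(void)
	{
		split(0x20000002UL, 9); /* head 2 (D16), 1 word (D32), tail 3 (D16+D8) */
		split(0x20000001UL, 7); /* head 3 (D8+D16), 1 word (D32), tail 0 */
		return 0;
	}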
nordic-krch authored and rlubos committed Sep 20, 2024
1 parent 83f64ee commit 5bb7bb0
Showing 1 changed file with 64 additions and 6 deletions.
subsys/logging/frontends/log_frontend_stmesp.c (64 additions, 6 deletions)
@@ -33,6 +33,10 @@ BUILD_ASSERT(sizeof(void *) == sizeof(uint32_t));
 	stmesp_data8(reg, data, timestamp, marked, \
 		     IS_ENABLED(CONFIG_LOG_FRONTEND_STMESP_GUARANTEED_ACCESS))
 
+#define STM_D16(reg, data, timestamp, marked) \
+	stmesp_data16(reg, data, timestamp, marked, \
+		      IS_ENABLED(CONFIG_LOG_FRONTEND_STMESP_GUARANTEED_ACCESS))
+
 #define STM_D32(reg, data, timestamp, marked) \
 	stmesp_data32(reg, data, timestamp, marked, \
 		      IS_ENABLED(CONFIG_LOG_FRONTEND_STMESP_GUARANTEED_ACCESS))
@@ -211,18 +215,72 @@ static int early_package_cb(const void *buf, size_t len, void *ctx)
 
 static inline void write_data(const void *data, size_t len, STMESP_Type *const stm_esp)
 {
-	uint32_t *p32 = (uint32_t *)data;
+	const uint8_t *p8 = data;
+	const uint32_t *p32;
+	uint32_t unaligned;
+
+	if (!len) {
+		return;
+	}
+
+	/* Start by writing with D8 or D16 until the pointer is word aligned. */
+	unaligned = (uintptr_t)data & 0x00000003UL;
+	if (unaligned != 0) {
+		unaligned = 4 - unaligned;
+		unaligned = MIN(len, unaligned);
+
+		len -= unaligned;
+
+		switch (unaligned) {
+		case 3:
+			STM_D8(stm_esp, *p8++, false, false);
+			STM_D16(stm_esp, *(uint16_t *)p8, false, false);
+			p8 += sizeof(uint16_t);
+			break;
+		case 2:
+			if (len) {
+				STM_D16(stm_esp, *(uint16_t *)p8, false, false);
+				p8 += sizeof(uint16_t);
+			} else {
+				/* If len is 0 here then, even though 2 bytes
+				 * remain to be written, the address may not be
+				 * aligned to 2 bytes, so use byte accesses.
+				 */
+				STM_D8(stm_esp, *p8++, false, false);
+				STM_D8(stm_esp, *p8++, false, false);
+			}
+			break;
+		default:
+			/* 1 byte to align. */
+			STM_D8(stm_esp, *p8++, false, false);
+		}
+	}
+
+	p32 = (const uint32_t *)p8;
 
+	/* Use D32 to write as much data as possible. */
 	while (len >= sizeof(uint32_t)) {
 		STM_D32(stm_esp, *p32++, false, false);
 		len -= sizeof(uint32_t);
 	}
 
-	uint8_t *p8 = (uint8_t *)p32;
-
-	while (len > 0) {
-		STM_D8(stm_esp, *p8++, false, false);
-		len--;
+	/* Write the tail with D16 or D8. The address is word aligned here. */
+	if (len) {
+		p8 = (const uint8_t *)p32;
+		switch (len) {
+		case 2:
+			STM_D16(stm_esp, *(uint16_t *)p8, false, false);
+			p8 += sizeof(uint16_t);
+			break;
+		case 3:
+			STM_D16(stm_esp, *(uint16_t *)p8, false, false);
+			p8 += sizeof(uint16_t);
+			/* fallthrough */
+		default:
+			/* 1 byte left. */
+			STM_D8(stm_esp, *p8++, false, false);
+			break;
+		}
+	}
 	}
 }
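
To sanity-check the access pattern off-target, a host-side model of the new routine can replace the STMESP register writes with appends to a byte buffer and assert the alignment each access width requires. This is an editorial verification sketch, not part of the commit; all names below (model_*, out, out_len) are hypothetical test scaffolding.

	#include <assert.h>
	#include <stdint.h>
	#include <stddef.h>
	#include <string.h>

	static uint8_t out[64];
	static size_t out_len;

	/* Stand-ins for STM_D8/D16/D32: append to a buffer, but enforce the
	 * alignment the real bus access would require. memcpy is used on the
	 * host to read the source without strict-aliasing issues.
	 */
	static void model_d8(const uint8_t *p)
	{
		out[out_len++] = *p;
	}

	static void model_d16(const uint8_t *p)
	{
		assert(((uintptr_t)p & 0x1UL) == 0);
		memcpy(&out[out_len], p, 2);
		out_len += 2;
	}

	static void model_d32(const uint8_t *p)
	{
		assert(((uintptr_t)p & 0x3UL) == 0);
		memcpy(&out[out_len], p, 4);
		out_len += 4;
	}

	/* Same control flow as the reworked write_data(). */
	static void model_write_data(const void *data, size_t len)
	{
		const uint8_t *p8 = data;
		uint32_t unaligned = (uintptr_t)data & 0x00000003UL;

		if (!len) {
			return;
		}

		if (unaligned != 0) {
			unaligned = 4 - unaligned;
			unaligned = unaligned < len ? unaligned : len;
			len -= unaligned;

			switch (unaligned) {
			case 3:
				model_d8(p8++);
				model_d16(p8);
				p8 += 2;
				break;
			case 2:
				if (len) {
					model_d16(p8);
					p8 += 2;
				} else {
					model_d8(p8++); /* address may be odd here */
					model_d8(p8++);
				}
				break;
			default:
				model_d8(p8++);
			}
		}

		while (len >= 4) {
			model_d32(p8);
			p8 += 4;
			len -= 4;
		}

		/* Tail: D16 then D8, equivalent to the switch in the commit. */
		if (len >= 2) {
			model_d16(p8);
			p8 += 2;
			len -= 2;
		}
		if (len) {
			model_d8(p8);
		}
	}

	int main(void)
	{
		uint8_t buf[32];

		for (size_t i = 0; i < sizeof(buf); i++) {
			buf[i] = (uint8_t)i;
		}
		/* Every alignment and length: output must equal input, in order. */
		for (size_t off = 0; off < 4; off++) {
			for (size_t len = 0; len <= 16; len++) {
				out_len = 0;
				model_write_data(&buf[off], len);
				assert(out_len == len);
				assert(memcmp(out, &buf[off], len) == 0);
			}
		}
		return 0;
	}

Under these assumptions, the asserts pass for every offset/length combination, which is the property the rework is after: full coverage in order, using only aligned D16/D32 accesses.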

