stm: Upgrade to latest CMSIS libraries.

CMSIS V3.01 -> V3.20. Now in stm/cmsis to keep separate from ST
libraries.
Damien George 2014-01-19 16:43:33 +00:00
parent a11ceca807
commit 1277753812
6 changed files with 226 additions and 96 deletions


@@ -8,6 +8,7 @@ include ../py/py.mk
 RM = /bin/rm
 ECHO = @echo
+CMSIS=cmsis
 STMSRC=lib
 #STMOTGSRC=lib-otg
 FATFSSRC=fatfs
@@ -22,7 +23,7 @@ OBJCOPY = arm-none-eabi-objcopy
 SIZE = arm-none-eabi-size
 CFLAGS_CORTEX_M4 = -mthumb -mtune=cortex-m4 -mabi=aapcs-linux -mcpu=cortex-m4 -mfpu=fpv4-sp-d16 -mfloat-abi=hard -fsingle-precision-constant -Wdouble-promotion -DSTM32F40XX -DHSE_VALUE=8000000
-CFLAGS = -I. -I$(PY_SRC) -I$(FATFSSRC) -I$(STMSRC) -Wall -ansi -std=gnu99 $(CFLAGS_CORTEX_M4) -D$(TARGET)
+CFLAGS = -I. -I$(PY_SRC) -I$(FATFSSRC) -I$(CMSIS) -I$(STMSRC) -Wall -ansi -std=gnu99 $(CFLAGS_CORTEX_M4) -D$(TARGET)
 #CFLAGS += -I$(STMOTGSRC) -DUSE_HOST_MODE -DUSE_OTG_MODE
 #Debugging/Optimization


@@ -1,25 +1,40 @@
 /**************************************************************************//**
  * @file     core_cm4.h
  * @brief    CMSIS Cortex-M4 Core Peripheral Access Layer Header File
- * @version  V3.01
- * @date     22. March 2012
+ * @version  V3.20
+ * @date     25. February 2013
  *
  * @note
- * Copyright (C) 2009-2012 ARM Limited. All rights reserved.
- *
- * @par
- * ARM Limited (ARM) is supplying this software for use with Cortex-M
- * processor based microcontrollers.  This file can be freely distributed
- * within development tools that are supporting such ARM based processors.
- *
- * @par
- * THIS SOFTWARE IS PROVIDED "AS IS".  NO WARRANTIES, WHETHER EXPRESS, IMPLIED
- * OR STATUTORY, INCLUDING, BUT NOT LIMITED TO, IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE APPLY TO THIS SOFTWARE.
- * ARM SHALL NOT, IN ANY CIRCUMSTANCES, BE LIABLE FOR SPECIAL, INCIDENTAL, OR
- * CONSEQUENTIAL DAMAGES, FOR ANY REASON WHATSOEVER.
  *
  ******************************************************************************/
+/* Copyright (c) 2009 - 2013 ARM LIMITED
+
+   All rights reserved.
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are met:
+   - Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+   - Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+   - Neither the name of ARM nor the names of its contributors may be used
+     to endorse or promote products derived from this software without
+     specific prior written permission.
+   *
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+   ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE
+   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+   POSSIBILITY OF SUCH DAMAGE.
+   ---------------------------------------------------------------------------*/
+
 #if defined ( __ICCARM__ )
  #pragma system_include  /* treat file as system include file for MISRA check */
 #endif
@@ -54,7 +69,7 @@
 /*  CMSIS CM4 definitions */
 #define __CM4_CMSIS_VERSION_MAIN  (0x03)                                   /*!< [31:16] CMSIS HAL main version */
-#define __CM4_CMSIS_VERSION_SUB   (0x01)                                   /*!< [15:0]  CMSIS HAL sub version  */
+#define __CM4_CMSIS_VERSION_SUB   (0x20)                                   /*!< [15:0]  CMSIS HAL sub version  */
 #define __CM4_CMSIS_VERSION       ((__CM4_CMSIS_VERSION_MAIN << 16) | \
                                     __CM4_CMSIS_VERSION_SUB          )     /*!< CMSIS HAL version number       */
@@ -669,14 +684,14 @@ typedef struct
   __IO uint32_t TPR;     /*!< Offset: 0xE40 (R/W)  ITM Trace Privilege Register */
        uint32_t RESERVED2[15];
   __IO uint32_t TCR;     /*!< Offset: 0xE80 (R/W)  ITM Trace Control Register */
-       uint32_t RESERVED3[29];
+       uint32_t RESERVED3[29];
   __O  uint32_t IWR;     /*!< Offset: 0xEF8 ( /W)  ITM Integration Write Register */
   __I  uint32_t IRR;     /*!< Offset: 0xEFC (R/ )  ITM Integration Read Register */
   __IO uint32_t IMCR;    /*!< Offset: 0xF00 (R/W)  ITM Integration Mode Control Register */
-       uint32_t RESERVED4[43];
+       uint32_t RESERVED4[43];
   __O  uint32_t LAR;     /*!< Offset: 0xFB0 ( /W)  ITM Lock Access Register */
   __I  uint32_t LSR;     /*!< Offset: 0xFB4 (R/ )  ITM Lock Status Register */
-       uint32_t RESERVED5[6];
+       uint32_t RESERVED5[6];
   __I  uint32_t PID4;    /*!< Offset: 0xFD0 (R/ )  ITM Peripheral Identification Register #4 */
   __I  uint32_t PID5;    /*!< Offset: 0xFD4 (R/ )  ITM Peripheral Identification Register #5 */
   __I  uint32_t PID6;    /*!< Offset: 0xFD8 (R/ )  ITM Peripheral Identification Register #6 */
@@ -1661,9 +1676,9 @@ __STATIC_INLINE void NVIC_SystemReset(void)
 */
 __STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks)
 {
-  if (ticks > SysTick_LOAD_RELOAD_Msk)  return (1);            /* Reload value impossible */
+  if ((ticks - 1) > SysTick_LOAD_RELOAD_Msk)  return (1);      /* Reload value impossible */
 
-  SysTick->LOAD  = (ticks & SysTick_LOAD_RELOAD_Msk) - 1;      /* set reload register */
+  SysTick->LOAD  = ticks - 1;                                  /* set reload register */
   NVIC_SetPriority (SysTick_IRQn, (1<<__NVIC_PRIO_BITS) - 1);  /* set Priority for Systick Interrupt */
   SysTick->VAL   = 0;                                          /* Load the SysTick Counter Value */
   SysTick->CTRL  = SysTick_CTRL_CLKSOURCE_Msk |
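Aside on the SysTick_Config change: V3.01 silently masked an oversized tick count into a wrong period, while V3.20 rejects it up front and programs LOAD = ticks - 1 (the counter counts from LOAD down to 0, so a period of N ticks needs a reload of N-1). A minimal usage sketch, assuming the standard CMSIS SystemCoreClock variable and an illustrative 1 ms tick:

    /* request a 1 ms SysTick interrupt; returns 1 if the reload value
       does not fit in SysTick_LOAD_RELOAD_Msk (24 bits, 0x00FFFFFF) */
    if (SysTick_Config(SystemCoreClock / 1000u) != 0u) {
        while (1) { }  /* clock too fast for a 24-bit reload: handle the error */
    }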


@@ -1,25 +1,39 @@
 /**************************************************************************//**
  * @file     core_cm4_simd.h
  * @brief    CMSIS Cortex-M4 SIMD Header File
- * @version  V3.01
- * @date     06. March 2012
+ * @version  V3.20
+ * @date     25. February 2013
 *
  * @note
- * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
- *
- * @par
- * ARM Limited (ARM) is supplying this software for use with Cortex-M
- * processor based microcontrollers.  This file can be freely distributed
- * within development tools that are supporting such ARM based processors.
- *
- * @par
- * THIS SOFTWARE IS PROVIDED "AS IS".  NO WARRANTIES, WHETHER EXPRESS, IMPLIED
- * OR STATUTORY, INCLUDING, BUT NOT LIMITED TO, IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE APPLY TO THIS SOFTWARE.
- * ARM SHALL NOT, IN ANY CIRCUMSTANCES, BE LIABLE FOR SPECIAL, INCIDENTAL, OR
- * CONSEQUENTIAL DAMAGES, FOR ANY REASON WHATSOEVER.
 *
  ******************************************************************************/
+/* Copyright (c) 2009 - 2013 ARM LIMITED
+
+   All rights reserved.
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are met:
+   - Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+   - Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+   - Neither the name of ARM nor the names of its contributors may be used
+     to endorse or promote products derived from this software without
+     specific prior written permission.
+   *
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+   ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE
+   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+   POSSIBILITY OF SUCH DAMAGE.
+   ---------------------------------------------------------------------------*/
+
 #ifdef __cplusplus
  extern "C" {
@@ -110,6 +124,8 @@
 #define __PKHTB(ARG1,ARG2,ARG3)  ( ((((uint32_t)(ARG1))          ) & 0xFFFF0000UL) |  \
                                    ((((uint32_t)(ARG2)) >> (ARG3)) & 0x0000FFFFUL)  )
 
+#define __SMMLA(ARG1,ARG2,ARG3)  ( (int32_t)((((int64_t)(ARG1) * (ARG2)) + \
+                                              ((int64_t)(ARG3) << 32)      ) >> 32))
 
 /*-- End CM4 SIMD Intrinsics -----------------------------------------------------*/
@@ -624,6 +640,14 @@ __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB(uint32_t op1,
   __RES; \
  })
 
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3)
+{
+ int32_t result;
+
+ __ASM volatile ("smmla %0, %1, %2, %3" : "=r" (result): "r" (op1), "r" (op2), "r" (op3) );
+ return(result);
+}
 
 /*-- End CM4 SIMD Intrinsics -----------------------------------------------------*/
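Aside: the new __SMMLA intrinsic maps to the Cortex-M4 smmla instruction, which returns the high 32 bits of op1*op2 plus the accumulator, i.e. acc + (((int64_t)op1 * op2) >> 32), exactly what the ARM-compiler macro above spells out. A worked sketch of the equivalent portable arithmetic, with illustrative Q31 values that are not taken from the commit:

    int32_t a = 0x40000000, b = 0x40000000;   /* 0.5 * 0.5 in Q31 */
    int32_t acc = 5;
    int32_t r = (int32_t)((((int64_t)a * b) + ((int64_t)acc << 32)) >> 32);
    /* r == 5 + 0x10000000, i.e. acc plus 0.25 in Q31 */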


@@ -1,25 +1,39 @@
 /**************************************************************************//**
  * @file     core_cmFunc.h
  * @brief    CMSIS Cortex-M Core Function Access Header File
- * @version  V3.01
- * @date     06. March 2012
+ * @version  V3.20
+ * @date     25. February 2013
 *
  * @note
- * Copyright (C) 2009-2012 ARM Limited. All rights reserved.
- *
- * @par
- * ARM Limited (ARM) is supplying this software for use with Cortex-M
- * processor based microcontrollers.  This file can be freely distributed
- * within development tools that are supporting such ARM based processors.
- *
- * @par
- * THIS SOFTWARE IS PROVIDED "AS IS".  NO WARRANTIES, WHETHER EXPRESS, IMPLIED
- * OR STATUTORY, INCLUDING, BUT NOT LIMITED TO, IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE APPLY TO THIS SOFTWARE.
- * ARM SHALL NOT, IN ANY CIRCUMSTANCES, BE LIABLE FOR SPECIAL, INCIDENTAL, OR
- * CONSEQUENTIAL DAMAGES, FOR ANY REASON WHATSOEVER.
 *
  ******************************************************************************/
+/* Copyright (c) 2009 - 2013 ARM LIMITED
+
+   All rights reserved.
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are met:
+   - Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+   - Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+   - Neither the name of ARM nor the names of its contributors may be used
+     to endorse or promote products derived from this software without
+     specific prior written permission.
+   *
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+   ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE
+   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+   POSSIBILITY OF SUCH DAMAGE.
+   ---------------------------------------------------------------------------*/
+
 #ifndef __CORE_CMFUNC_H
 #define __CORE_CMFUNC_H
@@ -314,7 +328,7 @@ __STATIC_INLINE void __set_FPSCR(uint32_t fpscr)
 */
 __attribute__( ( always_inline ) ) __STATIC_INLINE void __enable_irq(void)
 {
-  __ASM volatile ("cpsie i");
+  __ASM volatile ("cpsie i" : : : "memory");
 }
 
@@ -325,7 +339,7 @@ __attribute__( ( always_inline ) ) __STATIC_INLINE void __enable_irq(void)
 */
 __attribute__( ( always_inline ) ) __STATIC_INLINE void __disable_irq(void)
 {
-  __ASM volatile ("cpsid i");
+  __ASM volatile ("cpsid i" : : : "memory");
 }
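Aside: the new "memory" clobber on cpsie/cpsid is what makes the classic critical-section idiom safe; without it GCC may keep shared data cached in a register or move loads and stores across the interrupt-disable boundary. A minimal sketch, where shared_counter is an illustrative variable rather than anything from the commit:

    extern volatile uint32_t shared_counter;   /* also written by an IRQ handler */

    __disable_irq();              /* clobber forces memory to be flushed/reloaded here */
    shared_counter = shared_counter + 1;
    __enable_irq();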
@@ -352,7 +366,7 @@ __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_CONTROL(void)
 */
 __attribute__( ( always_inline ) ) __STATIC_INLINE void __set_CONTROL(uint32_t control)
 {
-  __ASM volatile ("MSR control, %0" : : "r" (control) );
+  __ASM volatile ("MSR control, %0" : : "r" (control) : "memory");
 }
 
@@ -424,7 +438,7 @@ __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_PSP(void)
 */
 __attribute__( ( always_inline ) ) __STATIC_INLINE void __set_PSP(uint32_t topOfProcStack)
 {
-  __ASM volatile ("MSR psp, %0\n" : : "r" (topOfProcStack) );
+  __ASM volatile ("MSR psp, %0\n" : : "r" (topOfProcStack) : "sp");
 }
 
@@ -451,7 +465,7 @@ __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_MSP(void)
 */
 __attribute__( ( always_inline ) ) __STATIC_INLINE void __set_MSP(uint32_t topOfMainStack)
 {
-  __ASM volatile ("MSR msp, %0\n" : : "r" (topOfMainStack) );
+  __ASM volatile ("MSR msp, %0\n" : : "r" (topOfMainStack) : "sp");
 }
 
@@ -478,7 +492,7 @@ __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_PRIMASK(void)
 */
 __attribute__( ( always_inline ) ) __STATIC_INLINE void __set_PRIMASK(uint32_t priMask)
 {
-  __ASM volatile ("MSR primask, %0" : : "r" (priMask) );
+  __ASM volatile ("MSR primask, %0" : : "r" (priMask) : "memory");
 }
 
@@ -491,7 +505,7 @@ __attribute__( ( always_inline ) ) __STATIC_INLINE void __set_PRIMASK(uint32_t p
 */
 __attribute__( ( always_inline ) ) __STATIC_INLINE void __enable_fault_irq(void)
 {
-  __ASM volatile ("cpsie f");
+  __ASM volatile ("cpsie f" : : : "memory");
 }
 
@@ -502,7 +516,7 @@ __attribute__( ( always_inline ) ) __STATIC_INLINE void __enable_fault_irq(void)
 */
 __attribute__( ( always_inline ) ) __STATIC_INLINE void __disable_fault_irq(void)
 {
-  __ASM volatile ("cpsid f");
+  __ASM volatile ("cpsid f" : : : "memory");
 }
 
@@ -529,7 +543,7 @@ __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_BASEPRI(void)
 */
 __attribute__( ( always_inline ) ) __STATIC_INLINE void __set_BASEPRI(uint32_t value)
 {
-  __ASM volatile ("MSR basepri, %0" : : "r" (value) );
+  __ASM volatile ("MSR basepri, %0" : : "r" (value) : "memory");
 }
 
@@ -556,7 +570,7 @@ __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_FAULTMASK(void
 */
 __attribute__( ( always_inline ) ) __STATIC_INLINE void __set_FAULTMASK(uint32_t faultMask)
 {
-  __ASM volatile ("MSR faultmask, %0" : : "r" (faultMask) );
+  __ASM volatile ("MSR faultmask, %0" : : "r" (faultMask) : "memory");
 }
 
 #endif /* (__CORTEX_M >= 0x03) */
 
@@ -575,7 +589,10 @@ __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_FPSCR(void)
 #if (__FPU_PRESENT == 1) && (__FPU_USED == 1)
   uint32_t result;
 
+  /* Empty asm statement works as a scheduling barrier */
+  __ASM volatile ("");
   __ASM volatile ("VMRS %0, fpscr" : "=r" (result) );
+  __ASM volatile ("");
   return(result);
 #else
   return(0);
 
@@ -592,7 +609,10 @@ __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_FPSCR(void)
 __attribute__( ( always_inline ) ) __STATIC_INLINE void __set_FPSCR(uint32_t fpscr)
 {
 #if (__FPU_PRESENT == 1) && (__FPU_USED == 1)
-  __ASM volatile ("VMSR fpscr, %0" : : "r" (fpscr) );
+  /* Empty asm statement works as a scheduling barrier */
+  __ASM volatile ("");
+  __ASM volatile ("VMSR fpscr, %0" : : "r" (fpscr) : "vfpcc");
+  __ASM volatile ("");
 #endif
 }


@@ -1,25 +1,39 @@
 /**************************************************************************//**
  * @file     core_cmInstr.h
  * @brief    CMSIS Cortex-M Core Instruction Access Header File
- * @version  V3.01
- * @date     06. March 2012
+ * @version  V3.20
+ * @date     05. March 2013
 *
  * @note
- * Copyright (C) 2009-2012 ARM Limited. All rights reserved.
- *
- * @par
- * ARM Limited (ARM) is supplying this software for use with Cortex-M
- * processor based microcontrollers.  This file can be freely distributed
- * within development tools that are supporting such ARM based processors.
- *
- * @par
- * THIS SOFTWARE IS PROVIDED "AS IS".  NO WARRANTIES, WHETHER EXPRESS, IMPLIED
- * OR STATUTORY, INCLUDING, BUT NOT LIMITED TO, IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE APPLY TO THIS SOFTWARE.
- * ARM SHALL NOT, IN ANY CIRCUMSTANCES, BE LIABLE FOR SPECIAL, INCIDENTAL, OR
- * CONSEQUENTIAL DAMAGES, FOR ANY REASON WHATSOEVER.
 *
  ******************************************************************************/
+/* Copyright (c) 2009 - 2013 ARM LIMITED
+
+   All rights reserved.
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are met:
+   - Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+   - Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+   - Neither the name of ARM nor the names of its contributors may be used
+     to endorse or promote products derived from this software without
+     specific prior written permission.
+   *
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+   ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE
+   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+   POSSIBILITY OF SUCH DAMAGE.
+   ---------------------------------------------------------------------------*/
+
 #ifndef __CORE_CMINSTR_H
 #define __CORE_CMINSTR_H
@@ -111,12 +125,13 @@
   \param [in]    value  Value to reverse
   \return               Reversed value
 */
+#ifndef __NO_EMBEDDED_ASM
 __attribute__((section(".rev16_text"))) __STATIC_INLINE __ASM uint32_t __REV16(uint32_t value)
 {
   rev16 r0, r0
   bx lr
 }
+#endif
 
 /** \brief  Reverse byte order in signed short value
@@ -125,11 +140,13 @@ __attribute__((section(".rev16_text"))) __STATIC_INLINE __ASM uint32_t __REV16(u
   \param [in]    value  Value to reverse
   \return               Reversed value
 */
+#ifndef __NO_EMBEDDED_ASM
 __attribute__((section(".revsh_text"))) __STATIC_INLINE __ASM int32_t __REVSH(int32_t value)
 {
   revsh r0, r0
   bx lr
 }
+#endif
 
 /** \brief  Rotate Right in unsigned value (32 bit)
@@ -143,6 +160,17 @@ __attribute__((section(".revsh_text"))) __STATIC_INLINE __ASM int32_t __REVSH(in
 #define __ROR  __ror
 
+/** \brief  Breakpoint
+
+   This function causes the processor to enter Debug state.
+   Debug tools can use this to investigate system state when the instruction at a particular address is reached.
+
+   \param [in]    value  is ignored by the processor.
+                  If required, a debugger can use it to store additional information about the breakpoint.
+ */
+#define __BKPT(value)  __breakpoint(value)
+
 
 #if (__CORTEX_M >= 0x03)
 
 /** \brief  Reverse bit order of value
@@ -279,6 +307,17 @@ __attribute__((section(".revsh_text"))) __STATIC_INLINE __ASM int32_t __REVSH(in
 #elif defined ( __GNUC__ ) /*------------------ GNU Compiler ---------------------*/
 /* GNU gcc specific functions */
 
+/* Define macros for porting to both thumb1 and thumb2.
+ * For thumb1, use low register (r0-r7), specified by constraint "l"
+ * Otherwise, use general registers, specified by constraint "r" */
+#if defined (__thumb__) && !defined (__thumb2__)
+#define __CMSIS_GCC_OUT_REG(r) "=l" (r)
+#define __CMSIS_GCC_USE_REG(r) "l" (r)
+#else
+#define __CMSIS_GCC_OUT_REG(r) "=r" (r)
+#define __CMSIS_GCC_USE_REG(r) "r" (r)
+#endif
+
 /** \brief  No Operation
 
     No Operation does nothing. This instruction can be used for code alignment purposes.
@@ -364,10 +403,14 @@ __attribute__( ( always_inline ) ) __STATIC_INLINE void __DMB(void)
 */
 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __REV(uint32_t value)
 {
+#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)
+  return __builtin_bswap32(value);
+#else
   uint32_t result;
 
-  __ASM volatile ("rev %0, %1" : "=r" (result) : "r" (value) );
+  __ASM volatile ("rev %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
   return(result);
+#endif
 }
@@ -382,7 +425,7 @@ __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __REV16(uint32_t val
 {
   uint32_t result;
 
-  __ASM volatile ("rev16 %0, %1" : "=r" (result) : "r" (value) );
+  __ASM volatile ("rev16 %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
   return(result);
 }
@@ -396,10 +439,14 @@ __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __REV16(uint32_t val
 */
 __attribute__( ( always_inline ) ) __STATIC_INLINE int32_t __REVSH(int32_t value)
 {
+#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
+  return (short)__builtin_bswap16(value);
+#else
   uint32_t result;
 
-  __ASM volatile ("revsh %0, %1" : "=r" (result) : "r" (value) );
+  __ASM volatile ("revsh %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
   return(result);
+#endif
 }
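Aside: on GCC 4.5+/4.8+ the byte-reverse intrinsics now lower to __builtin_bswap32/__builtin_bswap16, which the compiler can constant-fold and schedule freely, instead of opaque inline asm; on older GCC the new __CMSIS_GCC_OUT_REG/__CMSIS_GCC_USE_REG constraints keep the asm valid for Thumb-1. A quick illustrative check of the expected results:

    uint32_t w = __REV(0x12345678u);    /* 0x78563412: whole word byte-reversed */
    uint32_t h = __REV16(0x12345678u);  /* 0x34127856: bytes swapped within each halfword */
    int32_t  s = __REVSH(0x0080);       /* 0xFFFF8000: low halfword swapped, then sign-extended */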
@@ -413,12 +460,21 @@ __attribute__( ( always_inline ) ) __STATIC_INLINE int32_t __REVSH(int32_t value
 */
 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __ROR(uint32_t op1, uint32_t op2)
 {
-  __ASM volatile ("ror %0, %0, %1" : "+r" (op1) : "r" (op2) );
-  return(op1);
+  return (op1 >> op2) | (op1 << (32 - op2));
 }
 
+/** \brief  Breakpoint
+
+   This function causes the processor to enter Debug state.
+   Debug tools can use this to investigate system state when the instruction at a particular address is reached.
+
+   \param [in]    value  is ignored by the processor.
+                  If required, a debugger can use it to store additional information about the breakpoint.
+ */
+#define __BKPT(value)  __ASM volatile ("bkpt "#value)
+
 
 #if (__CORTEX_M >= 0x03)
 
 /** \brief  Reverse bit order of value
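Aside: __ROR is now plain C that GCC recognizes as a rotate idiom and compiles down to a single ror instruction, with the bonus that it can be constant-folded; since the expression shifts by (32 - op2), the usual rotate-idiom assumption that op2 lies in 1..31 applies. An illustrative call:

    uint32_t r = __ROR(0x12345678u, 8);   /* 0x78123456 */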
@@ -446,9 +502,16 @@ __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __RBIT(uint32_t valu
 */
 __attribute__( ( always_inline ) ) __STATIC_INLINE uint8_t __LDREXB(volatile uint8_t *addr)
 {
-    uint8_t result;
+    uint32_t result;
 
-   __ASM volatile ("ldrexb %0, [%1]" : "=r" (result) : "r" (addr) );
+#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
+   __ASM volatile ("ldrexb %0, %1" : "=r" (result) : "Q" (*addr) );
+#else
+    /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not
+       accepted by assembler. So has to use following less efficient pattern.
+    */
+   __ASM volatile ("ldrexb %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );
+#endif
   return(result);
 }
 
@@ -462,9 +525,16 @@ __attribute__( ( always_inline ) ) __STATIC_INLINE uint8_t __LDREXB(volatile uin
 */
 __attribute__( ( always_inline ) ) __STATIC_INLINE uint16_t __LDREXH(volatile uint16_t *addr)
 {
-    uint16_t result;
+    uint32_t result;
 
-   __ASM volatile ("ldrexh %0, [%1]" : "=r" (result) : "r" (addr) );
+#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
+   __ASM volatile ("ldrexh %0, %1" : "=r" (result) : "Q" (*addr) );
+#else
+    /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not
+       accepted by assembler. So has to use following less efficient pattern.
+    */
+   __ASM volatile ("ldrexh %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );
+#endif
   return(result);
 }
@@ -480,7 +550,7 @@ __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __LDREXW(volatile ui
 {
   uint32_t result;
 
-  __ASM volatile ("ldrex %0, [%1]" : "=r" (result) : "r" (addr) );
+  __ASM volatile ("ldrex %0, %1" : "=r" (result) : "Q" (*addr) );
   return(result);
 }
 
@@ -498,7 +568,7 @@ __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __STREXB(uint8_t val
 {
   uint32_t result;
 
-  __ASM volatile ("strexb %0, %2, [%1]" : "=&r" (result) : "r" (addr), "r" (value) );
+  __ASM volatile ("strexb %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" (value) );
   return(result);
 }
 
@@ -516,7 +586,7 @@ __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __STREXH(uint16_t va
 {
   uint32_t result;
 
-  __ASM volatile ("strexh %0, %2, [%1]" : "=&r" (result) : "r" (addr), "r" (value) );
+  __ASM volatile ("strexh %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" (value) );
   return(result);
 }
 
@@ -534,7 +604,7 @@ __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __STREXW(uint32_t va
 {
   uint32_t result;
 
-  __ASM volatile ("strex %0, %2, [%1]" : "=&r" (result) : "r" (addr), "r" (value) );
+  __ASM volatile ("strex %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" (value) );
   return(result);
 }
@@ -546,7 +616,7 @@ __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __STREXW(uint32_t va
 */
 __attribute__( ( always_inline ) ) __STATIC_INLINE void __CLREX(void)
 {
-  __ASM volatile ("clrex");
+  __ASM volatile ("clrex" ::: "memory");
 }
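Aside: the "Q" constraint on the exclusive-access intrinsics lets GCC see exactly which memory location the asm touches, which matters for the load-exclusive/store-exclusive retry loops these functions exist to build. A minimal sketch of an atomic increment on a Cortex-M3/M4, using the CMSIS calling convention __STREXW(value, addr), which returns 0 on success (illustrative helper, not from the commit):

    static inline void atomic_inc(volatile uint32_t *p)
    {
        uint32_t v;
        do {
            v = __LDREXW(p);            /* load and mark the address exclusive */
        } while (__STREXW(v + 1u, p));  /* retry if another access broke exclusivity */
    }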
@@ -591,7 +661,7 @@ __attribute__( ( always_inline ) ) __STATIC_INLINE void __CLREX(void)
 */
 __attribute__( ( always_inline ) ) __STATIC_INLINE uint8_t __CLZ(uint32_t value)
 {
-  uint8_t result;
+  uint32_t result;
 
   __ASM volatile ("clz %0, %1" : "=r" (result) : "r" (value) );
   return(result);


@@ -29,7 +29,7 @@
 #include "cc3k/wlan.h"
 #include "cc3k/nvmem.h"
 
-mp_obj_t pyb_wlan_connect(int n_args, const mp_obj_t *args) {
+mp_obj_t pyb_wlan_connect(uint n_args, const mp_obj_t *args) {
     const char *ap;
     const char *key;
     if (n_args == 2) {